prompt | completion | api
---|---|---
stringlengths 19 to 1.03M | stringlengths 4 to 2.12k | stringlengths 8 to 90
#
# Copyright 2017 Human Longevity, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import hashlib
import boto3
import moto
import pandas as pd
import pytest
import disdat.api as api
from tests.functional.common import TEST_CONTEXT
TEST_REMOTE = '__test_remote_context__'
TEST_BUCKET = 'test-bucket'
TEST_BUCKET_URL = "s3://{}".format(TEST_BUCKET)
def get_hash(path):
    with open(path, 'rb') as f:
        return hashlib.md5(f.read()).hexdigest()
def test_add_bad_path(tmpdir):
# Create Context
api.context(TEST_CONTEXT)
# Create path to csv file but don't create file
test_csv_path = os.path.join(str(tmpdir), 'test.csv')
# Assert csv file does not exist
assert not os.path.exists(test_csv_path)
# Try to add file to the bundle
with pytest.raises(AssertionError) as ex:
api.add(TEST_CONTEXT, 'bad_path', test_csv_path)
# Assert an AssertionError was raised
assert ex.type == AssertionError
# Make sure bundle does not exist
assert api.get(TEST_CONTEXT, 'test_file_as_bundle_txt_file') is None, 'Bundle should not exist'
api.delete_context(TEST_CONTEXT)
def test_single_file(tmpdir):
# Create Context
api.context(TEST_CONTEXT)
# Create test .csv file
test_csv_path = os.path.join(str(tmpdir), 'test.csv')
df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
df.to_csv(test_csv_path)
# Assert csv file exists
assert os.path.exists(test_csv_path)
# Add the file to the bundle
api.add(TEST_CONTEXT, 'test_single_file', test_csv_path)
# Retrieve the bundle
b = api.get(TEST_CONTEXT, 'test_single_file')
# Assert the bundles contain the same data
bundle_hash, file_hash = get_hash(b.data), get_hash(test_csv_path)
assert bundle_hash == file_hash, 'Hashes do not match'
# Test with tags
tag = {'test': 'tag'}
api.add(TEST_CONTEXT, 'test_single_file', test_csv_path, tags=tag)
# Retrieve the bundle
b = api.get(TEST_CONTEXT, 'test_single_file')
# Assert the bundles contain the same data
bundle_hash, file_hash = get_hash(b.data), get_hash(test_csv_path)
assert bundle_hash == file_hash, 'Hashes do not match'
assert b.tags == tag, 'Tags do not match'
# Remove test .csv
os.remove(test_csv_path)
# Assert that data still remains in the bundle
assert api.get(TEST_CONTEXT, 'test_single_file') is not None, 'Bundle should exist'
api.delete_context(TEST_CONTEXT)
def test_add_directory(tmpdir):
# Create Context
api.context(TEST_CONTEXT)
# Directory Structure
# - test.csv
# - second/test_1.txt
# - second/test_2.txt
# - second/third/test_3.txt
# - second/third/test_4.txt
level_1 = ''
level_2 = os.path.join(level_1, 'second')
os.mkdir(os.path.join(str(tmpdir), level_2))
level_3 = os.path.join(level_2, 'third')
os.mkdir(os.path.join(str(tmpdir), level_3))
# Dictionary to hold paths
path_dict = {}
# Create files and save paths
test_csv_name = 'test.csv'
test_csv_path = os.path.join(level_1, test_csv_name)
test_csv_abs_path = os.path.join(str(tmpdir), test_csv_path)
df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
df.to_csv(test_csv_abs_path)
path_dict[test_csv_name] = (test_csv_abs_path, test_csv_path.split('/'))
test_text_1_name = 'test_1.txt'
test_text_1_path = os.path.join(level_2, test_text_1_name)
test_text_name_1_abs_path = os.path.join(str(tmpdir), test_text_1_path)
with open(test_text_name_1_abs_path, 'w') as f:
f.write('Hello!')
path_dict[test_text_1_name] = (test_text_name_1_abs_path, test_text_1_path.split('/'))
test_text_2_name = 'test_2.txt'
test_text_2_path = os.path.join(level_2, test_text_2_name)
test_text_name_2_abs_path = os.path.join(str(tmpdir), test_text_2_path)
with open(test_text_name_2_abs_path, 'w') as f:
f.write('Hello!')
path_dict[test_text_2_name] = (test_text_name_2_abs_path, test_text_2_path.split('/'))
test_text_3_name = 'test_3.txt'
test_text_3_path = os.path.join(level_3, test_text_3_name)
test_text_name_3_abs_path = os.path.join(str(tmpdir), test_text_3_path)
with open(test_text_name_3_abs_path, 'w') as f:
f.write('Third Hello!')
path_dict[test_text_3_name] = (test_text_name_3_abs_path, test_text_3_path.split('/'))
test_text_4_name = 'test_4.txt'
test_text_4_path = os.path.join(level_3, test_text_4_name)
test_text_name_4_abs_path = os.path.join(str(tmpdir), test_text_4_path)
with open(test_text_name_4_abs_path, 'w') as f:
f.write('Third World!')
path_dict[test_text_4_name] = (test_text_name_4_abs_path, test_text_4_path.split('/'))
# Assert files exist
assert os.path.exists(test_csv_abs_path)
assert os.path.exists(test_text_name_1_abs_path)
assert os.path.exists(test_text_name_2_abs_path)
assert os.path.exists(test_text_name_3_abs_path)
assert os.path.exists(test_text_name_4_abs_path)
# Add the directory to the bundle
api.add(TEST_CONTEXT, 'test_directory', str(tmpdir))
# Assert check sums are the same
b = api.get(TEST_CONTEXT, 'test_directory')
for f in b.data:
bundle_file_name = f.split('/')[-1]
local_abs_path, local_split_path = path_dict[bundle_file_name]
# Make sure paths match
assert get_hash(f) == get_hash(local_abs_path), 'Hashes do not match'
bundle_path = os.path.join(*f.split('/')[-len(local_split_path):])
local_path = os.path.join(*local_split_path)
assert local_path == bundle_path, 'Bundle should have the same directory structure'
# Add the directory to the bundle with tags
tag = {'test': 'tag'}
api.add(TEST_CONTEXT, 'test_directory', str(tmpdir), tags=tag)
# Assert check sums are the same
b = api.get(TEST_CONTEXT, 'test_directory')
for f in b.data:
bundle_file_name = f.split('/')[-1]
local_abs_path, local_split_path = path_dict[bundle_file_name]
# Make sure paths match
assert get_hash(f) == get_hash(local_abs_path), 'Hashes do not match'
# Make sure directory structure stays the same
local_path = os.path.join(*local_split_path)
bundle_path = os.path.join(*f.split('/')[-len(local_split_path):])
assert local_path == bundle_path, 'Bundle should have the same directory structure'
# Make sure tags exist
assert b.tags == tag, 'Tags do not match'
api.delete_context(TEST_CONTEXT)
@moto.mock_s3
def deprecated_add_with_treat_as_bundle(tmpdir):
api.context(context_name=TEST_CONTEXT)
# Setup moto s3 resources
s3_client = boto3.client('s3')
s3_resource = boto3.resource('s3', region_name='us-east-1')
s3_resource.create_bucket(Bucket=TEST_BUCKET)
# Make sure bucket is empty
objects = s3_client.list_objects(Bucket=TEST_BUCKET)
assert 'Contents' not in objects, 'Bucket should be empty'
local_paths = []
s3_paths = []
# Create and upload test.csv file
key = 'test.csv'
test_csv_path = os.path.join(str(tmpdir), key)
df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
"""Format helpers"""
import math
import pandas as pd
import pandas.lib as lib
import numpy as np
pd_is_datetime_arraylike = None
try:
from pandas.core.common import is_datetime_arraylike as pd_is_datetime_arraylike
except ImportError:
pass
from functools import partial
def is_datetime_arraylike(arr):
if isinstance(arr, pd.DataFrame):
return arr.apply(pd_is_datetime_arraylike).all()
elif pd_is_datetime_arraylike is not None:
return pd_is_datetime_arraylike(arr)
elif isinstance(arr, pd.DatetimeIndex):
return True
else:
inferred = lib.infer_dtype(arr)
return 'datetime' in inferred
class DateTimeFormat(object):
def __init__(self, fmtstr, coerce=True):
self.fmtstr = fmtstr
self.coerce = coerce
def __call__(self, value):
if isinstance(value, pd.Series):
return value.apply(self.__call__)
else:
if not hasattr(value, 'strftime'):
if self.coerce:
value = pd.to_datetime(value)
if not hasattr(value, 'strftime'):
raise ValueError('failed to coerce %s type=%s to datetime' % (value, type(value)))
else: #
raise ValueError('%s type(%s) has no strftime method' % (value, type(value)))
return (value == value and value.strftime(self.fmtstr)) or str(value)
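def _example_datetime_format():
    # Illustrative sketch (not part of the original module); the sample dates are
    # made up. Strings are coerced through pd.to_datetime before formatting.
    fmt = DateTimeFormat('%d-%b-%y')
    single = fmt(pd.Timestamp('2021-01-15'))                    # '15-Jan-21'
    coerced = fmt('2021-01-15')                                 # coerced, then formatted
    as_series = fmt(pd.Series(pd.to_datetime(['2021-01-15'])))  # element-wise via Series.apply
    return single, coerced, as_series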
class NumberFormat(object):
def __init__(self, precision=2, commas=True, parens=True, suffix=None, kind='f', coerce=True,
transform=None, nan='nan', prefix=None, lpad_zero=1, do_raise=0, trunc_dot_zeros=0):
"""
Parameters
----------
precision : int, defaults to 2
Number of decimal places to show
commas : bool, defaults to True
If True then group thousands with commas, else do not
parens : bool, defaults to True
If True then wrap negative numbers in parentheses
suffix : str, optional
Text appended to the formatted value (for example '%' or 'M')
kind : str, defaults to 'f'
Format type character used in the underlying format spec
coerce : bool, defaults to True
If True then attempt to coerce non-numeric values to float
transform : callable, optional
Function applied to the value before formatting (for example scaling to millions)
nan : str, defaults to 'nan'
Text returned for NaN values
prefix : str, optional
Text prepended to the formatted value
lpad_zero : bool-like, defaults to 1
If truthy then include a leading zero flag in the format spec
do_raise : bool-like, defaults to 0
If truthy then re-raise coercion failures instead of returning the value unchanged
trunc_dot_zeros : bool, defaults to False
If True and precision is greater than 0, a number such as 3.0 is returned as just 3
"""
self.transform = transform
self.coerce = coerce
# build format string
self.precision = precision
self.commas = commas
self.parens = parens
self.suffix = suffix or ''
self.prefix = prefix or ''
self.kind = kind
self.nan = nan
self.lpad_zero = lpad_zero
self.do_raise = do_raise
self.trunc_dot_zeros = trunc_dot_zeros
def __call__(self, value, **kwargs):
# apply any overrides
for k, v in kwargs.items():
if hasattr(self, k):
setattr(self, k, v)
self_with_args = partial(self.__call__, **kwargs)
if isinstance(value, pd.Series):
return value.apply(self_with_args)
elif isinstance(value, pd.DataFrame):
return value.applymap(self_with_args)
elif isinstance(value, (list, tuple)):
return list(map(self_with_args, value))
elif isinstance(value, np.ndarray):
if value.ndim == 2:
return self_with_args(pd.DataFrame(value)).values
elif value.ndim == 1:
return self_with_args(pd.Series(value)).values
elif not issubclass(type(value), (float, int)):
if not self.coerce:
raise ValueError('NumberFormat expected number type not %s' % (type(value)))
else:
if self.coerce and not issubclass(type(value), (float, int)):
try:
value = float(value)
except ValueError:
if self.do_raise:
raise
else:
# return the value without doing anything
return value
if np.isnan(value):
return self.nan
# apply transform
value = value if self.transform is None else self.transform(value)
# Build format string
fmt = '{:' + (self.lpad_zero and '0' or '') + (self.commas and ',' or '') + '.' + str(
self.precision) + self.kind + '}'
txt = fmt.format(value)
if self.precision > 0 and self.trunc_dot_zeros:
txt = txt.replace('.' + '0' * self.precision, '')
if self.parens:
isneg = txt[0] == '-'
lp, rp = isneg and ('(', ')') or ('', '')
txt = isneg and txt[1:] or txt
return '{prefix}{lp}{txt}{suffix}{rp}'.format(prefix=self.prefix, txt=txt, suffix=self.suffix, lp=lp, rp=rp)
else:
return '{prefix}{txt}{suffix}'.format(prefix=self.prefix, txt=txt, suffix=self.suffix)
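def _example_number_format():
    # Illustrative sketch (not part of the original module); sample values are
    # made up and the suffix is an arbitrary choice.
    fmt = NumberFormat(precision=2, commas=True, parens=True, suffix='x')
    positive = fmt(1234.5)        # '1,234.50x'
    negative = fmt(-1234.5)       # '(1,234.50x)' because parens=True
    missing = fmt(float('nan'))   # 'nan', the configured NaN placeholder
    return positive, negative, missing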
def new_int_formatter(commas=True, parens=True, prefix=None, suffix=None, coerce=True, nan='nan', trunc_dot_zeros=0):
precision = 0
return NumberFormat(**locals())
def new_float_formatter(precision=2, commas=True, parens=True, prefix=None, suffix=None, coerce=True, nan='nan',
trunc_dot_zeros=0):
return NumberFormat(**locals())
def new_thousands_formatter(precision=1, commas=True, parens=True, nan='nan', prefix=None, trunc_dot_zeros=0,
suffix='k'):
transform = lambda v: v * 1e-3
return NumberFormat(**locals())
def new_millions_formatter(precision=1, commas=True, parens=True, nan='nan', prefix=None, trunc_dot_zeros=0,
suffix='M'):
transform = lambda v: v * 1e-6
return NumberFormat(**locals())
def new_billions_formatter(precision=1, commas=True, parens=True, nan='nan', prefix=None, trunc_dot_zeros=0,
suffix='B'):
transform = lambda v: v * 1e-9
return NumberFormat(**locals())
def new_trillions_formatter(precision=1, commas=True, parens=True, nan='nan', prefix=None, trunc_dot_zeros=0):
transform = lambda v: v * 1e-12
suffix = 'T'
return NumberFormat(**locals())
def new_percent_formatter(precision=2, commas=True, parens=True, prefix=None, suffix=None, coerce=True,
transform=lambda v: v,
nan='nan', trunc_dot_zeros=0):
kind = '%'
return NumberFormat(**locals())
def new_datetime_formatter(fmtstr='%d-%b-%y', coerce=True):
return DateTimeFormat(**locals())
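def _example_formatter_factories():
    # Illustrative sketch (not part of the original module); the factories simply
    # forward their locals() into NumberFormat/DateTimeFormat, so these calls use
    # the default settings defined above. Sample values are made up.
    millions = new_millions_formatter()   # millions(1234567)  -> '1.2M'
    percent = new_percent_formatter()     # percent(0.1234)    -> '12.34%'
    date = new_datetime_formatter()       # '15-Jan-21' for 2021-01-15
    return millions(1234567), percent(0.1234), date(pd.Timestamp('2021-01-15'))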
def guess_formatter(values, precision=1, commas=True, parens=True, nan='nan', prefix=None, pcts=0,
trunc_dot_zeros=0):
"""Based on the values, return the most suitable formatter
Parameters
----------
values : Series, DataFrame, scalar, list, tuple, or ndarray
Values used to determine which formatter is the best fit
"""
formatter_args = dict(precision=precision, commas=commas, parens=parens, nan=nan, prefix=prefix,
trunc_dot_zeros=trunc_dot_zeros)
try:
if isinstance(values, pd.datetime) and values.hour == 0 and values.minute == 0:
return new_datetime_formatter()
elif is_datetime_arraylike(values):
# basic date formatter if no hours or minutes
if hasattr(values, 'dt'):
if (values.dt.hour == 0).all() and (values.dt.minute == 0).all():
return new_datetime_formatter()
elif isinstance(values, pd.Series):
if values.dropna().apply(lambda d: d.hour == 0).all() and values.apply(lambda d: d.minute == 0).all():
return new_datetime_formatter()
elif isinstance(values, pd.DataFrame):
if values.dropna().applymap(lambda d: d != d or (d.hour == 0 and d.minute == 0)).all().all():
return new_datetime_formatter()
elif isinstance(values, pd.Series):
aval = values.abs()
vmax, vmin = aval.max(), aval.min()
elif isinstance(values, np.ndarray):
if values.ndim == 2:
avalues = pd.DataFrame(values)
# *****************************************************************************
# Copyright (c) 2019, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
"""
| :class:`pandas.Series` functions and operators implementations in HPAT
| Also, it contains Numba internal operators which are required for Series type handling
"""
import numpy
import operator
import pandas
from numba.errors import TypingError
from numba.extending import (types, overload, overload_method, overload_attribute)
from numba import types
import hpat
from hpat.hiframes.pd_series_ext import SeriesType
from hpat.str_arr_ext import StringArrayType
from hpat.utils import to_array
@overload(operator.getitem)
def hpat_pandas_series_getitem(self, idx):
"""
Pandas Series operator :attr:`pandas.Series.get` implementation
**Algorithm**: result = series[idx]
**Test**: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_static_getitem_series1
Parameters
----------
series: :obj:`pandas.Series`
input series
idx: :obj:`int`, :obj:`slice` or :obj:`pandas.Series`
input index
Returns
-------
:class:`pandas.Series` or an element of the underneath type
object of :class:`pandas.Series`
"""
_func_name = 'Operator getitem().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if isinstance(idx, types.Integer):
def hpat_pandas_series_getitem_idx_integer_impl(self, idx):
"""
**Test**: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_iloc1
"""
result = self._data[idx]
return result
return hpat_pandas_series_getitem_idx_integer_impl
if isinstance(idx, types.SliceType):
def hpat_pandas_series_getitem_idx_slice_impl(self, idx):
"""
**Test**: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_iloc2
"""
result = pandas.Series(self._data[idx])
return result
return hpat_pandas_series_getitem_idx_slice_impl
if isinstance(idx, SeriesType):
def hpat_pandas_series_getitem_idx_series_impl(self, idx):
"""
**Test**: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_setitem_series_bool2
"""
super_index = idx._data
result = self._data[super_index]
return result
return hpat_pandas_series_getitem_idx_series_impl
raise TypingError('{} The index must be an Integer, Slice or a pandas.series. Given: {}'.format(_func_name, idx))
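# Illustrative usage sketch (not part of the original module). The overload above
# is exercised when a jit-compiled user function indexes a Series; the decorator
# name assumes HPAT's standard `hpat.jit` entry point.
#
#     @hpat.jit
#     def head_item(series):
#         return series[0]     # Integer index -> scalar element
#
#     @hpat.jit
#     def head_slice(series):
#         return series[:3]    # slice -> new pandas.Series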
@overload_attribute(SeriesType, 'at')
@overload_attribute(SeriesType, 'iat')
@overload_attribute(SeriesType, 'iloc')
@overload_attribute(SeriesType, 'loc')
def hpat_pandas_series_iloc(self):
"""
Pandas Series operators :attr:`pandas.Series.at`, :attr:`pandas.Series.iat`, :attr:`pandas.Series.iloc`, :attr:`pandas.Series.loc` implementation.
.. only:: developer
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_iloc2
Parameters
----------
series: :class:`pandas.Series`
input series
Returns
-------
:obj:`pandas.Series`
returns an object of :obj:`pandas.Series`
"""
_func_name = 'Operator at/iat/iloc/loc().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_iloc_impl(self):
return self
return hpat_pandas_series_iloc_impl
@overload_attribute(SeriesType, 'shape')
def hpat_pandas_series_shape(self):
"""
Pandas Series attribute :attr:`pandas.Series.shape` implementation
**Algorithm**: result = series.shape
**Test**: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_shape1
Parameters
----------
series: :obj:`pandas.Series`
input series
Returns
-------
:obj:`tuple`
a tuple of the shape of the underlying data
"""
_func_name = 'Attribute shape.'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_shape_impl(self):
return self._data.shape
return hpat_pandas_series_shape_impl
@overload_attribute(SeriesType, 'values')
def hpat_pandas_series_iloc(self):
"""
Pandas Series attribute 'values' implementation.
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.values.html#pandas.Series.values
Algorithm: result = series.values
Where:
series: pandas.series
result: pandas.series as ndarray
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_values
"""
_func_name = 'Attribute values.'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_values_impl(self):
return self._data
return hpat_pandas_series_values_impl
@overload_attribute(SeriesType, 'index')
def hpat_pandas_series_index(self):
"""
Pandas Series attribute :attr:`pandas.Series.index` implementation
**Algorithm**: result = series.index
**Test**: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_index1
python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_index2
Parameters
----------
series: :obj:`pandas.Series`
input series
Returns
-------
:class:`pandas.Series`
the index of the Series
"""
_func_name = 'Attribute index.'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_index_impl(self):
return self._index
return hpat_pandas_series_index_impl
@overload_attribute(SeriesType, 'size')
def hpat_pandas_series_size(self):
"""
Pandas Series attribute :attr:`pandas.Series.size` implementation
.. only:: developer
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_size
Parameters
----------
series: :obj:`pandas.Series`
input series
Returns
-------
:class:`pandas.Series`
Return the number of elements in the underlying data.
"""
_func_name = 'Attribute size.'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_size_impl(self):
return len(self._data)
return hpat_pandas_series_size_impl
@overload(len)
def hpat_pandas_series_len(self):
"""
Pandas Series operator :func:`len` implementation
.. only:: developer
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_len
Parameters
----------
series: :class:`pandas.Series`
Returns
-------
:obj:`int`
number of items in the object
"""
_func_name = 'Operator len().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_len_impl(self):
return len(self._data)
return hpat_pandas_series_len_impl
@overload_method(SeriesType, 'isin')
def hpat_pandas_series_isin(self, values):
"""
Pandas Series method :meth:`pandas.Series.isin` implementation.
.. only:: developer
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_isin_list1
Parameters
-----------
values : :obj:`list` or :obj:`set` object
specifies values to look for in the series
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object indicating if each element of self is in values
"""
_func_name = 'Method isin().'
if not isinstance(self, SeriesType):
raise TypingError(
'{} The object must be a pandas.series. Given self: {}'.format(_func_name, self))
if not isinstance(values, (types.Set, types.List)):
raise TypingError(
'{} The argument must be set or list-like object. Given values: {}'.format(_func_name, values))
def hpat_pandas_series_isin_impl(self, values):
# TODO: replace with below line when Numba supports np.isin in nopython mode
# return pandas.Series(np.isin(self._data, values))
return pandas.Series([(x in values) for x in self._data])
return hpat_pandas_series_isin_impl
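# Illustrative usage sketch (not part of the original module), assuming HPAT's
# `hpat.jit` decorator:
#
#     @hpat.jit
#     def flag_small_values(series):
#         return series.isin([1, 2, 3])   # boolean pandas.Series, one flag per element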
@overload_method(SeriesType, 'append')
def hpat_pandas_series_append(self, to_append):
"""
Pandas Series method :meth:`pandas.Series.append` implementation.
.. only:: developer
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_append1
Parameters
-----------
to_append : :obj:`pandas.Series` object
input argument
ignore_index:
*unsupported*
verify_integrity:
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method append().'
if not isinstance(self, SeriesType) or not isinstance(to_append, SeriesType):
raise TypingError(
'{} The object must be a pandas.series. Given self: {}, to_append: {}'.format(_func_name, self, to_append))
def hpat_pandas_series_append_impl(self, to_append):
return pandas.Series(self._data + to_append._data)
return hpat_pandas_series_append_impl
@overload_method(SeriesType, 'groupby')
def hpat_pandas_series_groupby(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=True,
group_keys=True,
squeeze=False,
observed=False):
"""
Pandas Series method :meth:`pandas.Series.groupby` implementation.
.. only:: developer
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_groupby_count
Parameters
-----------
self: :class:`pandas.Series`
input arg
by: :obj:`pandas.Series` object
Used to determine the groups for the groupby
axis:
*unsupported*
level:
*unsupported*
as_index:
*unsupported*
sort:
*unsupported*
group_keys:
*unsupported*
squeeze:
*unsupported*
observed:
*unsupported*
Returns
-------
:obj:`pandas.SeriesGroupBy`
returns :obj:`pandas.SeriesGroupBy` object
"""
_func_name = 'Method Series.groupby().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if by is None and axis is None:
raise TypingError("{} You have to supply one of 'by' or 'axis' parameters".format(_func_name))
if level is not None and not isinstance(level, (types.Integer, types.NoneType, types.Omitted)):
raise TypingError("{} 'level' must be an Integer. Given: {}".format(_func_name, level))
def hpat_pandas_series_groupby_impl(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=True,
group_keys=True,
squeeze=False,
observed=False):
# TODO Needs to implement parameters value check
# if level is not None and (level < -1 or level > 0):
# raise ValueError("Method Series.groupby(). level > 0 or level < -1 only valid with MultiIndex")
return pandas.core.groupby.SeriesGroupBy(self)
return hpat_pandas_series_groupby_impl
@overload_method(SeriesType, 'ne')
def hpat_pandas_series_ne(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.ne` implementation.
.. only:: developer
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op8
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method ne().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if level is not None or fill_value is not None or axis != 0:
raise TypingError('{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value, axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_ne_impl(self, other):
"""
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op8
"""
return pandas.Series(self._data != other._data)
return hpat_pandas_series_ne_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_ne_impl(self, other):
"""
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op8_integer_scalar
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op8_float_scalar
"""
return pandas.Series(self._data != other)
return hpat_pandas_series_ne_impl
raise TypingError('{} The object must be a pandas.series and argument must be a number. Given: {} and other: {}'.format(_func_name, self, other))
@overload_method(SeriesType, 'add')
def hpat_pandas_series_add(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.add` implementation.
.. only:: developer
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op5
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method add().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if level is not None or fill_value is not None or axis != 0:
raise TypingError('{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value, axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_add_impl(lhs, rhs):
"""
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op5
"""
return pandas.Series(lhs._data + rhs._data)
return hpat_pandas_series_add_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_add_number_impl(lhs, rhs):
"""
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op5_integer_scalar
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op5_float_scalar
"""
return pandas.Series(lhs._data + rhs)
return hpat_pandas_series_add_number_impl
raise TypingError('{} The object must be a pandas.series and argument must be a number. Given: {} and other: {}'.format(_func_name, self, other))
@overload_method(SeriesType, 'sub')
def hpat_pandas_series_sub(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.sub` implementation.
.. only:: developer
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op5
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method sub().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if level is not None or fill_value is not None or axis != 0:
raise TypingError('{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value, axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_sub_impl(self, other):
"""
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op5
"""
return pandas.Series(self._data - other._data)
return hpat_pandas_series_sub_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_sub_number_impl(self, other):
"""
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op5_integer_scalar
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op5_float_scalar
"""
return pandas.Series(self._data - other)
return hpat_pandas_series_sub_number_impl
raise TypingError('{} The object must be a pandas.series or scalar. Given other: {}'.format(_func_name, other))
@overload_method(SeriesType, 'take')
def hpat_pandas_series_take(self, indices, axis=0, is_copy=False):
"""
Pandas Series method :meth:`pandas.Series.take` implementation.
.. only:: developer
Tests: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_take_index_default
python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_take_index_default_unboxing
python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_take_index_int
python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_take_index_int_unboxing
python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_take_index_str
python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_take_index_str_unboxing
Parameters
----------
self: :obj:`pandas.Series`
input series
indices: :obj:`array-like`
An array of ints indicating which positions to take
axis: {0 or `index`, 1 or `columns`, None}, default 0
The axis on which to select elements. 0 means that we are selecting rows,
1 means that we are selecting columns.
*unsupported*
is_copy: :obj:`bool`, default True
Whether to return a copy of the original object or not.
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object containing the elements taken from the object
"""
_func_name = 'Method take().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(indices, types.List):
raise TypingError('{} The indices must be a List. Given: {}'.format(_func_name, indices))
if not (isinstance(axis, (types.Integer, types.Omitted)) or axis == 0):
raise TypingError('{} The axis must be an Integer. Currently unsupported. Given: {}'.format(_func_name, axis))
if not (isinstance(is_copy, (types.Boolean, types.Omitted)) or is_copy == False):
raise TypingError('{} The is_copy must be a boolean. Given: {}'.format(_func_name, is_copy))
if self.index is not types.none:
def hpat_pandas_series_take_impl(self, indices, axis=0, is_copy=False):
local_data = [self._data[i] for i in indices]
local_index = [self._index[i] for i in indices]
return pandas.Series(local_data, local_index)
return hpat_pandas_series_take_impl
else:
def hpat_pandas_series_take_noindex_impl(self, indices, axis=0, is_copy=False):
local_data = [self._data[i] for i in indices]
return pandas.Series(local_data, indices)
return hpat_pandas_series_take_noindex_impl
@overload_method(SeriesType, 'mul')
def hpat_pandas_series_mul(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.mul` implementation.
.. only:: developer
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op5
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method mul().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if level is not None or fill_value is not None or axis != 0:
raise TypingError('{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value, axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_mul_impl(self, other):
"""
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op5
"""
return pandas.Series(self._data * other._data)
return hpat_pandas_series_mul_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_mul_number_impl(self, other):
"""
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op5_integer_scalar
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op5_float_scalar
"""
return pandas.Series(self._data * other)
return hpat_pandas_series_mul_number_impl
raise TypingError('{} The object must be a pandas.series or scalar. Given other: {}'.format(_func_name, other))
@overload_method(SeriesType, 'div')
@overload_method(SeriesType, 'truediv')
def hpat_pandas_series_div(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.div` and :meth:`pandas.Series.truediv` implementation.
.. only:: developer
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op5
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method div() or truediv().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if level is not None or fill_value is not None or axis != 0:
raise TypingError('{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value, axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_div_impl(self, other):
"""
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op5
"""
return pandas.Series(self._data / other._data)
return hpat_pandas_series_div_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_div_number_impl(self, other):
"""
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op5_integer_scalar
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op5_float_scalar
"""
return pandas.Series(self._data / other)
return hpat_pandas_series_div_number_impl
raise TypingError('{} The object must be a pandas.series or scalar. Given other: {}'.format(_func_name, other))
@overload_method(SeriesType, 'floordiv')
def hpat_pandas_series_floordiv(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.floordiv` implementation.
.. only:: developer
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op5
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method floordiv().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if level is not None or fill_value is not None or axis != 0:
raise TypingError('{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value, axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_floordiv_impl(self, other):
"""
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op5
"""
return pandas.Series(self._data // other._data)
return hpat_pandas_series_floordiv_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_floordiv_number_impl(self, other):
"""
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op5_integer_scalar
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op5_float_scalar
"""
return pandas.Series(self._data // other)
return hpat_pandas_series_floordiv_number_impl
raise TypingError('{} The object must be a pandas.series or scalar. Given other: {}'.format(_func_name, other))
@overload_method(SeriesType, 'pow')
def hpat_pandas_series_pow(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.pow` implementation.
.. only:: developer
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op5
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method pow().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if level is not None or fill_value is not None or axis != 0:
raise TypingError('{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value, axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_pow_impl(self, other):
"""
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op5
"""
return pandas.Series(self._data ** other._data)
return hpat_pandas_series_pow_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_pow_impl(self, other):
"""
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op5_integer_scalar
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op5_float_scalar
"""
return pandas.Series(self._data ** other)
return hpat_pandas_series_pow_impl
raise TypingError('{} The object must be a pandas.series and argument must be a number. Given: {} and other: {}'.format(_func_name, self, other))
@overload_method(SeriesType, 'quantile')
def hpat_pandas_series_quantile(self, q=0.5, interpolation='linear'):
"""
Pandas Series method :meth:`pandas.Series.quantile` implementation.
.. only:: developer
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_quantile
python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_quantile_q_vector
Parameters
-----------
q : :obj: float or array-like object, default 0.5
the quantile(s) to compute
interpolation: 'linear', 'lower', 'higher', 'midpoint', 'nearest', default `linear`
*unsupported* by Numba
Returns
-------
:obj:`pandas.Series` or float
"""
_func_name = 'Method quantile().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(interpolation, types.Omitted) and interpolation != 'linear':
raise TypingError('{} Unsupported parameters. Given interpolation: {}'.format(_func_name, interpolation))
if not isinstance(q, (types.Number, types.Omitted, types.List)) and q != 0.5:
raise TypingError('{} The parameter must be float. Given type q: {}'.format(_func_name, type(q)))
def hpat_pandas_series_quantile_impl(self, q=0.5, interpolation='linear'):
return numpy.quantile(self._data, q)
return hpat_pandas_series_quantile_impl
@overload_method(SeriesType, 'min')
def hpat_pandas_series_min(self, axis=None, skipna=True, level=None, numeric_only=None):
"""
Pandas Series method :meth:`pandas.Series.min` implementation.
.. only:: developer
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_min
python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_min_param
Parameters
-----------
axis:
*unsupported*
skipna: :obj:`bool` object
Exclude nan values when computing the result
level:
*unsupported*
numeric_only:
*unsupported*
Returns
-------
:obj:
returns :obj: scalar
"""
_func_name = 'Method min().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(self.data.dtype, (types.Integer, types.Float)):
raise TypingError('{} Currently function supports only numeric values. Given data type: {}'.format(_func_name, self.data.dtype))
if not isinstance(skipna, (types.Omitted, types.Boolean)) and skipna is not True:
raise TypingError(
'{} The parameter must be a boolean type. Given type skipna: {}'.format(_func_name, skipna))
if not (isinstance(axis, types.Omitted) or axis is None) \
or not (isinstance(level, types.Omitted) or level is None) \
or not (isinstance(numeric_only, types.Omitted) or numeric_only is None):
raise TypingError(
'{} Unsupported parameters. Given axis: {}, level: {}, numeric_only: {}'.format(_func_name, axis, level,
numeric_only))
def hpat_pandas_series_min_impl(self, axis=None, skipna=True, level=None, numeric_only=None):
if skipna:
return numpy.nanmin(self._data)
return self._data.min()
return hpat_pandas_series_min_impl
@overload_method(SeriesType, 'max')
def hpat_pandas_series_max(self, axis=None, skipna=True, level=None, numeric_only=None):
"""
Pandas Series method :meth:`pandas.Series.max` implementation.
.. only:: developer
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_max
python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_max_param
Parameters
-----------
axis:
*unsupported*
skipna: :obj:`bool` object
Exclude nan values when computing the result
level:
*unsupported*
numeric_only:
*unsupported*
Returns
-------
:obj:
returns :obj: scalar
"""
_func_name = 'Method max().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(self.data.dtype, (types.Integer, types.Float)):
raise TypingError('{} Currently function supports only numeric values. Given data type: {}'.format(_func_name, self.data.dtype))
if not isinstance(skipna, (types.Omitted, types.Boolean)) and skipna is not True:
raise TypingError(
'{} The parameter must be a boolean type. Given type skipna: {}'.format(_func_name, skipna))
if not (isinstance(axis, types.Omitted) or axis is None) \
or not (isinstance(level, types.Omitted) or level is None) \
or not (isinstance(numeric_only, types.Omitted) or numeric_only is None):
raise TypingError(
'{} Unsupported parameters. Given axis: {}, level: {}, numeric_only: {}'.format(_func_name, axis, level,
numeric_only))
def hpat_pandas_series_max_impl(self, axis=None, skipna=True, level=None, numeric_only=None):
if skipna:
return numpy.nanmax(self._data)
return self._data.max()
return hpat_pandas_series_max_impl
@overload_method(SeriesType, 'mod')
def hpat_pandas_series_mod(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.mod` implementation.
.. only:: developer
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op5
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method mod().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if level is not None or fill_value is not None or axis != 0:
raise TypingError('{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value, axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_mod_impl(self, other):
"""
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op5
"""
return pandas.Series(self._data % other._data)
return hpat_pandas_series_mod_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_mod_impl(self, other):
"""
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op5_integer_scalar
Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op5_float_scalar
"""
return pandas.Series(self._data % other)
"""Performance visualization class"""
import os
from dataclasses import dataclass, field
from typing import Dict, List
import pandas as pd
import seaborn as sns
import scikit_posthocs as sp
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib import pyplot
import matplotlib.pylab as plt
from tqdm import tqdm
from src.data import Data
@dataclass
class VizMetrics(Data):
"""Generates plots to visualize models performance.
This object generates performance plots to compare different spatial
cross-validation approaches.
Attributes
----------
root_path : str
Root path
index_col : str
The metrics csv index column name
fs_method : str
The feature selection method used
ml_method : str
The machine learning method used
"""
cv_methods: List = field(default_factory=list)
index_col: str = None
fs_method: str = None
ml_method: str = None
cv_methods_path: List = field(default_factory=list)
cv_methods_results: Dict = field(default_factory=dict)
def init_methods_path(self):
"""Initialize spatial cv folder paths"""
self.cv_methods_path = [
os.path.join(
self.root_path,
"results",
method,
"evaluations",
self.fs_method,
self.ml_method,
"metrics.csv",
)
for method in self.cv_methods
]
def load_cv_results(self):
"""Load metric results from each spatial cv being considered"""
for data_path, method in zip(self.cv_methods_path, self.cv_methods):
self.cv_methods_results[method] = pd.read_csv(
data_path, index_col=self.index_col
)
def generate_metric_df(self, metric):
"""Generates a dataframe for a given metric"""
index_fold = self.cv_methods_results["Optimistic"].index
metric_df = pd.DataFrame(columns=self.cv_methods, index=index_fold)
from extract_from_html import *
import tensorflow as tf
import nltk
from visualize_data import display_data,compute_accuracy
import pandas as pd
import argparse
stopwords = nltk.corpus.stopwords.words('english')
english_words = set(nltk.corpus.words.words())
def clean_reviews(reviews):
clean_reviews = []
for text in reviews:
tokens = nltk.word_tokenize(text)
no_digits = [w for w in tokens if
not any(n.isdigit() for n in w)] # remove numbers and words containing numbers
punctuation = [word for word in no_digits if word.isalpha()] # remove punctuation
stopwords = nltk.corpus.stopwords.words('english')
processed = [w for w in punctuation if w.lower() not in stopwords] # remove stop words
clean_reviews.append(' '.join([s for s in processed if len(s) > 2 ])) # removes words shorter then 2
return clean_reviews
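def _example_clean_reviews():
    # Illustrative sketch (not part of the original script); the review text is
    # made up. clean_reviews strips numbers, punctuation, stop words and tokens
    # shorter than three characters.
    return clean_reviews(["The film's 2 leads were... surprisingly good!"])
    # -> ['film leads surprisingly good']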
def compute_scores(result):
result[:,0] *= 2
result[:,1] *= 5.5
result[:,2] *= 9
return result.sum(axis=1)
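# Worked example (illustrative; assumes `result` holds three class scores per
# row): a row of [0.2, 0.5, 0.3] becomes 0.2*2 + 0.5*5.5 + 0.3*9 = 0.4 + 2.75 + 2.7 = 5.85.
# Note that compute_scores scales `result` in place before summing.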
def run(name='moviescoreai',output='run',
start=False, movies_url = "https://www.imdb.com/movies-in-theaters/?ref_=nv_mv_inth"):
model = tf.keras.models.load_model("src/weights")
print()
print("The movies are:")
print()
reviews_df = pd.DataFrame()
# -*- coding: utf-8 -*-
import pandas as pd
import pandas.types.concat as _concat
import pandas.util.testing as tm
class TestConcatCompat(tm.TestCase):
def check_concat(self, to_concat, exp):
for klass in [pd.Index, pd.Series]:
to_concat_klass = [klass(c) for c in to_concat]
res = _concat.get_dtype_kinds(to_concat_klass)
self.assertEqual(res, set(exp))
def test_get_dtype_kinds(self):
to_concat = [['a'], [1, 2]]
self.check_concat(to_concat, ['i', 'object'])
to_concat = [[3, 4], [1, 2]]
self.check_concat(to_concat, ['i'])
to_concat = [[3, 4], [1, 2.1]]
self.check_concat(to_concat, ['i', 'f'])
def test_get_dtype_kinds_datetimelike(self):
to_concat = [pd.DatetimeIndex(['2011-01-01']),
pd.DatetimeIndex(['2011-01-02'])]
self.check_concat(to_concat, ['datetime'])
to_concat = [pd.TimedeltaIndex(['1 days']),
pd.TimedeltaIndex(['2 days'])]
self.check_concat(to_concat, ['timedelta'])
def test_get_dtype_kinds_datetimelike_object(self):
to_concat = [pd.DatetimeIndex(['2011-01-01']),
pd.DatetimeIndex(['2011-01-02'], tz='US/Eastern')]
#!/usr/bin/python3
from gooey import *
from Bio import SeqIO
from Bio.Seq import Seq, MutableSeq, reverse_complement
from Bio.Data import IUPACData
import pandas as pd
pd.options.mode.chained_assignment = None
# input parameters
@Gooey(required_cols=2, program_name='CpG island identificator', header_bg_color= '#DCDCDC', terminal_font_color= '#DCDCDC', terminal_panel_color= '#DCDCDC')
def main():
ap = GooeyParser(description="identify CpG islands on one or many sequences based on the Gardiner-Garden and Frommer (1987) method")
ap.add_argument("-in", "--input", required=False, widget='FileChooser', help="input single-fasta file")
ap.add_argument("-gc", "--gc", required=False, default=50, help="min GC content(support for S and W nucleotides).Default= 50")
ap.add_argument("-ratio", "--ratio", required=False, default=0.6, help="min ratio of the Obs/Exp value, type = float. Default= 0.6")
ap.add_argument("-step", "--step", required=True, help="step size for CpG identification, type = integer")
ap.add_argument("-win", "--window", required=True, help="window size for CpG identification, type = integer")
ap.add_argument("-pro", "--program", type=int, default=1, required=False, help="program to select 1) 1 single-fasta file 2) many single-fasta files. Default is 1")
ap.add_argument("-dir", "--directory", required=False, type=str, widget='DirChooser', help="directory to search for fasta files")
ap.add_argument("-out", "--output", required=False, widget='FileSaver', help="output txt file")
args = vars(ap.parse_args())
# calculate obs value
def obs(seq):
return seq.count('CG')
# calculate Exp value
def exp(seq):
return round(seq.count('C') * seq.count('G') / int(args['window']), 2)
# calculate gc content
def gc_content(seq):
gc = sum(seq.count(x) for x in ["G", "C", "S"])
return round(gc * 100 / sum(seq.count(x) for x in ["A", "T", "G", "C", "S", "W"]), 2)
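    # Worked example (illustrative) of the Gardiner-Garden and Frommer quantities
    # above, assuming a window of 6 and the toy sequence 'CGCGAT':
    #   obs = count of 'CG'                        = 2
    #   exp = C * G / window = 2 * 2 / 6           = 0.67
    #   gc_content = (G+C+S)*100 / (A+T+G+C+S+W)   = 4*100/6 = 66.67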
# main
gcobs = []
gcexp = []
headers = [] # setup empty lists
# choose program
if args['program'] == 1:
# 1 single-fasta file
record = SeqIO.read(args['input'], "fasta")
rev = reverse_complement(record.seq)
for i in range(0, len(record.seq) - int(args['window']) + 1, int(args['step'])):
if gc_content(record.seq[i:i + int(args['window'])]) > float(args['gc']):
gcobs.append(obs(record.seq[i:i + int(args['window'])]))
gcexp.append(exp(record.seq[i:i + int(args['window'])]))
headers.append(i)
if gc_content(rev[i:i + int(args['window'])]) > float(args['gc']):
gcobs.append(obs(rev[i:i + int(args['window'])]))
gcexp.append(exp(rev[i:i + int(args['window'])]))
headers.append(i -len(record.seq))
# create data frame
df = pd.DataFrame()
df['start'] = headers
df['obs'] = gcobs
df['exp'] = gcexp
df['obs/exp'] = round(df['obs']/df['exp'], 2)
df = df[df['exp'] > 0]
df = df[df['obs/exp'] > float(args['ratio'])]
df = df.sort_values(by=['obs/exp'], ascending=False)
df['id'] = record.id
start = df.iloc[:,0]
end = start.astype(int) + int(args['window'])
df1 = pd.DataFrame()
df1 = df[["id"]]
df1['start'] = start
df1['end'] = end
df1['obs'] = df[["obs"]]
df1['exp'] = df[["exp"]]
df1['obs/exp'] = df[["obs/exp"]]
# export
with open(args['output'], 'a') as f:
f.write(
df1.to_csv(header = True, index = False, sep = '\t', doublequote= False, line_terminator= '\n')
)
# many single fasta files
else:
# import each fasta file from the working directory
for filename in sorted(os.listdir(os.chdir(args['directory']))):
if filename.endswith(".fa") or filename.endswith(".fasta"):
record = SeqIO.read(filename, "fasta")
rev = reverse_complement(record.seq)
for i in range(0, len(record.seq) - int(args['window']) + 1, int(args['step'])):
if gc_content(record.seq[i:i + int(args['window'])]) > float(args['gc']):
gcobs.append(obs(record.seq[i:i + int(args['window'])]))
gcexp.append(exp(record.seq[i:i + int(args['window'])]))
headers.append(i)
if gc_content(rev[i:i + int(args['window'])]) > float(args['gc']):
gcobs.append(obs(rev[i:i + int(args['window'])]))
gcexp.append(exp(rev[i:i + int(args['window'])]))
headers.append(i -len(record.seq))
# create data frame
df = pd.DataFrame()
df['start'] = headers
df['obs'] = gcobs
df['exp'] = gcexp
df['obs/exp'] = round(df['obs']/df['exp'], 2)
df = df[df['exp'] > 0]
df = df[df['obs/exp'] > float(args['ratio'])]
df = df.sort_values(by=['obs/exp'], ascending=False)
df['id'] = record.id
start = df.iloc[:,0]
end = start.astype(int) + int(args['window'])
df1 = pd.DataFrame()
import urllib3
from bs4 import BeautifulSoup as bs
import pandas as pd
import os.path
import sys
import csv
from pathlib import Path
# Grabs raw web page from basketball reference and converts it into a text file for NLP functionality
class raw_text(object):
def process_raw_text(self, year):
url = 'https://www.basketball-reference.com/leagues/NBA_{}_transactions.html'.format(year)
http = urllib3.PoolManager()
response = http.request('GET',url)
soup = bs(response.data,'html.parser')
transaction_str = ""
transaction_df = pd.DataFrame()
rows = soup.find('ul',attrs={'class': 'page_index'})
for row in rows.find_all('li'):
dates = row.find_all('span')
for date in dates:
cells = row.find_all('p')
for cell in cells:
transaction = [[date.text,cell.text]]
transaction_str+=date.text + " "
transaction_str+=cell.text + " "
# print(date.text,cell.text)
df_hold = pd.DataFrame(transaction)
import pandas as pd
import numpy as np
import altair as alt
import matplotlib.pyplot as plt
def get_first_row(s):
return s.iloc[0]
# Reads the first row of data and determines whether each column is categorical, quantitative or nominal
def auto_get_data_type(df):
type_dict = dict()
columns = list(df.columns)
for column in columns:
value = get_first_row(df[column])
if isinstance(value, str):
if value.isnumeric():
type_dict[column] = 'Q'
else:
type_dict[column] = 'C'
else:
type_dict[column] = 'Q'
return type_dict
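def _example_auto_get_data_type():
    # Illustrative sketch (not part of the original module); column names and
    # values are made up. Non-numeric strings are treated as categorical ('C'),
    # everything else as quantitative ('Q').
    demo = pd.DataFrame({'city': ['NYC', 'LA'], 'price': [10.5, 20.0]})
    return auto_get_data_type(demo)   # -> {'city': 'C', 'price': 'Q'}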
#Manually enter if data is categorical, quantitative, nominal or id
def manual_entry_data_type(df):
type_dict = dict()
for column in list(df.columns):
type_dict[column] = input('Enter the variable type for {} (Quantitative/Categorical/Index/Time) Q/C/I/T:'.format(column))
return type_dict
def get_df_column_list(df):
return list(df.columns)
def manual_data_type_entry(df):
value = input('First time data entry (F), Correction (C), Skip this (S):')
if value == 'F':
type_dict = manual_entry_data_type(df)
elif value == 'C':
correction = 'y'
while correction == 'y':
variable = input('Enter variable name:')
value = input('Enter variable type:')
type_dict[variable] = value
correction = input('Update more variables(y/n):')
elif value == 'S':
print('Cool! here is dict:',type_dict)
return type_dict
def get_column_names_for_variable_type(columns,type_dict,variable_type):
cat_columns = [key for key,value in type_dict.items() if value == variable_type]
return cat_columns
def get_data_for_variables(df,data_type_dict,variable_type):
#print('get_data_for_variables--------------->',df)
columns = get_df_column_list(df)
var_columns = get_column_names_for_variable_type(columns,data_type_dict,variable_type)
index_column = get_index_column(columns,data_type_dict)
data_dict = dict()
if variable_type == 'C':
for column in var_columns:
summary = df.groupby(column).agg({index_column: 'count'}).reset_index()
data_dict[column] = summary
return data_dict,var_columns
elif variable_type == 'Q':
for column in var_columns:
quantitative_data = clean_quantitative_data(df[column])
data_dict[column] = quantitative_data
return data_dict,var_columns
def get_index_column(columns,type_dict):
    index_column = [key for key, value in type_dict.items() if value == 'I']
return index_column[0]
def get_time_column(columns,type_dict):
    time_column = [key for key, value in type_dict.items() if value == 'T']
return time_column[0]
def create_sorted_bar_chart(df,x_name,y_name,color='orange'):
chart = alt.Chart(df).mark_bar(color=color).encode(
x = x_name,
y = alt.Y(y_name, sort='-x'))
return chart
def get_x_y_column_names(df):
columns = get_df_column_list(df)
x_name = columns[1]
y_name = columns[0]
return x_name,y_name
def show_sorted_bar_chart(df):
x_name,y_name = get_x_y_column_names(df)
chart = create_sorted_bar_chart(df,x_name,y_name,color='orange')
return chart
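# Illustrative usage only (not part of the original module): build a sorted bar chart from
# a small, invented summary frame. The column order (category first, count second) mirrors
# what get_x_y_column_names() above assumes.
def _demo_sorted_bar_chart():
    summary = pd.DataFrame({'city': ['NYC', 'LA', 'SF'], 'visits': [5, 9, 2]})
    return show_sorted_bar_chart(summary)  # horizontal bars, longest bar first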
def clean_quantitative_data(s):
s = pd.to_numeric(s, errors='coerce', downcast='float').dropna()
return s
def clean_dataframe_for_timeseries(df,data_type_dict):
columns = list(df.columns)
for column in columns:
value = data_type_dict[column]
if value == 'T':
df[column] = pd.to_datetime(df[column])
elif value == 'Q':
df[column] = pd.to_numeric(df[column], errors='coerce', downcast='float')
else:
continue
def get_central_tendency_for_variable(s):
return s.mean(),s.median(),s.mode()
def get_spread_for_variable(s):
return s.std(),s.var()
def get_skew_kurt_for_variable(s):
return s.kurtosis(),s.skew()
def get_summary_statistics(s):
mean, median, mode = get_central_tendency_for_variable(s)
std, var = get_spread_for_variable(s)
kurtosis, skewness = get_skew_kurt_for_variable(s)
summary_dict = {'mean':mean, 'median':median, 'mode':mode,'std':std,
'var':var,'kurtosis':kurtosis,'skewness':skewness}
return | pd.DataFrame(summary_dict) | pandas.DataFrame |
import warnings
from decimal import Decimal
from typing import List, Tuple, Dict
from pandas import DataFrame
from pandas.core.common import SettingWithCopyWarning
from model.DomObject import DomObject
from service.i_scraping_service import IScrapingService
from service.ulitity import extract_numbers, regex
def get_olympus_lens_list(scraping: IScrapingService) -> DataFrame:
    # Fetch the list of lens URLs
page = scraping.get_page('https://www.olympus-imaging.jp/product/dslr/mlens/index.html', cache=False)
lens_list: List[Tuple[str, str]] = []
for a_element in page.find_all('h2.productName > a'):
lens_name = a_element.text.split('/')[0].replace('\n', '')
if 'M.ZUIKO' not in lens_name:
continue
lens_product_number = a_element.attrs['href'].replace('/product/dslr/mlens/', '').replace('/index.html', '')
lens_list.append((lens_name, lens_product_number))
page = scraping.get_page('https://www.olympus-imaging.jp/product/dslr/record/index.html', cache=False)
for a_element in page.find_all('div.section'):
div_element = a_element.find('div.mb15 > h2')
a_element2 = a_element.find('li > a')
if div_element is None or a_element2 is None:
continue
lens_name = div_element.text
if 'M.ZUIKO DIGITAL' not in lens_name:
continue
lens_product_number = a_element2.attrs['href'].replace('/product/dslr/mlens/', '').replace('/index.html', '')
lens_list.append((lens_name, lens_product_number))
    # Fetch the information for each lens
lens_data_list: List[Dict[str, str]] = []
for lens_name, lens_product_number in lens_list:
        # Fetch information from the spec (detail) page
if lens_product_number != '14-42_35-56':
spec_url = f'https://www.olympus-imaging.jp/product/dslr/mlens/{lens_product_number}/spec.html'
else:
spec_url = f'https://www.olympus-imaging.jp/product/dslr/mlens/{lens_product_number}/spec/index.html'
page = scraping.get_page(spec_url)
temp_dict: Dict[str, str] = {}
for tr_element in page.find('table').find_all('tr'):
tr_element: DomObject = tr_element
            # The th cell may be wrapped in a span or a strong tag (inconsistent markup), so brute-force through the variants
th_element = tr_element.find('th > span')
if th_element is None:
th_element = tr_element.find('th > strong')
if th_element is None:
th_element = tr_element.find('th')
            # The td cell is used as-is
td_element = tr_element.find('td')
            # Combine the pair
temp_dict[th_element.text] = td_element.text
        # Fetch information from the product top page
index_url = f'https://www.olympus-imaging.jp/product/dslr/mlens/{lens_product_number}/index.html'
page = scraping.get_page(index_url)
temp_dict['URL'] = index_url
table_element = page.find('table')
        # The th/td cells are picked up differently here than on the spec page because, only on the
        # product top page of the M.ZUIKO DIGITAL ED 30mm F3.5 Macro, the suggested retail price
        # (and only that field) could not be retrieved otherwise
for th_element, td_element in zip(table_element.find_all('th'), table_element.find_all('td')):
th_element2 = th_element.find('span')
if th_element2 is None:
th_element2 = th_element.find('strong')
if th_element2 is None:
th_element2 = th_element
temp_dict[th_element2.text] = td_element.text
        # Add the required columns
temp_dict['name'] = lens_name.replace(' ', ' ')
temp_dict['product_number'] = lens_product_number
        # Drop unneeded columns
del_column_list = [
'レンズ構成',
'フォーカシング方式',
'AF方式',
'特長',
'マウント規格',
'画角',
'最近接撮影範囲',
'絞り羽枚数',
'同梱品',
'主な同梱品',
'別売りアクセサリー',
'別売アクセサリー',
'製品名',
'JANコード',
'JAN',
'発売日',
'オンラインショップ',
'フード',
'最大口径比',
'最小口径比',
'最大口径比/最小口径比',
'35mm判換算最大撮影倍率',
'最大撮影倍率(35mm判換算)',
'手ぶれ補正性能',
'ズーム',
'ズーム方式',
]
for column in del_column_list:
if column in temp_dict:
del temp_dict[column]
        # Some columns have to be renamed before the records can be merged, so handle that here
if '大きさ 最大径×長さ' in temp_dict:
temp_dict['大きさ 最大径×全長'] = temp_dict['大きさ 最大径×長さ']
del temp_dict['大きさ 最大径×長さ']
if '大きさ 最大径 × 全長' in temp_dict:
temp_dict['大きさ 最大径×全長'] = temp_dict['大きさ 最大径 × 全長']
del temp_dict['大きさ 最大径 × 全長']
if '大きさ 最大径×全長' in temp_dict:
temp_dict['大きさ 最大径×全長'] = temp_dict['大きさ 最大径×全長']
del temp_dict['大きさ 最大径×全長']
if '大きさ 最大径 x 全長' in temp_dict:
temp_dict['大きさ 最大径×全長'] = temp_dict['大きさ 最大径 x 全長']
del temp_dict['大きさ 最大径 x 全長']
if '防滴性能 / 防塵機構' in temp_dict:
temp_dict['防滴処理'] = temp_dict['防滴性能 / 防塵機構']
del temp_dict['防滴性能 / 防塵機構']
if '価格' in temp_dict:
temp_dict['希望小売価格'] = temp_dict['価格']
del temp_dict['価格']
lens_data_list.append(temp_dict)
df = | DataFrame.from_records(lens_data_list) | pandas.DataFrame.from_records |
# networkx experimentation and link graph plotting tests
# not in active use for the search engine but left here for reference
import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd
import sqlite3
from nltk import FreqDist
from networkx.drawing.nx_agraph import graphviz_layout
import spacy
nlp = spacy.load("en_core_web_md")
def plot_keyword_frequency():
connection = sqlite3.connect("search.db")
cursor = connection.cursor()
keywords = cursor.execute("SELECT keywords FROM posts;").fetchall()
all_keywords = []
for k in keywords:
for l in k[0].split(", "):
all_keywords.append(l)
distribution = FreqDist(all_keywords)
print(distribution.most_common(20))
distribution.plot(20)
def plot_linkrot_by_date():
plt.figure()
plt.title("Linkrot by date")
plt.xlabel("Date")
plt.ylabel("Number of URLs")
pd.read_csv("data/external_link_status.csv").groupby(["status_code"]).count()["url"].plot(kind="line")
pd.read_csv("data/external_link_status.csv").groupby(["status_code"]).count()["url"].plot(kind="line")
# df = df[df["status_code"] == 200]
# df.plot(kind="line", x="date", y="status_code")
plt.show()
plt.savefig("charts/linkrot_by_date.png")
def keyword_knowledge_graph():
keywords = []
G = nx.Graph()
connection = sqlite3.connect("search.db")
cursor = connection.cursor()
rows = cursor.execute("SELECT keywords, url FROM posts;").fetchall()
count = 0
for r in rows:
if count == 150:
break
post_keywords = []
# for keyword in r[0].split(", "):
# # if "what is" in keyword:
# G.add_node(keyword)
# if len(post_keywords) > 0:
# G.add_edge(post_keywords[0], keyword)
for keyword in r[0].split(", "):
keyword = keyword.replace("the", "").replace("this", "")
post_keywords.append(keyword)
G.add_node(keyword)
# if not keyword.islower():
# G.add_edge("proper noun", keyword)
for k in post_keywords:
G.add_edge(post_keywords[0], k)
count += 1
nx.draw(G, with_labels=True)
plt.plot()
plt.show()
print([n for n in G.neighbors("coffee") if "coffee" in n.lower() and n.islower()][:7])
# get coffee edge
# for n in G.neighbors(to_check):
# print(n)
# print(nlp(n).similarity(nlp(to_check)))
# if nlp(n).similarity(nlp(to_check)):
# print(nlp(n).similarity(nlp(to_check)))
#plt.show()
return G
def show_error_codes():
df = | pd.read_csv("data/external_link_status.csv") | pandas.read_csv |
# ----------------
# IMPORT PACKAGES
# ----------------
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
import sklearn.metrics as skm
import numpy as np
import matplotlib.pyplot as plt
# ----------------
# OBTAIN DATA
# ----------------
# Data Source: https://archive.ics.uci.edu/ml/machine-learning-databases/00240/
# ----------------
# PROFILE DATA
# ----------------
# Determine number of observations or data points in the training data set.
subjects = pd.read_csv("train/subject_train.txt", header=None, delim_whitespace=True, index_col=False)
observations = len(subjects)
participants = len(subjects.stack().value_counts())
subjects.columns = ["Subject"]
print("Number of Observations: " + str(observations))
print("Number of Participants: " + str(participants))
# Determine the number of features in the data set.
features = pd.read_csv("features.txt", header=None, delim_whitespace=True, index_col=False)
num_features = len(features)
print("Number of Features: " + str(num_features))
print("")
# Data munging of the predictor and target variables starting with the column names.
x = | pd.read_csv("train/X_train.txt", header=None, delim_whitespace=True, index_col=False) | pandas.read_csv |
# laod library
import pandas as pd
# create data frame
df = | pd.DataFrame() | pandas.DataFrame |
from datetime import datetime
import json
from os.path import join, exists
from tempfile import TemporaryDirectory
import numpy as np
import pandas as pd
from delphi_utils import read_params
from delphi_cdc_covidnet.update_sensor import update_sensor
params = read_params()
STATIC_DIR = params["static_file_dir"]
class TestUpdateSensor:
def test_syn_update_sensor(self):
with TemporaryDirectory() as temp_dir:
# Create synthetic data
state_1 = {"datadownload": [
{
"catchment": "California", "network": "Network A", "age_category": "Overall",
"year": "2020", "mmwr-year": "2020", "mmwr-week": "10",
"cumulative-rate": 2.5, "weekly-rate": 0.7
}, {
"catchment": "California", "network": "Network A", "age_category": "Overall",
"year": "2020", "mmwr-year": "2020", "mmwr-week": "11",
"cumulative-rate": 3.5, "weekly-rate": 1.4
}, {
"catchment": "California", "network": "Network A", "age_category": "Overall",
"year": "2020", "mmwr-year": "2020", "mmwr-week": "12",
"cumulative-rate": 4.2, "weekly-rate": 1.9
}]}
state_2 = {"datadownload": [
{
"catchment": "Pennsylvania", "network": "Network B", "age_category": "Overall",
"year": "2020", "mmwr-year": "2020", "mmwr-week": "10",
"cumulative-rate": 10.3, "weekly-rate": 0.9
}, {
"catchment": "Pennsylvania", "network": "Network B", "age_category": "Overall",
"year": "2020", "mmwr-year": "2020", "mmwr-week": "11",
"cumulative-rate": 11.2, "weekly-rate": 4.5
}, {
"catchment": "Pennsylvania", "network": "Network B", "age_category": "Overall",
"year": "2020", "mmwr-year": "2020", "mmwr-week": "12",
"cumulative-rate": 11.8, "weekly-rate": 1.2
}]}
state_files = [join(temp_dir, state) for state in ["state_1.json", "state_2.json"]]
with open(state_files[0], "w") as f_json:
json.dump(state_1, f_json)
with open(state_files[1], "w") as f_json:
json.dump(state_2, f_json)
for state_file in state_files:
assert exists(state_file)
mmwr_info = pd.DataFrame([
{
"mmwrid": 3036, "weekend": "2020-03-07", "weeknumber": 10,
"weekstart": "2020-03-01", "year": 2020, "seasonid": 59
}, {
"mmwrid": 3037, "weekend": "2020-03-14", "weeknumber": 11,
"weekstart": "2020-03-08", "year": 2020, "seasonid": 59
}, {
"mmwrid": 3038, "weekend": "2020-03-21", "weeknumber": 12,
"weekstart": "2020-03-15", "year": 2020, "seasonid": 59
}])
mmwr_info["weekstart"] = pd.to_datetime(mmwr_info["weekstart"])
mmwr_info["weekend"] = pd.to_datetime(mmwr_info["weekend"])
# End date set up to be before last week of data
start_date = datetime(year=2020, month=3, day=7)
end_date = datetime(year=2020, month=3, day=17)
# Generate the csvs
hosp_df = update_sensor(state_files, mmwr_info, temp_dir, start_date, end_date)
# Check dataframe returned
assert hosp_df.index.nlevels == 2
assert set(hosp_df.index.names) == {"date", "geo_id"}
assert set(hosp_df.index.get_level_values("geo_id")) == {"ca", "pa"}
assert set(hosp_df.index.get_level_values("date")) == \
{datetime(2020, 3, 7), datetime(2020, 3, 14)}
assert set(hosp_df["epiweek"].unique()) == {10, 11}
geo_index = hosp_df.index.get_level_values("geo_id")
assert np.allclose(hosp_df.loc[geo_index == "ca", "val"], [2.5, 3.5])
assert np.allclose(hosp_df.loc[geo_index == "pa", "val"], [10.3, 11.2])
assert pd.isna(hosp_df["se"]).all()
assert | pd.isna(hosp_df["sample_size"]) | pandas.isna |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats
import pydot
from sklearn import preprocessing, model_selection
from sklearn.tree import export_graphviz
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import cross_val_score
from sklearn.metrics import mean_squared_error, mean_absolute_error
from treeinterpreter import treeinterpreter as ti
from pandas.plotting import register_matplotlib_converters
| register_matplotlib_converters() | pandas.plotting.register_matplotlib_converters |
from __future__ import division
import pandas as pd
import SimpleITK as sitk
import numpy as np
import os
import argparse
def resample_img(itk_image, out_spacing=[2.0, 2.0, 2.0], is_label=False):
# resample images to 2mm spacing with simple itk
original_spacing = itk_image.GetSpacing()
original_size = itk_image.GetSize()
out_size = [
int(np.round(original_size[0] * (original_spacing[0] / out_spacing[0]))),
int(np.round(original_size[1] * (original_spacing[1] / out_spacing[1]))),
int(np.round(original_size[2] * (original_spacing[2] / out_spacing[2])))]
resample = sitk.ResampleImageFilter()
resample.SetOutputSpacing(out_spacing)
resample.SetSize(out_size)
resample.SetOutputDirection(itk_image.GetDirection())
resample.SetOutputOrigin(itk_image.GetOrigin())
resample.SetTransform(sitk.Transform())
resample.SetDefaultPixelValue(itk_image.GetPixelIDValue())
if is_label:
resample.SetInterpolator(sitk.sitkNearestNeighbor)
else:
resample.SetInterpolator(sitk.sitkBSpline)
return resample.Execute(itk_image)
def normalise(itk_image):
# normalise and clip images
np_img = sitk.GetArrayFromImage(itk_image)
np_img = np.clip(np_img, -1000., 800.).astype(np.float32)
np_img = (np_img + 1000.) / 900. - 1.
s_itk_image = sitk.GetImageFromArray(np_img)
s_itk_image.CopyInformation(itk_image)
return s_itk_image
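# Illustrative usage only (not part of the original script): read a CT volume, resample it
# to 2 mm isotropic spacing and normalise the intensities. The file name is a placeholder.
def _demo_preprocess(path='img0001.nii.gz'):
    itk_image = sitk.ReadImage(path)
    resampled = resample_img(itk_image, out_spacing=[2.0, 2.0, 2.0], is_label=False)
    return normalise(resampled)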
def split_data(files, path, no_split=True, no_label=False):
if no_split:
# use this for test or so
imgs = [os.path.join(path, 'img', 'img{}.nii.gz'.format(f))
for f in files]
if not no_label:
lbls = [os.path.join(path, 'label', 'label{}.nii.gz'.format(f))
for f in files]
pd.DataFrame(data={'imgs': imgs, 'lbls': lbls}).to_csv(
'no_split.csv', index=False)
else:
pd.DataFrame(data={'imgs': imgs}).to_csv(
'no_split.csv', index=False)
else:
# split train data into train and val
rng = np.random.RandomState(42)
ids = [f[3:7] for f in files]
validation = rng.choice(ids, 7)
train = [f for f in ids if f not in validation]
train_imgs = [os.path.join(path, 'img', 'img{}.nii.gz'.format(f))
for f in train]
if not no_label:
train_lbls = [os.path.join(
path, 'label', 'label{}.nii.gz'.format(f)) for f in train]
pd.DataFrame(data={'imgs': train_imgs, 'lbls': train_lbls}).to_csv(
'train.csv', index=False)
else:
pd.DataFrame(data={'imgs': train_imgs}).to_csv(
'train.csv', index=False)
val_imgs = [os.path.join(path, 'img', 'img{}.nii.gz'.format(f))
for f in validation]
if not no_label:
val_lbls = [os.path.join(path, 'label', 'label{}.nii.gz'.format(f))
for f in validation]
pd.DataFrame(data={'imgs': val_imgs, 'lbls': val_lbls}).to_csv(
'val.csv', index=False)
else:
| pd.DataFrame(data={'imgs': val_imgs}) | pandas.DataFrame |
__version__ = '0.1.3'
__maintainer__ = '<NAME> 31.12.2019'
__contributors__ = '<NAME>, <NAME>'
__email__ = '<EMAIL>'
__birthdate__ = '31.12.2019'
__status__ = 'dev' # options are: dev, test, prod
#----- imports & packages ------
if __package__ is None or __package__ == '':
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.dirname(__file__))))
import pprint
import pandas as pd
import numpy as np
import warnings
from pathlib import Path
from zipfile import ZipFile
class DataParser:
def __init__(self, configDict: dict, datasetID: str, loadEncrypted=False):
"""
Basic class for parsing a mobility survey trip data set. Currently the both German travel surveys MiD 2008 and
MiD 2017 are pre-configured and one of the two can be given (default: MiD 2017).
The data set can be provided from an encrypted file on a server in which case the link to the ZIP-file as well
as a link to the file within the ZIP-file have to be supplied in the globalConfig and a password has to be
supplied in the parseConfig.
Columns relevant for the EV simulation are selected from the entirety of the data and renamed to VencoPy
internal variable names given in the dictionary parseConfig['dataVariables'] for the respective survey data set.
Manually configured exclude, include, greaterThan and smallerThan filters are applied as they are specified in
        parseConfig. For some columns, raw data is translated to human-readable strings and respective columns are
added. Pandas timestamp columns are synthesized from the given trip start and trip end time information.
:param configDict: A dictionary containing multiple yaml config files
:param datasetID: Currently, MiD08 and MiD17 are implemented as travel survey data sets
:param loadEncrypted: If True, load an encrypted ZIP file as specified in parseConfig
"""
self.parseConfig = configDict['parseConfig']
self.globalConfig = configDict['globalConfig']
self.localPathConfig = configDict['localPathConfig']
self.datasetID = self.checkDatasetID(datasetID, self.parseConfig)
self.rawDataPath = Path(self.localPathConfig['pathAbsolute'][self.datasetID]) / self.globalConfig['files'][self.datasetID]['tripsDataRaw']
self.subDict = {}
self.rawData = None
self.data = None
self.__filterDict = {}
self.columns = self.compileVariableList()
self.filterDictNameList = ['include', 'exclude', 'greaterThan', 'smallerThan']
self.updateFilterDict()
print('Parsing properties set up')
if loadEncrypted:
print(f"Starting to retrieve encrypted data file from "
f"{self.globalConfig['pathAbsolute']['encryptedZipfile']}")
self.loadEncryptedData(pathToZip=Path(self.globalConfig['pathAbsolute']['encryptedZipfile']) /
self.globalConfig['files'][self.datasetID]['encryptedZipFileB2'],
pathInZip=self.globalConfig['files'][self.datasetID]['tripDataZipFileRaw'])
else:
print(f"Starting to retrieve local data file from {self.rawDataPath}")
self.loadData()
def updateFilterDict(self) -> None:
"""
Internal function to parse the filter dictionary of a specified data set from parseConfig.yaml
:return: None
"""
self.__filterDict[self.datasetID] = self.parseConfig['filterDicts'][self.datasetID]
self.__filterDict[self.datasetID] = {iKey: iVal for iKey, iVal in self.__filterDict[self.datasetID].items() if self.__filterDict[self.datasetID][iKey] is not
None}
def checkDatasetID(self, datasetID: str, parseConfig: dict) -> str:
"""
General check if data set ID is defined in parseConfig.yaml
:param datasetID: list of strings declaring the datasetIDs to be read in
:param parseConfig: A yaml config file holding a dictionary with the keys 'pathRelative' and 'pathAbsolute'
:return: Returns a string value of a mobility data
"""
availableDatasetIDs = parseConfig['dataVariables']['datasetID']
assert datasetID in availableDatasetIDs, \
f'Defined datasetID {datasetID} not specified under dataVariables in parseConfig. Specified datasetIDs ' \
f'are {availableDatasetIDs}'
return datasetID
def compileVariableList(self) -> list:
"""
Clean up the replacement dictionary of raw data file variable (column) names. This has to be done because some
variables that may be relevant for the analysis later on are only contained in one raw data set while not
        contained in another one. E.g. whether a trip was an intermodal trip was only assessed in the MiD 2017 but not
        in the MiD 2008. This has to be mirrored by the filter dict for the respective data set.
:return: List of variables
"""
listIndex = self.parseConfig['dataVariables']['datasetID'].index(self.datasetID)
variables = [val[listIndex] if not val[listIndex] == 'NA' else 'NA' for key, val in
self.parseConfig['dataVariables'].items()]
variables.remove(self.datasetID)
self.removeNA(variables)
return variables
def removeNA(self, variables: list):
"""
Removes all strings that can be capitalized to 'NA' from the list of variables
:param variables: List of variables of the mobility dataset
:return: Returns a list with non NA values
"""
vars = [iVar.upper() for iVar in variables]
counter = 0
for idx, iVar in enumerate(vars):
if iVar == 'NA':
del variables[idx - counter]
counter += 1
def loadData(self):
"""
        Loads data specified in self.rawDataPath and stores it in self.rawData. Raises an exception if an invalid suffix
is specified in self.rawDataPath. READ IN OF CSV HAS NOT BEEN EXTENSIVELY TESTED BEFORE BETA RELEASE.
:return: None
"""
# Future releases: Are potential error messages (.dta not being a stata file even as the ending matches)
# readable for the user? Should we have a manual error treatment here?
if self.rawDataPath.suffix == '.dta':
self.rawData = pd.read_stata(self.rawDataPath, convert_categoricals=False, convert_dates=False,
preserve_dtypes=False)
# This has not been tested before the beta release
elif self.rawDataPath.suffix == '.csv':
self.rawData = pd.read_csv(self.rawDataPath)
else:
Exception(f"Data type {self.rawDataPath.suffix} not yet specified. Available types so far are .dta and "
f".csv")
print(f'Finished loading {len(self.rawData)} rows of raw data of type {self.rawDataPath.suffix}')
def loadEncryptedData(self, pathToZip, pathInZip):
"""
Since the MiD data sets are only accessible by an extensive data security contract, VencoPy provides the
possibility to access encrypted zip files. An encryption password has to be given in parseConfig.yaml in order
to access the encrypted file. Loaded data is stored in self.rawData
:param pathToZip: path from current working directory to the zip file or absolute path to zipfile
:param pathInZip: Path to trip data file within the encrypted zipfile
:return: None
"""
with ZipFile(pathToZip) as myzip:
if '.dta' in pathInZip:
self.rawData = pd.read_stata(myzip.open(pathInZip, pwd=bytes(self.parseConfig['encryptionPW'],
encoding='utf-8')),
convert_categoricals=False, convert_dates=False, preserve_dtypes=False)
else: # if '.csv' in pathInZip:
self.rawData = pd.read_csv(myzip.open(pathInZip, pwd=bytes(self.parseConfig['encryptionPW'],
encoding='utf-8')), sep=';', decimal=',')
print(f'Finished loading {len(self.rawData)} rows of raw data of type {self.rawDataPath.suffix}')
def selectColumns(self):
"""
Function to filter the rawData for only relevant columns as specified by parseConfig and cleaned in
self.compileVariablesList(). Stores the subset of data in self.data
:return: None
"""
self.data = self.rawData.loc[:, self.columns]
def harmonizeVariables(self):
"""
Harmonizes the input data variables to match internal VencoPy names given as specified in the mapping in
parseConfig['dataVariables']. So far mappings for MiD08 and MiD17 are given. Since the MiD08 doesn't provide
a combined household and person unique identifier, it is synthesized of the both IDs.
:return: None
"""
replacementDict = self.createReplacementDict(self.datasetID, self.parseConfig['dataVariables'])
dataRenamed = self.data.rename(columns=replacementDict)
if self.datasetID == 'MiD08':
dataRenamed['hhPersonID'] = (dataRenamed['hhID'].astype('string') +
dataRenamed['personID'].astype('string')).astype('int')
self.data = dataRenamed
print('Finished harmonization of variables')
def createReplacementDict(self, datasetID: str, dictRaw: dict) -> dict:
"""
Creates the mapping dictionary from raw data variable names to VencoPy internal variable names as specified
in parseConfig.yaml for the specified data set.
:param datasetID: list of strings declaring the datasetIDs to be read in
:param dictRaw: Contains dictionary of the raw data
:return: Dictionary with internal names as keys and raw data column names as values.
"""
if datasetID in dictRaw['datasetID']:
listIndex = dictRaw['datasetID'].index(datasetID)
return {val[listIndex]: key for (key, val) in dictRaw.items()}
else:
raise ValueError(f'Data set {datasetID} not specified in parseConfig variable dictionary.')
def convertTypes(self):
"""
Convert raw column types to predefined python types as specified in parseConfig['inputDTypes'][datasetID]. This is mainly
        done for performance reasons, but also to avoid index values of type int being cast to float.
The function operates only on self.data and writes back changes to self.data
:return: None
"""
# Filter for dataset specific columns
conversionDict = self.parseConfig['inputDTypes'][self.datasetID]
keys = {iCol for iCol in conversionDict.keys() if iCol in self.data.columns}
self.subDict = {key: conversionDict[key] for key in conversionDict.keys() & keys}
self.data = self.data.astype(self.subDict)
    def returnDictBottomValues(self, baseDict: dict, lst: list = None) -> list:
        """
        Returns a list of all dictionary values of the last dictionary level (the bottom) of baseDict. The parameter
        lst is used as an interface between recursion levels.
        :param baseDict: Dictionary of variables
        :param lst: empty list, is used as interface to next recursion
        :return: Returns a list with all the bottom dictionary values
        """
        if lst is None:  # avoid sharing a mutable default list between calls
            lst = []
        for iKey, iVal in baseDict.items():
if isinstance(iVal, dict):
lst = self.returnDictBottomValues(iVal, lst)
else:
if iVal is not None:
lst.append(iVal)
return lst
def checkFilterDict(self):
"""
        Check that all values of the filter dictionaries are of type list. Nested lists of strings are currently not
        type-checked (all(map(self.__checkStr, val)) is not applied). Triggers an assert if the check fails.
:return: None
"""
assert all(isinstance(val, list) for val in self.returnDictBottomValues(self.__filterDict[self.datasetID])), \
f'All values in filter dictionaries have to be lists, but are not'
def returnDictBottomKeys(self, baseDict: dict, lst: list = None) -> list:
"""
Returns the lowest level keys of baseDict and returns all of them as a list. The parameter lst is used as
interface between recursion levels.
:param baseDict: Dictionary of variables
:param lst: empty list, used as interface between recursion levels
:return: Returns a list with all the bottom level dictionary keys
"""
if lst is None:
lst = []
for iKey, iVal in baseDict.items():
if isinstance(iVal, dict):
lst = self.returnDictBottomKeys(iVal, lst)
else:
if iVal is not None:
lst.append(iKey)
return lst
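    # Example (illustrative, with made-up keys): for {'include': {'tripPurpose': [1, 2]}, 'exclude': None},
    # returnDictBottomKeys returns ['tripPurpose'] and returnDictBottomValues returns [[1, 2]].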
def filter(self):
"""
Wrapper function to carry out filtering for the four filter logics of including, excluding, greaterThan and
smallerThan. If a filterDict is defined with a different key, a warning is thrown. The function operates on
self.data class-internally.
:return: None
"""
print(f'Starting filtering, applying {len(self.returnDictBottomKeys(self.__filterDict[self.datasetID]))} filters.')
ret = | pd.DataFrame(index=self.data.index) | pandas.DataFrame |
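# Illustrative driver only (not part of VencoPy): the call sequence below simply strings
# together the methods defined above; the contents of config_dict are placeholders that
# would normally be read from the project's YAML config files.
def _demo_parse(config_dict: dict):
    parser = DataParser(configDict=config_dict, datasetID='MiD17', loadEncrypted=False)
    parser.selectColumns()
    parser.harmonizeVariables()
    parser.convertTypes()
    parser.checkFilterDict()
    parser.filter()
    return parser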
#!/usr/bin/python
# -*- coding: utf-8 -*-
import decimal
import datetime
import pandas as pd
from scipy.optimize import fsolve
from django.http import HttpResponse
from django.shortcuts import render
from .models import Currency, Category, Bank, Account, AccountCategory, AccountRec, Risk, InvProj, InvRec
from . import tables
def proj_stat(request, projid):
proj = InvProj.objects.get(id=int(projid))
tab = tables.InvRecTable(proj.invrec_set.all(), request=request)
env = {
'proj': proj,
'table': tab,
}
return render(request, 'inv/proj_stat.html', env)
def add_vectory(a, b):
return [i+j for i, j in zip(a, b)]
def sub_vectory(a, b):
return [i-j for i, j in zip(a, b)]
def div_vectory(a, b):
return [i/j for i, j in zip(a, b) if j != 0]
def balance_sheet(request):
curs = Currency.objects.all()
sheet = {}
for cat in Category.objects.all():
values = cat.values_by_currency()
l = []
total = 0
for cur in curs:
l.append(values.get(cur.name, decimal.Decimal()))
total += values.get(cur.name, decimal.Decimal()) * cur.rate
l.append(total)
sheet.setdefault(cat.cat, [])
sheet[cat.cat].append((cat, l))
assets = [decimal.Decimal(),]*(len(curs)+1)
liabilities = [decimal.Decimal(),]*(len(curs)+1)
for i in range(1, 6):
sums = [decimal.Decimal(),]*(len(curs)+1)
for name, values in sheet[i]:
sums = add_vectory(sums, values)
if i == 1:
current_asset = sums[:]
if i == 2:
current_liabilities = sums[:]
if i in {1, 3, 5}:
assets = add_vectory(assets, sums)
else:
liabilities = add_vectory(liabilities, sums)
sheet[i].append(({'name': '小记'}, sums))
liquidity_list = div_vectory(map(float, current_asset),
map(float, current_liabilities))
if liquidity_list:
liquidity_ratio = min(liquidity_list)
else:
liquidity_ratio = -1
debt_asset_ratio = max(div_vectory(map(float, liabilities),
map(float, assets)))
env = {
'sheet': sheet,
'curs': curs,
'assets': assets,
'liabilities': liabilities,
'equity': list(sub_vectory(assets, liabilities)),
'liquidity_ratio': liquidity_ratio,
'debt_asset_ratio': 100*debt_asset_ratio,
}
return render(request, 'inv/balance_sheet.html', env)
def income_outgoing_sheet(request):
lastyear = datetime.date.today()-datetime.timedelta(days=365)
td = datetime.date.today()
income = []
for cat in AccountCategory.objects.filter(cat=1).all():
num = sum((rec.value for rec in cat.accountrec_set.filter(date__gte=lastyear).all()))
if num:
income.append((cat.name, num))
s_income = sum((n for c, n in income))
income.append(('小计', s_income))
outgoing = []
for cat in AccountCategory.objects.filter(cat=2).all():
num = sum((rec.value for rec in cat.accountrec_set.filter(date__gte=lastyear).all()))
if num:
outgoing.append((cat.name, num))
s_outgoing = sum((n for c, n in outgoing))
outgoing.append(('小计', s_outgoing))
investments = []
iotab = []
for cat in Category.objects.filter(cat=5).all():
num = 0
for proj in cat.invproj_set.filter(isopen=False, end__gte=lastyear).all():
num -= (proj.value*proj.acct.currency.rate).quantize(decimal.Decimal('1.00'))
iotab.extend(proj.calc_iotab(td, True))
if num:
investments.append((cat.name, num))
s_investments = sum((n for c, n in investments))
investments.append(('小计', s_investments))
def f(r):
return sum((value*r**dur for dur, value in iotab))
r = fsolve(f, 1.01)[0]
env = {
'income': income,
'outgoing': outgoing,
'investments': investments,
'total_income': s_income+s_investments,
'total_outgoing': s_outgoing,
'net_income': s_income+s_investments-s_outgoing,
'saving_rate': 100*(s_income+s_investments-s_outgoing)/(s_income+s_investments),
'invest_income_rate': 100*s_investments/(s_income+s_investments),
'invest_outgoing_rate': 100*s_investments/s_outgoing,
'invest_rate': 365*100*(r-1),
}
return render(request, 'inv/ios.html', env)
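# Worked example (illustrative only) of the rate calculation used above: given dated cash
# flows as (duration_in_days, value) pairs, fsolve finds the daily growth factor r at which
# the flows balance, and 365*100*(r-1) is reported as an annualised percentage. The numbers
# and the sign convention below are invented for the demonstration.
def _demo_invest_rate():
    iotab = [(365, -100.0), (0, 110.0)]  # pay 100 a year ago, receive 110 today
    r = fsolve(lambda x: sum(value * x ** dur for dur, value in iotab), 1.01)[0]
    return 365 * 100 * (r - 1)  # roughly 9.5 (percent per year)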
def income_details(request):
df = pd.DataFrame()
for cat in AccountCategory.objects.filter(cat=1).all():
s = pd.Series(name=cat.name)
for rec in cat.accountrec_set.all():
dt = rec.date.replace(day=1)
if dt not in s:
s[dt] = rec.value
else:
s[dt] += rec.value
if s.count():
df = df.join(s, how='outer')
for cat in Category.objects.filter(cat=5).all():
s = pd.Series(name=cat.name)
for proj in cat.invproj_set.filter(isopen=False).all():
dt = proj.end.replace(day=1)
value = (proj.value*proj.acct.currency.rate).quantize(decimal.Decimal('1.00'))
if dt not in s:
s[dt] = -value
else:
s[dt] += -value
if s.count():
df = df.join(s, how='outer')
df = df.sort_index()
df['总计'] = df.sum(axis=1)
env = {
'title': '收入细节表',
'code': df.to_html(border=0, classes='table table-striped table-responsive'),
}
return render(request, 'inv/raw.html', env)
def outgoing_details(request):
df = pd.DataFrame()
for cat in AccountCategory.objects.filter(cat=2).all():
s = | pd.Series(name=cat.name) | pandas.Series |
import pandas
import numpy as np
from pandas import DataFrame
from sklearn.cross_validation import train_test_split
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
deneme = []
human = []
with open("human.txt","r") as f:
for line in f:
human.append(line)
deneme.append(1)
bot = []
with open("robot.txt","r") as f:
for line in f:
bot.append(line)
deneme.append(0)
data = {'text': human+bot,'status': deneme}
frame = | pandas.DataFrame(data) | pandas.DataFrame |
import pandas as pd
from functools import reduce
from datetime import datetime
import numpy as np
from EnergyIntensityIndicators.pull_bea_api import BEA_api
from EnergyIntensityIndicators.get_census_data import Econ_census
from EnergyIntensityIndicators.utilities.standard_interpolation \
import standard_interpolation
class NonManufacturing:
""" Prior to 2012, total nonmanufacturing
energy consumption (electricity and fuels) was estimated as a residual
between the supply-side estimates of industrial consumption published
by EIA and the end-user estimates for manufacturing based upon the MECS
(supplemented by census-based data, as described above). The residual-based
method produced very unsatisfactory results; year-to-year changes in
energy consumption were implausible in a large number of instances.
A complicating factor for fuels is that industrial consumption
estimates published by EIA include energy products used as chemical
feedstocks and other nonfuel purposes. As a result, a preliminary
effort was undertaken in mid-2012 to estimate energy consumption
from the user side for these sectors.
"""
def __init__(self, naics_digits):
self.currentYear = datetime.now().year
self.naics_digits = naics_digits
self.BEA_data = \
BEA_api(years=list(range(1949, self.currentYear + 1)))
self.BEA_go_nominal = \
self.BEA_data.get_data(table_name='go_nominal')
self.BEA_go_quant_index = \
self.BEA_data.get_data(table_name='go_quant_index')
self.BEA_va_nominal = \
self.BEA_data.get_data(table_name='va_nominal')
self.BEA_va_quant_index = \
self.BEA_data.get_data(table_name='va_quant_index')
@staticmethod
def indicators_nonman_2018_bea():
"""Reformat value added and gross output chain quantity
indexes from GrossOutput_1967-2018PNNL_213119.xlsx/
ChainQtyIndexes (EA301:EJ349) and
ValueAdded_1969-2018_PNNL_010120.xlsx/
ChainQtyIndexes (EA301:EJ349) respectively
"""
va_quant_index, go_quant_index =\
BEA_api(years=list(range(1949, 2018))).chain_qty_indexes()
return va_quant_index, go_quant_index
def get_econ_census(self):
"""Collect economic census data
Returns:
            e_c_data (dict): Economic census data, keyed by census year (str)
"""
economic_census = Econ_census()
economic_census_years = list(range(1987, self.currentYear + 1, 5))
e_c_data = {str(y): economic_census.get_data(year=y)
for y in economic_census_years}
return e_c_data
@staticmethod
def petroleum_prices(retail_gasoline, retail_diesel,
excl_tax_gasoline, excl_tax_diesel):
"""Get petroleum prices
Args:
retail_gasoline ([type]): [description]
retail_diesel ([type]): [description]
excl_tax_gasoline ([type]): [description]
excl_tax_diesel ([type]): [description]
Returns:
dollar_mmbtu [type]: [description]
lubricant [type]: [description]
"""
retail_gasoline.loc[2011] = 3.527
retail_gasoline.loc[2012] = 3.644
retail_gasoline.loc[2013] = 3.526
retail_gasoline.loc[2014] = 3.367
retail_gasoline.loc[2015] = 2.448
retail_gasoline.loc[2016] = 2.142
retail_gasoline.loc[2017] = 2.408
retail_gasoline['Excl. Tax'] = \
retail_gasoline.divide(
retail_gasoline.loc[1994, 'Retail']).multiply(
excl_tax_gasoline.loc[1994])
retail_gasoline['$/MMBtu'] = \
retail_gasoline.divide(
retail_gasoline.loc[1994, 'Retail']).multiply(
excl_tax_gasoline.loc[1994])
retail_diesel['Excl. Tax'] = \
retail_diesel.divide(
retail_diesel.loc[1994, 'Retail']).multiply(
excl_tax_diesel.loc[1994])
retail_diesel['$/MMBtu'] = \
retail_diesel.divide(
retail_diesel.loc[1994, 'Retail']).multiply(
excl_tax_diesel.loc[1994])
gasoline_weight = 0.3
diesel_weight = 0.7
lubricant_weights = 2
dollar_mmbtu = \
retail_diesel['$/MMBtu'] * diesel_weight + \
retail_gasoline['$/MMBtu'] * gasoline_weight
lubricant = dollar_mmbtu.multiply(lubricant_weights)
return dollar_mmbtu, lubricant
def construction_raw_data(self):
"""Equivalent to Construction_energy_011920.xlsx['Construction']
Returns:
construction_elec [type]: [description]
construction_fuels [type]: [description]
TODO: automatically update data
"""
stb0303 = \
pd.read_excel(
'./EnergyIntensityIndicators/Industry/Data/stb0303.xlsx',
sheet_name='stb0303')
stb0304 = \
pd.read_excel(
'./EnergyIntensityIndicators/Industry/Data/stb0304.xlsx',
sheet_name='stb0304')
stb0523 = \
pd.read_excel(
'./EnergyIntensityIndicators/Industry/Data/stb0523.xlsx',
sheet_name='stb0523')
stb0524 = \
pd.read_csv(
'https://www.eia.gov/totalenergy/data/browser/csv.php?tbl=T09.04')
construction_elec_fuels = \
pd.read_csv(
'./EnergyIntensityIndicators/Industry/Data/construction_elec_fuels.csv').set_index('Year')
construction_elec_fuels = \
construction_elec_fuels.rename(
columns={' Electricity':
'Electricity'})
construction_elec = construction_elec_fuels[['Electricity']]
construction_fuels = construction_elec_fuels[['Total Fuel']]
return construction_elec, construction_fuels
def construction(self):
"""Build data dictionary for the construction sector
https://www.census.gov/data/tables/2017/econ/economic-census/naics-sector-23.html
https://www.census.gov/data/tables/2012/econ/census/construction.html
http://factfinder2.census.gov/faces/tableservices/jsf/pages/productview.xhtml?pid=ECN_2007_US_23I1&prodType=table
http://factfinder2.census.gov/faces/tableservices/jsf/pages/productview.xhtml?pid=ECN_2002_US_23I04A&prodType=table
http://www.census.gov/epcd/www/97EC23.HTM
http://www.census.gov/prod/www/abs/cciview.html
        Returns:
            data_dict (dict): Dictionary of energy (elec, fuels) and activity (gross_output, value_added) data
"""
# NonMan_output_data / M, Y
value_added, gross_output = self.indicators_nonman_2018_bea()
value_added = value_added[['Construction']]
gross_output = \
gross_output[['Construction']].rename(
columns={'Construction': 'Gross Output'})
gross_output['Output*0.0001'] = \
gross_output['Gross Output'].multiply(0.0001)
electricity, fuels = self.construction_raw_data()
elec_intensity = electricity.merge(gross_output,
how='outer',
left_index=True,
right_index=True)
elec_intensity['elec_intensity'] = \
elec_intensity['Electricity'].divide(
elec_intensity['Output*0.0001'].values)
elec_intensity = \
standard_interpolation(elec_intensity,
name_to_interp='elec_intensity',
axis=1).fillna(method='bfill')
fuels_intensity = \
fuels.merge(gross_output, how='outer',
left_index=True, right_index=True)
fuels_intensity['fuels_intensity'] = \
fuels_intensity['Total Fuel'].divide(
fuels_intensity['Output*0.0001'] .values)
fuels_intensity.loc[1982, 'fuels_intensity'] = np.nan
fuels_intensity.loc[2002, 'fuels_intensity'] = np.nan
fuels_intensity = \
standard_interpolation(fuels_intensity,
name_to_interp='fuels_intensity',
axis=1).fillna(method='bfill')
final_electricity = elec_intensity[[
'elec_intensity']].multiply(
elec_intensity['Output*0.0001'], axis='index')
final_electricity = final_electricity.rename(columns={'elec_intensity':
'Construction'})
final_fuels = fuels_intensity[[
'fuels_intensity']].multiply(
fuels_intensity['Output*0.0001'], axis='index')
final_fuels = final_fuels.rename(columns={'fuels_intensity':
'Construction'})
gross_output = gross_output.drop('Output*0.0001', axis=1)
gross_output = gross_output.rename(columns={'Gross Output':
'Construction'})
data_dict = {'energy':
{'elec': final_electricity,
'fuels': final_fuels},
'activity':
{'gross_output': gross_output,
'value_added': value_added}}
return data_dict
def agriculture(self):
"""Build data dictionary for the agricultural sector
Returns:
            data_dict (dict): Dictionary of energy (elec, fuels) and activity (gross_output, value_added) data
"""
# Annual Estimates of energy by fuel for the farm sector for the
# period 1965-2002
miranowski_data = \
pd.read_excel(
'./EnergyIntensityIndicators/Industry/Data/miranowski_data.xlsx',
sheet_name='Ag Cons by Use', skiprows=4, skipfooter=9,
usecols='A,F:G', index_col=0,
names=['Year', 'Electricity', 'Direct Ag. Energy Use'])
miranowski_data = miranowski_data.reset_index()
miranowski_data['Year'] = pd.to_numeric(miranowski_data['Year'],
errors='coerce')
miranowski_data = miranowski_data.dropna(
subset=['Year']).set_index('Year')
miranowski_data.index = miranowski_data.index.astype(int)
adjustment_factor = 10500 / 3412 # Assume 10,500 Btu/Kwh
# NonMan_output_data_010420.xlsx column G, S (value added and gross
# output chain qty indexes for farms)
value_added, gross_output = self.indicators_nonman_2018_bea()
value_added.index = value_added.index.astype(int)
gross_output.index = gross_output.index.astype(int)
value_added = value_added[['Farms']]
gross_output = gross_output[['Farms']]
elec_prm = miranowski_data[[
'Electricity']].rename(
columns={'Electricity': 'elec'})
elec_site = elec_prm.divide(adjustment_factor)
elec_site.index = elec_site.index.astype(int)
elec_df = elec_site[~elec_site.index.duplicated()]
fuels_df = miranowski_data[['Direct Ag. Energy Use']].subtract(
elec_prm.values).rename(
columns={'Direct Ag. Energy Use': 'fuels'})
fuels_df = fuels_df[~fuels_df.index.duplicated()]
fuels_df.index = fuels_df.index.astype(int)
elec_df = elec_df.merge(gross_output,
left_index=True,
right_index=True,
how='outer')
fuels_df = fuels_df.merge(gross_output,
left_index=True,
right_index=True,
how='outer')
elec_df['elec_intensity'] = elec_df['elec'].divide(
gross_output['Farms'] * 0.001, axis='index')
fuels_df['fuels_intensity'] = fuels_df['fuels'].divide(
gross_output['Farms'] * 0.001, axis='index')
electricity_final = elec_df[['elec_intensity']].multiply(
gross_output['Farms'] * 0.001, axis='index').ffill()
electricity_final = \
electricity_final.rename(
columns={'elec_intensity': 'Agriculture, Forestry & Fishing'})
electricity_final.index = electricity_final.index.astype(int)
fuels_final = fuels_df[['fuels_intensity']].multiply(
gross_output['Farms'] * 0.001, axis='index')
# Calculated in Agricultural_energy_010420/Farms
fuels_fill = pd.DataFrame([[641.0], [717.2], [657.7], [635.2], [732.1], [638.5],
[791.4], [689.0], [652.1], [675.0], [740.2], [782.8],
[906.9], [929.6], [820.9]],
index=list(range(2003, 2017 + 1)),
columns=['fuels_fill'])
fuels_final = fuels_final.merge(fuels_fill,
how='outer',
left_index=True,
right_index=True)
fuels_final = \
fuels_final.rename(
columns={'fuels_intensity': 'Agriculture, Forestry & Fishing'})
fuels_final = \
fuels_final['Agriculture, Forestry & Fishing'].fillna(
fuels_final['fuels_fill']).to_frame(
name='Agriculture, Forestry & Fishing')
# fuels_final = fuels_final.drop('fuels_fill', axis=1)
fuels_final.index = fuels_final.index.astype(int)
value_added = value_added.rename(
columns={'Farms': 'Agriculture, Forestry & Fishing'})
gross_output = gross_output.rename(
columns={'Farms': 'Agriculture, Forestry & Fishing'})
data_dict = {'energy': {'elec': electricity_final,
'fuels': fuels_final},
'activity': {'gross_output': gross_output,
'value_added': value_added}}
return data_dict
def aggregate_mining_data(self, mining_df, allfos=False):
"""[summary]
Args:
mining_df ([type]): [description]
allfos (bool, optional): [description]. Defaults to False.
Returns:
to_transfer (pd.DataFrame): [description]
"""
mapping = {5: 'Iron and Ferroalloy mining',
6: 'Uranium - vanadium ores',
7: 'Nonferrous metals',
8: 'Anthracite Coal',
9: 'Bituminous Coal',
10: 'Crude Petroleum',
11: 'Natural Gas',
12: 'Natural Gas Liquids',
13: 'Stone and clay mining',
14: 'Chemical and Fertilizer',
15: 'Oil and gas well drilling'}
mapping_df = \
pd.DataFrame.from_dict(mapping, orient='index',
columns=['Industry'])
mapping_df.index.name = 'Year'
mapping_df = mapping_df.reset_index()
if allfos:
mapping_df['Year'] = mapping_df['Year'].subtract(1)
mapping_df['Year'] = mapping_df['Year'].astype(int)
mining_df = mining_df.merge(mapping_df, how='right', on='Year')
mining_df = mining_df.drop(
['Year', 'NAICS'], axis=1).set_index('Industry')
mining_df = mining_df.transpose()
mining_df['Crude Petroleum and Natural Gas'] = \
mining_df[['Crude Petroleum', 'Natural Gas', 'Natural Gas Liquids']].sum(axis=1)
mining_df['Coal Mining'] = \
mining_df[['Anthracite Coal', 'Bituminous Coal']].sum(axis=1)
mining_df['Metal Ore Mining'] = \
mining_df[['Iron and Ferroalloy mining',
'Uranium - vanadium ores',
'Nonferrous metals']].sum(axis=1)
mining_df['Nonmetallic mineral mining'] = \
mining_df[['Stone and clay mining',
'Chemical and Fertilizer']].sum(axis=1)
to_transfer = mining_df[['Crude Petroleum and Natural Gas',
'Coal Mining', 'Metal Ore Mining',
'Nonmetallic mineral mining',
'Oil and gas well drilling']].rename(
columns={'Oil and gas well drilling':
'Support Activities',
'Crude Petroleum and Natural Gas':
'Crude Pet'})
return to_transfer
@staticmethod
def build_mining_output(factor, gross_output, value_added,
elec, fuels, sector_estimates_elec,
sector_estimates_fuels, col_name):
        """Build data dictionary for the mining subsector"""
        gross_output.index = gross_output.index.astype(int)
        elec.index = elec.index.astype(int)
        fuels.index = fuels.index.astype(int)
        sector_estimates_elec.index = sector_estimates_elec.index.astype(int)
        sector_estimates_fuels.index = sector_estimates_fuels.index.astype(int)
elec = elec.rename(columns={col_name: 'elec'})
fuels = fuels.rename(columns={col_name: 'fuels'})
sector_estimates_elec = \
sector_estimates_elec.rename(
columns={col_name: 'elec'})
sector_estimates_fuels = \
sector_estimates_fuels.rename(
columns={col_name: 'fuels'})
elec = pd.concat([elec, sector_estimates_elec], axis=0)
fuels = | pd.concat([fuels, sector_estimates_fuels], axis=0) | pandas.concat |
# Calculating Annual California State Median HIR
import pandas as pd
import numpy as np
import array
# Median Income
ca_med_inc = pd.read_excel('h08.xls', skiprows=4, usecols=([0,1,3,7,9,11,13] + list(range(17,64,2))))[:10] # Source: https://www.census.gov/data/tables/time-series/demo/income-poverty/historical-income-households.html (Table H-8)
ca_inc8918 = ca_med_inc.iloc[6].values.tolist()[1:][::-1]
ca_med_inc = pd.DataFrame({'Year':range(1989, 2019), 'Med_Inc':ca_inc8918})
#-------------------------------------------------------------------------------------------------------------------------------------#
# Median Home Price (Zillow- 1996-2018) *Zillow data is unavailable before 1996
zillow_st = pd.read_csv('State_Zhvi_AllHomes.csv', usecols=([1] + list(range(5, 270,12)))) # Source: https://www.zillow.com/research/data/ (ZHVI All Homes - State - June prices)
zillow_st.columns = ['State'] + list(range(1996,2019))
ca = zillow_st.iloc[0].tolist()[1:]
ca_home_df = pd.DataFrame({'Year':range(1996, 2019), 'Med_Home':ca})
#-------------------------------------------------------------------------------------------------------------------------------------#
# Median Home Price (FHFA- 1989-1995) *Will merge w/ Zillow data
fhfa_df = pd.read_csv('county_home_inc_hir_8918.csv')
fhfa_df = fhfa_df[(fhfa_df.FIPS > 6000) & (fhfa_df.FIPS < 7000)]
units8918 = | pd.read_csv('county_housing_units_8918.csv') | pandas.read_csv |
""" test the scalar Timestamp """
import pytz
import pytest
import dateutil
import calendar
import locale
import numpy as np
from dateutil.tz import tzutc
from pytz import timezone, utc
from datetime import datetime, timedelta
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.tseries import offsets
from pandas._libs.tslibs import conversion
from pandas._libs.tslibs.timezones import get_timezone, dateutil_gettz as gettz
from pandas.errors import OutOfBoundsDatetime
from pandas.compat import long, PY3
from pandas.compat.numpy import np_datetime64_compat
from pandas import Timestamp, Period, Timedelta, NaT
class TestTimestampProperties(object):
def test_properties_business(self):
ts = Timestamp('2017-10-01', freq='B')
control = Timestamp('2017-10-01')
assert ts.dayofweek == 6
assert not ts.is_month_start # not a weekday
assert not ts.is_quarter_start # not a weekday
# Control case: non-business is month/qtr start
assert control.is_month_start
assert control.is_quarter_start
ts = Timestamp('2017-09-30', freq='B')
control = Timestamp('2017-09-30')
assert ts.dayofweek == 5
assert not ts.is_month_end # not a weekday
assert not ts.is_quarter_end # not a weekday
# Control case: non-business is month/qtr start
assert control.is_month_end
assert control.is_quarter_end
def test_fields(self):
def check(value, equal):
# that we are int/long like
assert isinstance(value, (int, long))
assert value == equal
# GH 10050
ts = Timestamp('2015-05-10 09:06:03.000100001')
check(ts.year, 2015)
check(ts.month, 5)
check(ts.day, 10)
check(ts.hour, 9)
check(ts.minute, 6)
check(ts.second, 3)
pytest.raises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 100)
check(ts.nanosecond, 1)
check(ts.dayofweek, 6)
check(ts.quarter, 2)
check(ts.dayofyear, 130)
check(ts.week, 19)
check(ts.daysinmonth, 31)
check(ts.daysinmonth, 31)
# GH 13303
ts = Timestamp('2014-12-31 23:59:00-05:00', tz='US/Eastern')
check(ts.year, 2014)
check(ts.month, 12)
check(ts.day, 31)
check(ts.hour, 23)
check(ts.minute, 59)
check(ts.second, 0)
pytest.raises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 0)
check(ts.nanosecond, 0)
check(ts.dayofweek, 2)
check(ts.quarter, 4)
check(ts.dayofyear, 365)
check(ts.week, 1)
check(ts.daysinmonth, 31)
ts = Timestamp('2014-01-01 00:00:00+01:00')
starts = ['is_month_start', 'is_quarter_start', 'is_year_start']
for start in starts:
assert getattr(ts, start)
ts = Timestamp('2014-12-31 23:59:59+01:00')
ends = ['is_month_end', 'is_year_end', 'is_quarter_end']
for end in ends:
assert getattr(ts, end)
# GH 12806
@pytest.mark.parametrize('data',
[Timestamp('2017-08-28 23:00:00'),
Timestamp('2017-08-28 23:00:00', tz='EST')])
@pytest.mark.parametrize('time_locale', [
None] if tm.get_locales() is None else [None] + tm.get_locales())
def test_names(self, data, time_locale):
# GH 17354
# Test .weekday_name, .day_name(), .month_name
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
assert data.weekday_name == 'Monday'
if time_locale is None:
expected_day = 'Monday'
expected_month = 'August'
else:
with tm.set_locale(time_locale, locale.LC_TIME):
expected_day = calendar.day_name[0].capitalize()
expected_month = calendar.month_name[8].capitalize()
assert data.day_name(time_locale) == expected_day
assert data.month_name(time_locale) == expected_month
# Test NaT
nan_ts = Timestamp(NaT)
assert np.isnan(nan_ts.day_name(time_locale))
assert np.isnan(nan_ts.month_name(time_locale))
@pytest.mark.parametrize('tz', [None, 'UTC', 'US/Eastern', 'Asia/Tokyo'])
def test_is_leap_year(self, tz):
# GH 13727
dt = Timestamp('2000-01-01 00:00:00', tz=tz)
assert dt.is_leap_year
assert isinstance(dt.is_leap_year, bool)
dt = Timestamp('1999-01-01 00:00:00', tz=tz)
assert not dt.is_leap_year
dt = Timestamp('2004-01-01 00:00:00', tz=tz)
assert dt.is_leap_year
dt = Timestamp('2100-01-01 00:00:00', tz=tz)
assert not dt.is_leap_year
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013, 12, 31)
result = Timestamp(d).week
expected = 1 # ISO standard
assert result == expected
d = datetime(2008, 12, 28)
result = Timestamp(d).week
expected = 52 # ISO standard
assert result == expected
d = datetime(2009, 12, 31)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
d = datetime(2010, 1, 1)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
d = datetime(2010, 1, 3)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
result = np.array([Timestamp(datetime(*args)).week
for args in [(2000, 1, 1), (2000, 1, 2), (
2005, 1, 1), (2005, 1, 2)]])
assert (result == [52, 52, 53, 53]).all()
class TestTimestampConstructors(object):
def test_constructor(self):
base_str = '2014-07-01 09:00'
base_dt = datetime(2014, 7, 1, 9)
base_expected = 1404205200000000000
# confirm base representation is correct
import calendar
assert (calendar.timegm(base_dt.timetuple()) * 1000000000 ==
base_expected)
tests = [(base_str, base_dt, base_expected),
('2014-07-01 10:00', datetime(2014, 7, 1, 10),
base_expected + 3600 * 1000000000),
('2014-07-01 09:00:00.000008000',
datetime(2014, 7, 1, 9, 0, 0, 8),
base_expected + 8000),
('2014-07-01 09:00:00.000000005',
Timestamp('2014-07-01 09:00:00.000000005'),
base_expected + 5)]
timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0), ('Asia/Tokyo', 9),
('US/Eastern', -4), ('dateutil/US/Pacific', -7),
(pytz.FixedOffset(-180), -3),
(dateutil.tz.tzoffset(None, 18000), 5)]
for date_str, date, expected in tests:
for result in [Timestamp(date_str), Timestamp(date)]:
# only with timestring
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# re-creation shouldn't affect to internal value
result = Timestamp(result)
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# with timezone
for tz, offset in timezones:
for result in [Timestamp(date_str, tz=tz), Timestamp(date,
tz=tz)]:
expected_tz = expected - offset * 3600 * 1000000000
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should preserve tz
result = Timestamp(result)
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should convert to UTC
result = Timestamp(result, tz='UTC')
expected_utc = expected - offset * 3600 * 1000000000
assert result.value == expected_utc
assert conversion.pydt_to_i8(result) == expected_utc
def test_constructor_with_stringoffset(self):
# GH 7833
base_str = '2014-07-01 11:00:00+02:00'
base_dt = datetime(2014, 7, 1, 9)
base_expected = 1404205200000000000
# confirm base representation is correct
import calendar
assert (calendar.timegm(base_dt.timetuple()) * 1000000000 ==
base_expected)
tests = [(base_str, base_expected),
('2014-07-01 12:00:00+02:00',
base_expected + 3600 * 1000000000),
('2014-07-01 11:00:00.000008000+02:00', base_expected + 8000),
('2014-07-01 11:00:00.000000005+02:00', base_expected + 5)]
timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0), ('Asia/Tokyo', 9),
('US/Eastern', -4), ('dateutil/US/Pacific', -7),
(pytz.FixedOffset(-180), -3),
(dateutil.tz.tzoffset(None, 18000), 5)]
for date_str, expected in tests:
for result in [Timestamp(date_str)]:
# only with timestring
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# re-creation shouldn't affect to internal value
result = Timestamp(result)
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# with timezone
for tz, offset in timezones:
result = Timestamp(date_str, tz=tz)
expected_tz = expected
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should preserve tz
result = Timestamp(result)
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should convert to UTC
result = Timestamp(result, tz='UTC')
expected_utc = expected
assert result.value == expected_utc
assert conversion.pydt_to_i8(result) == expected_utc
# This should be 2013-11-01 05:00 in UTC
# converted to Chicago tz
result = Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')
assert result.value == Timestamp('2013-11-01 05:00').value
expected = "Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')" # noqa
assert repr(result) == expected
assert result == eval(repr(result))
# This should be 2013-11-01 05:00 in UTC
# converted to Tokyo tz (+09:00)
result = Timestamp('2013-11-01 00:00:00-0500', tz='Asia/Tokyo')
assert result.value == Timestamp('2013-11-01 05:00').value
expected = "Timestamp('2013-11-01 14:00:00+0900', tz='Asia/Tokyo')"
assert repr(result) == expected
assert result == eval(repr(result))
# GH11708
# This should be 2015-11-18 10:00 in UTC
# converted to Asia/Katmandu
result = Timestamp("2015-11-18 15:45:00+05:45", tz="Asia/Katmandu")
assert result.value == Timestamp("2015-11-18 10:00").value
expected = "Timestamp('2015-11-18 15:45:00+0545', tz='Asia/Katmandu')"
assert repr(result) == expected
assert result == eval(repr(result))
# This should be 2015-11-18 10:00 in UTC
# converted to Asia/Kolkata
result = Timestamp("2015-11-18 15:30:00+05:30", tz="Asia/Kolkata")
assert result.value == Timestamp("2015-11-18 10:00").value
expected = "Timestamp('2015-11-18 15:30:00+0530', tz='Asia/Kolkata')"
assert repr(result) == expected
assert result == eval(repr(result))
def test_constructor_invalid(self):
with tm.assert_raises_regex(TypeError, 'Cannot convert input'):
Timestamp(slice(2))
with tm.assert_raises_regex(ValueError, 'Cannot convert Period'):
Timestamp(Period('1000-01-01'))
def test_constructor_invalid_tz(self):
# GH#17690
with tm.assert_raises_regex(TypeError, 'must be a datetime.tzinfo'):
Timestamp('2017-10-22', tzinfo='US/Eastern')
with tm.assert_raises_regex(ValueError, 'at most one of'):
Timestamp('2017-10-22', tzinfo=utc, tz='UTC')
with tm.assert_raises_regex(ValueError, "Invalid frequency:"):
# GH#5168
# case where user tries to pass tz as an arg, not kwarg, gets
# interpreted as a `freq`
Timestamp('2012-01-01', 'US/Pacific')
def test_constructor_tz_or_tzinfo(self):
# GH#17943, GH#17690, GH#5168
stamps = [Timestamp(year=2017, month=10, day=22, tz='UTC'),
Timestamp(year=2017, month=10, day=22, tzinfo=utc),
Timestamp(year=2017, month=10, day=22, tz=utc),
Timestamp(datetime(2017, 10, 22), tzinfo=utc),
Timestamp(datetime(2017, 10, 22), tz='UTC'),
Timestamp(datetime(2017, 10, 22), tz=utc)]
assert all(ts == stamps[0] for ts in stamps)
def test_constructor_positional(self):
# see gh-10758
with pytest.raises(TypeError):
Timestamp(2000, 1)
with pytest.raises(ValueError):
Timestamp(2000, 0, 1)
with pytest.raises(ValueError):
Timestamp(2000, 13, 1)
with pytest.raises(ValueError):
Timestamp(2000, 1, 0)
with pytest.raises(ValueError):
Timestamp(2000, 1, 32)
# see gh-11630
assert (repr(Timestamp(2015, 11, 12)) ==
repr(Timestamp('20151112')))
assert (repr(Timestamp(2015, 11, 12, 1, 2, 3, 999999)) ==
repr(Timestamp('2015-11-12 01:02:03.999999')))
def test_constructor_keyword(self):
# GH 10758
with pytest.raises(TypeError):
Timestamp(year=2000, month=1)
with pytest.raises(ValueError):
Timestamp(year=2000, month=0, day=1)
with pytest.raises(ValueError):
Timestamp(year=2000, month=13, day=1)
with pytest.raises(ValueError):
Timestamp(year=2000, month=1, day=0)
with pytest.raises(ValueError):
Timestamp(year=2000, month=1, day=32)
assert (repr(Timestamp(year=2015, month=11, day=12)) ==
repr(Timestamp('20151112')))
assert (repr(Timestamp(year=2015, month=11, day=12, hour=1, minute=2,
second=3, microsecond=999999)) ==
repr(Timestamp('2015-11-12 01:02:03.999999')))
def test_constructor_fromordinal(self):
base = datetime(2000, 1, 1)
ts = Timestamp.fromordinal(base.toordinal(), freq='D')
assert base == ts
assert ts.freq == 'D'
assert base.toordinal() == ts.toordinal()
ts = Timestamp.fromordinal(base.toordinal(), tz='US/Eastern')
assert Timestamp('2000-01-01', tz='US/Eastern') == ts
assert base.toordinal() == ts.toordinal()
# GH#3042
dt = datetime(2011, 4, 16, 0, 0)
ts = Timestamp.fromordinal(dt.toordinal())
assert ts.to_pydatetime() == dt
# with a tzinfo
stamp = Timestamp('2011-4-16', tz='US/Eastern')
dt_tz = stamp.to_pydatetime()
ts = Timestamp.fromordinal(dt_tz.toordinal(), tz='US/Eastern')
assert ts.to_pydatetime() == dt_tz
@pytest.mark.parametrize('result', [
Timestamp(datetime(2000, 1, 2, 3, 4, 5, 6), nanosecond=1),
Timestamp(year=2000, month=1, day=2, hour=3, minute=4, second=5,
microsecond=6, nanosecond=1),
Timestamp(year=2000, month=1, day=2, hour=3, minute=4, second=5,
microsecond=6, nanosecond=1, tz='UTC'),
Timestamp(2000, 1, 2, 3, 4, 5, 6, 1, None),
Timestamp(2000, 1, 2, 3, 4, 5, 6, 1, pytz.UTC)])
def test_constructor_nanosecond(self, result):
# GH 18898
expected = Timestamp(datetime(2000, 1, 2, 3, 4, 5, 6), tz=result.tz)
expected = expected + Timedelta(nanoseconds=1)
assert result == expected
@pytest.mark.parametrize('arg', ['year', 'month', 'day', 'hour', 'minute',
'second', 'microsecond', 'nanosecond'])
def test_invalid_date_kwarg_with_string_input(self, arg):
kwarg = {arg: 1}
with pytest.raises(ValueError):
Timestamp('2010-10-10 12:59:59.999999999', **kwarg)
def test_out_of_bounds_value(self):
one_us = np.timedelta64(1).astype('timedelta64[us]')
# By definition we can't go out of bounds in [ns], so we
# convert the datetime64s to [us] so we can go out of bounds
min_ts_us = np.datetime64(Timestamp.min).astype('M8[us]')
max_ts_us = np.datetime64(Timestamp.max).astype('M8[us]')
# No error for the min/max datetimes
Timestamp(min_ts_us)
Timestamp(max_ts_us)
# One us less than the minimum is an error
with pytest.raises(ValueError):
Timestamp(min_ts_us - one_us)
# One us more than the maximum is an error
with pytest.raises(ValueError):
Timestamp(max_ts_us + one_us)
def test_out_of_bounds_string(self):
with pytest.raises(ValueError):
Timestamp('1676-01-01')
with pytest.raises(ValueError):
Timestamp('2263-01-01')
def test_barely_out_of_bounds(self):
# GH#19529
# GH#19382 close enough to bounds that dropping nanos would result
# in an in-bounds datetime
with pytest.raises(OutOfBoundsDatetime):
Timestamp('2262-04-11 23:47:16.854775808')
def test_bounds_with_different_units(self):
out_of_bounds_dates = ('1677-09-21', '2262-04-12')
time_units = ('D', 'h', 'm', 's', 'ms', 'us')
for date_string in out_of_bounds_dates:
for unit in time_units:
dt64 = np.datetime64(date_string, dtype='M8[%s]' % unit)
with pytest.raises(ValueError):
Timestamp(dt64)
in_bounds_dates = ('1677-09-23', '2262-04-11')
for date_string in in_bounds_dates:
for unit in time_units:
dt64 = np.datetime64(date_string, dtype='M8[%s]' % unit)
Timestamp(dt64)
def test_min_valid(self):
# Ensure that Timestamp.min is a valid Timestamp
Timestamp(Timestamp.min)
def test_max_valid(self):
# Ensure that Timestamp.max is a valid Timestamp
Timestamp(Timestamp.max)
def test_now(self):
# GH#9000
ts_from_string = Timestamp('now')
ts_from_method = Timestamp.now()
ts_datetime = datetime.now()
ts_from_string_tz = Timestamp('now', tz='US/Eastern')
ts_from_method_tz = Timestamp.now(tz='US/Eastern')
# Check that the delta between the times is less than 1s (arbitrarily
# small)
delta = Timedelta(seconds=1)
assert abs(ts_from_method - ts_from_string) < delta
assert abs(ts_datetime - ts_from_method) < delta
assert abs(ts_from_method_tz - ts_from_string_tz) < delta
assert (abs(ts_from_string_tz.tz_localize(None) -
ts_from_method_tz.tz_localize(None)) < delta)
def test_today(self):
ts_from_string = Timestamp('today')
ts_from_method = Timestamp.today()
ts_datetime = datetime.today()
ts_from_string_tz = Timestamp('today', tz='US/Eastern')
ts_from_method_tz = Timestamp.today(tz='US/Eastern')
# Check that the delta between the times is less than 1s (arbitrarily
# small)
delta = Timedelta(seconds=1)
assert abs(ts_from_method - ts_from_string) < delta
assert abs(ts_datetime - ts_from_method) < delta
assert abs(ts_from_method_tz - ts_from_string_tz) < delta
assert (abs(ts_from_string_tz.tz_localize(None) -
ts_from_method_tz.tz_localize(None)) < delta)
class TestTimestamp(object):
def test_tz(self):
tstr = '2014-02-01 09:00'
ts = Timestamp(tstr)
local = ts.tz_localize('Asia/Tokyo')
assert local.hour == 9
assert local == Timestamp(tstr, tz='Asia/Tokyo')
conv = local.tz_convert('US/Eastern')
assert conv == Timestamp('2014-01-31 19:00', tz='US/Eastern')
assert conv.hour == 19
# preserves nanosecond
ts = Timestamp(tstr) + offsets.Nano(5)
local = ts.tz_localize('Asia/Tokyo')
assert local.hour == 9
assert local.nanosecond == 5
conv = local.tz_convert('US/Eastern')
assert conv.nanosecond == 5
assert conv.hour == 19
def test_utc_z_designator(self):
assert get_timezone(Timestamp('2014-11-02 01:00Z').tzinfo) == 'UTC'
def test_asm8(self):
np.random.seed(7960929)
ns = [Timestamp.min.value, Timestamp.max.value, 1000]
for n in ns:
assert (Timestamp(n).asm8.view('i8') ==
np.datetime64(n, 'ns').view('i8') == n)
assert (Timestamp('nat').asm8.view('i8') ==
np.datetime64('nat', 'ns').view('i8'))
def test_class_ops_pytz(self):
def compare(x, y):
assert (int(Timestamp(x).value / 1e9) ==
int(Timestamp(y).value / 1e9))
compare(Timestamp.now(), datetime.now())
compare(Timestamp.now('UTC'), datetime.now(timezone('UTC')))
compare(Timestamp.utcnow(), datetime.utcnow())
compare(Timestamp.today(), datetime.today())
current_time = calendar.timegm(datetime.now().utctimetuple())
compare(Timestamp.utcfromtimestamp(current_time),
datetime.utcfromtimestamp(current_time))
compare(Timestamp.fromtimestamp(current_time),
datetime.fromtimestamp(current_time))
date_component = datetime.utcnow()
time_component = (date_component + timedelta(minutes=10)).time()
compare(Timestamp.combine(date_component, time_component),
datetime.combine(date_component, time_component))
def test_class_ops_dateutil(self):
def compare(x, y):
assert (int(np.round(Timestamp(x).value / 1e9)) ==
int(np.round(Timestamp(y).value / 1e9)))
compare(Timestamp.now(), datetime.now())
compare(Timestamp.now('UTC'), datetime.now(tzutc()))
compare(Timestamp.utcnow(), datetime.utcnow())
compare(Timestamp.today(), datetime.today())
current_time = calendar.timegm(datetime.now().utctimetuple())
compare(Timestamp.utcfromtimestamp(current_time),
datetime.utcfromtimestamp(current_time))
compare(Timestamp.fromtimestamp(current_time),
datetime.fromtimestamp(current_time))
date_component = datetime.utcnow()
time_component = (date_component + timedelta(minutes=10)).time()
compare(Timestamp.combine(date_component, time_component),
datetime.combine(date_component, time_component))
def test_basics_nanos(self):
val = np.int64(946684800000000000).view('M8[ns]')
stamp = Timestamp(val.view('i8') + 500)
assert stamp.year == 2000
assert stamp.month == 1
assert stamp.microsecond == 0
assert stamp.nanosecond == 500
# GH 14415
val = np.iinfo(np.int64).min + 80000000000000
stamp = Timestamp(val)
assert stamp.year == 1677
assert stamp.month == 9
assert stamp.day == 21
assert stamp.microsecond == 145224
assert stamp.nanosecond == 192
def test_unit(self):
def check(val, unit=None, h=1, s=1, us=0):
stamp = Timestamp(val, unit=unit)
assert stamp.year == 2000
assert stamp.month == 1
assert stamp.day == 1
assert stamp.hour == h
if unit != 'D':
assert stamp.minute == 1
assert stamp.second == s
assert stamp.microsecond == us
else:
assert stamp.minute == 0
assert stamp.second == 0
assert stamp.microsecond == 0
assert stamp.nanosecond == 0
ts = Timestamp('20000101 01:01:01')
val = ts.value
days = (ts - Timestamp('1970-01-01')).days
check(val)
check(val / long(1000), unit='us')
check(val / long(1000000), unit='ms')
check(val / long(1000000000), unit='s')
check(days, unit='D', h=0)
# using truediv, so these are like floats
if PY3:
check((val + 500000) / long(1000000000), unit='s', us=500)
check((val + 500000000) / long(1000000000), unit='s', us=500000)
check((val + 500000) / long(1000000), unit='ms', us=500)
# get chopped in py2
else:
check((val + 500000) / long(1000000000), unit='s')
check((val + 500000000) / long(1000000000), unit='s')
check((val + 500000) / long(1000000), unit='ms')
# ok
check((val + 500000) / long(1000), unit='us', us=500)
check((val + 500000000) / long(1000000), unit='ms', us=500000)
# floats
check(val / 1000.0 + 5, unit='us', us=5)
check(val / 1000.0 + 5000, unit='us', us=5000)
check(val / 1000000.0 + 0.5, unit='ms', us=500)
check(val / 1000000.0 + 0.005, unit='ms', us=5)
check(val / 1000000000.0 + 0.5, unit='s', us=500000)
check(days + 0.5, unit='D', h=12)
def test_roundtrip(self):
# test value to string and back conversions
# further test accessors
base = Timestamp('20140101 00:00:00')
result = Timestamp(base.value + Timedelta('5ms').value)
assert result == Timestamp(str(base) + ".005000")
assert result.microsecond == 5000
result = Timestamp(base.value + Timedelta('5us').value)
assert result == Timestamp(str(base) + ".000005")
assert result.microsecond == 5
result = Timestamp(base.value + Timedelta('5ns').value)
assert result == Timestamp(str(base) + ".000000005")
assert result.nanosecond == 5
assert result.microsecond == 0
result = Timestamp(base.value + Timedelta('6ms 5us').value)
assert result == Timestamp(str(base) + ".006005")
assert result.microsecond == 5 + 6 * 1000
result = Timestamp(base.value + Timedelta('200ms 5us').value)
assert result == Timestamp(str(base) + ".200005")
assert result.microsecond == 5 + 200 * 1000
def test_hash_equivalent(self):
d = {datetime(2011, 1, 1): 5}
stamp = Timestamp(datetime(2011, 1, 1))
assert d[stamp] == 5
class TestTimestampNsOperations(object):
def setup_method(self, method):
self.timestamp = Timestamp(datetime.utcnow())
def assert_ns_timedelta(self, modified_timestamp, expected_value):
value = self.timestamp.value
modified_value = modified_timestamp.value
assert modified_value - value == expected_value
def test_timedelta_ns_arithmetic(self):
self.assert_ns_timedelta(self.timestamp + np.timedelta64(-123, 'ns'),
-123)
def test_timedelta_ns_based_arithmetic(self):
self.assert_ns_timedelta(self.timestamp + np.timedelta64(
1234567898, 'ns'), 1234567898)
def test_timedelta_us_arithmetic(self):
self.assert_ns_timedelta(self.timestamp + np.timedelta64(-123, 'us'),
-123000)
def test_timedelta_ms_arithmetic(self):
time = self.timestamp + np.timedelta64(-123, 'ms')
self.assert_ns_timedelta(time, -123000000)
def test_nanosecond_string_parsing(self):
ts = Timestamp('2013-05-01 07:15:45.123456789')
# GH 7878
expected_repr = '2013-05-01 07:15:45.123456789'
expected_value = 1367392545123456789
assert ts.value == expected_value
assert expected_repr in repr(ts)
ts = Timestamp('2013-05-01 07:15:45.123456789+09:00', tz='Asia/Tokyo')
assert ts.value == expected_value - 9 * 3600 * 1000000000
assert expected_repr in repr(ts)
ts = Timestamp('2013-05-01 07:15:45.123456789', tz='UTC')
assert ts.value == expected_value
assert expected_repr in repr(ts)
ts = Timestamp('2013-05-01 07:15:45.123456789', tz='US/Eastern')
assert ts.value == expected_value + 4 * 3600 * 1000000000
assert expected_repr in repr(ts)
# GH 10041
ts = Timestamp('20130501T071545.123456789')
assert ts.value == expected_value
assert expected_repr in repr(ts)
def test_nanosecond_timestamp(self):
# GH 7610
expected = 1293840000000000005
t = Timestamp('2011-01-01') + offsets.Nano(5)
assert repr(t) == "Timestamp('2011-01-01 00:00:00.000000005')"
assert t.value == expected
assert t.nanosecond == 5
t = | Timestamp(t) | pandas.Timestamp |
""" Copyright start
Copyright (C) 2008 - 2022 Fortinet Inc.
All rights reserved.
FORTINET CONFIDENTIAL & FORTINET PROPRIETARY SOURCE CODE
Copyright end """
from asyncore import read
import requests
import pandas as pd
import numpy as np
import csv
from os.path import join
import json
from connectors.core.connector import get_logger, ConnectorError
from connectors.cyops_utilities.builtins import download_file_from_cyops
from integrations.crudhub import make_request
from .constants import LOGGER_NAME
logger = get_logger(LOGGER_NAME)
def extract_data_from_csv(config, params):
try:
numberOfRowsToSkip = None
isSingleColumn = None
no_of_columns = None
isCSVWithoutHeaders = False
file_iri = handle_params(params,params.get('value'))
file_path = join('/tmp', download_file_from_cyops(file_iri)['cyops_file_path'])
if params.get('numberOfRowsToSkip'):
numberOfRowsToSkip = params.get('numberOfRowsToSkip')
res = _check_if_csv(file_path,numberOfRowsToSkip)
logger.info(res)
if res.get('headers') == False:
isCSVWithoutHeaders = True
no_of_columns = res.get('columns')
if res.get('columns') == 1:
isSingleColumn = True
if params.get('columnNames') != "": # CSV file with column header and specific columns to use in creating recordset
columnNames = params.get('columnNames')
columnNames = columnNames.split(",")
# We are passing specific columns name to filter data from here
df = _read_file_specific_columns(file_path,columnNames,numberOfRowsToSkip)
elif isSingleColumn and not isCSVWithoutHeaders : # CSV file with one column and header
df = _read_file_single_column(file_path,numberOfRowsToSkip)
elif isSingleColumn and isCSVWithoutHeaders: # CSV file with one column and no header
df = _read_file_single_column_no_header(file_path,numberOfRowsToSkip,no_of_columns)
elif isCSVWithoutHeaders: # CSV file without column header and all columns
df = _read_file_no_headers(file_path,numberOfRowsToSkip,no_of_columns)
else:
# We are reading complete file assuming it has column header
df = _read_file_all_columns(file_path,numberOfRowsToSkip)
# If user has selected to deduplicate recordset
try:
if params.get('deDupValuesOn'):
deDupValuesOn = params.get('deDupValuesOn')
deDupValuesOn = deDupValuesOn.split(",")
df=df.drop_duplicates(subset=deDupValuesOn, keep='first')
except Exception as Err:
logger.error('Error in deduplicating data extract_data_from_csv(): %s' % Err)
raise ConnectorError('Error in deduplicating data extract_data_from_csv(): %s' % Err)
# Replace empty values with N/A
df = df.fillna('N/A')
#Create small chunks of dataset to consume by playbook if requested by user otherwise return complete recordset
if params.get('recordBatch'):
smaller_datasets = np.array_split(df, 20)
all_records = []
for batch in smaller_datasets:
all_records.append(batch.to_dict("records"))
final_result = {"records": all_records}
else:
final_result = df.to_dict("records")
return final_result
except Exception as Err:
logger.error('Error in extract_data_from_csv(): %s' % Err)
raise ConnectorError('Error in processing CSV File: %s' % Err)
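# Illustrative sketch (not part of the connector): how the recordBatch option above turns
# a recordset into batches of plain dicts. The dataframe here is made up; np.array_split
# simply yields 20 roughly equal row slices, each converted with to_dict("records").
def _example_recordset_batches():
    sample_df = pd.DataFrame({'indicator': ['1.2.3.4', '5.6.7.8'] * 25, 'score': range(50)})
    batches = np.array_split(sample_df, 20)
    all_records = [batch.to_dict("records") for batch in batches]
    return {"records": all_records}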
def merge_two_csv_and_extract_data(config, params):
try:
if (params.get('mergeColumnNames')):
mergeColumn = params.get('mergeColumnNames')
mergeColumn = mergeColumn.split(",")
fileOneIRI = handle_params(params,params.get('file_one_value'))
fileOnePath = join('/tmp', download_file_from_cyops(fileOneIRI)['cyops_file_path'])
fileTwoIRI = handle_params(params,params.get('file_two_value'))
fileTwoPath = join('/tmp', download_file_from_cyops(fileTwoIRI)['cyops_file_path'])
logger.info(params)
# Read First File
df1 = _read_and_return_ds(fileOnePath,params,config,filePassed="First")
# Read Second File
df2= _read_and_return_ds(fileTwoPath,params,config,filePassed="Second")
#Merge both files
combined_recordSet =pd.merge(df1,df2,how='left',left_on=mergeColumn,right_on=mergeColumn)
# If user has selected to deduplicate recordset
try:
if params.get('deDupValuesOn'):
deDupValuesOn = params.get('deDupValuesOn')
deDupValuesOn = deDupValuesOn.split(",")
combined_recordSet=combined_recordSet.drop_duplicates(subset=deDupValuesOn, keep='first')
except Exception as Err:
logger.error('Error in deduplicating data extract_data_from_csv(): %s' % Err)
raise ConnectorError('Error in deduplicating data extract_data_from_csv(): %s' % Err)
# Replace empty values with N/A
combined_recordSet = combined_recordSet.fillna('N/A')
#Create small chunks of dataset to consume by playbook if requested by user otherwise return complete recordset
if params.get('recordBatch'):
smaller_datasets = np.array_split(combined_recordSet, 20)
all_records = []
for batch in smaller_datasets:
all_records.append(batch.to_dict("records"))
final_result = {"records": all_records}
else:
final_result = combined_recordSet.to_dict("records")
return final_result
except Exception as Err:
logger.error('Error in merge_two_csv_and_extract_data(): %s' % Err)
raise ConnectorError('Error in processing CSV File: %s' % Err)
def concat_two_csv_and_extract_data(config, params):
try:
fileOneIRI = handle_params(params,params.get('file_one_value'))
fileOnePath = join('/tmp', download_file_from_cyops(fileOneIRI)['cyops_file_path'])
fileTwoIRI = handle_params(params,params.get('file_two_value'))
fileTwoPath = join('/tmp', download_file_from_cyops(fileTwoIRI)['cyops_file_path'])
logger.info(params)
df1 = _read_and_return_ds(fileOnePath,params,config,filePassed="First")
df2= _read_and_return_ds(fileTwoPath,params,config,filePassed="Second")
#concat both files
combined_recordSet =pd.concat([df1,df2])
# If user has selected to deduplicate recordset
try:
if params.get('deDupValuesOn'):
deDupValuesOn = params.get('deDupValuesOn')
deDupValuesOn = deDupValuesOn.split(",")
combined_recordSet=combined_recordSet.drop_duplicates(subset=deDupValuesOn, keep='first')
except Exception as Err:
logger.error('Error in deduplicating data extract_data_from_csv(): %s' % Err)
raise ConnectorError('Error in deduplicating data extract_data_from_csv(): %s' % Err)
# Replace empty values with N/A
combined_recordSet = combined_recordSet.fillna('N/A')
#Create small chunks of dataset to consume by playbook if requested by user otherwise return complete recordset
if params.get('recordBatch'):
smaller_datasets = np.array_split(combined_recordSet, 20)
all_records = []
for batch in smaller_datasets:
all_records.append(batch.to_dict("records"))
final_result = {"records": all_records}
else:
final_result = combined_recordSet.to_dict("records")
return final_result
except Exception as Err:
logger.error('Error in concat_two_csv_and_extract_data(): %s' % Err)
raise ConnectorError('Error in processing CSV File: %s' % Err)
def join_two_csv_and_extract_data(config, params):
try:
fileOneIRI = handle_params(params,params.get('file_one_value'))
fileOnePath = join('/tmp', download_file_from_cyops(fileOneIRI)['cyops_file_path'])
fileTwoIRI = handle_params(params,params.get('file_two_value'))
fileTwoPath = join('/tmp', download_file_from_cyops(fileTwoIRI)['cyops_file_path'])
df1 = _read_and_return_ds(fileOnePath,params,config,filePassed="First")
df2= _read_and_return_ds(fileTwoPath,params,config,filePassed="Second")
#Join both files
combined_recordSet =df1.join(df2,lsuffix='_FirstFile', rsuffix='_SecondFile')
# If user has selected to deduplicate recordset
try:
if params.get('deDupValuesOn'):
deDupValuesOn = params.get('deDupValuesOn')
deDupValuesOn = deDupValuesOn.split(",")
combined_recordSet=combined_recordSet.drop_duplicates(subset=deDupValuesOn, keep='first')
except Exception as Err:
logger.error('Error in deduplicating data extract_data_from_csv(): %s' % Err)
raise ConnectorError('Error in deduplicating data extract_data_from_csv(): %s' % Err)
# Replace empty values with N/A
combined_recordSet = combined_recordSet.fillna('N/A')
        #Create small chunks of dataset to consume by playbook if requested by user otherwise return complete recordset
if params.get('recordBatch'):
smaller_datasets = np.array_split(combined_recordSet, 20)
all_records = []
for batch in smaller_datasets:
all_records.append(batch.to_dict("records"))
final_result = {"records": all_records}
else:
final_result = combined_recordSet.to_dict("records")
return final_result
except Exception as Err:
logger.error('Error in join_two_csv_and_extract_data(): %s' % Err)
raise ConnectorError('Error in processing CSV File: %s' % Err)
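# Illustrative sketch (not part of the connector): the three combine operations above map
# directly onto pandas merge/concat/join. With two tiny made-up frames, merge matches rows
# on a key column, concat stacks rows, and join aligns on the index using the suffixes.
def _example_combine_styles():
    df1 = pd.DataFrame({'ip': ['1.1.1.1', '2.2.2.2'], 'severity': ['high', 'low']})
    df2 = pd.DataFrame({'ip': ['1.1.1.1', '3.3.3.3'], 'country': ['US', 'DE']})
    merged = pd.merge(df1, df2, how='left', left_on=['ip'], right_on=['ip'])
    stacked = pd.concat([df1, df2])
    joined = df1.join(df2, lsuffix='_FirstFile', rsuffix='_SecondFile')
    return merged, stacked, joined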
def _read_file_specific_columns(filepath,columns_t,numberOfRowsToSkip=None):
try:
chunk = pd.read_csv('{}'.format(filepath), delimiter=',', encoding="utf-8-sig",skiprows=numberOfRowsToSkip,chunksize=100000,error_bad_lines=False,usecols=columns_t)
df = | pd.concat(chunk) | pandas.concat |
from __future__ import division
from functools import wraps
import pandas as pd
import numpy as np
import time
import csv, sys
import os.path
import logging
from .ted_functions import TedFunctions
from .ted_aggregate_methods import TedAggregateMethods
from base.uber_model import UberModel, ModelSharedInputs
class TedSpeciesProperties(object):
"""
Listing of species properties that will eventually be read in from a SQL db
"""
def __init__(self):
"""Class representing Species properties"""
super(TedSpeciesProperties, self).__init__()
self.sci_name = pd.Series([], dtype='object')
self.com_name = pd.Series([], dtype='object')
self.taxa = pd.Series([], dtype='object')
self.order = pd.Series([], dtype='object')
self.usfws_id = pd.Series([], dtype='object')
self.body_wgt = pd.Series([], dtype='object')
self.diet_item = pd.Series([], dtype='object')
self.h2o_cont = pd.Series([], dtype='float')
def read_species_properties(self):
# this is a temporary method to initiate the species/diet food items lists (this will be replaced with
# a method to access a SQL database containing the properties
#filename = './ted/tests/TEDSpeciesProperties.csv'
filename = os.path.join(os.path.dirname(__file__),'tests/TEDSpeciesProperties.csv')
try:
with open(filename,'rt') as csvfile:
# csv.DictReader uses first line in file for column headings by default
dr = pd.read_csv(csvfile) # comma is default delimiter
except csv.Error as e:
            sys.exit('file: %s, %s' % (filename, e))
print(dr)
self.sci_name = dr.ix[:,'Scientific Name']
self.com_name = dr.ix[:,'Common Name']
self.taxa = dr.ix[:,'Taxa']
self.order = dr.ix[:,'Order']
self.usfws_id = dr.ix[:,'USFWS Species ID (ENTITY_ID)']
self.body_wgt= dr.ix[:,'BW (g)']
self.diet_item = dr.ix[:,'Food item']
self.h2o_cont = dr.ix[:,'Water content of diet']
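# Minimal sketch (not part of the model): read_species_properties() above expects a CSV whose
# headers match the labels used in the lookups. A tiny in-memory file with made-up values:
def _example_species_csv():
    import io
    csv_text = ("Scientific Name,Common Name,Taxa,Order,USFWS Species ID (ENTITY_ID),"
                "BW (g),Food item,Water content of diet\n"
                "Genus species,Example bird,Birds,Passeriformes,B000,8.5,arthropods,0.69\n")
    dr = pd.read_csv(io.StringIO(csv_text))
    return dr['Scientific Name'], dr['BW (g)']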
class TedInputs(ModelSharedInputs):
"""
Required inputs class for Ted.
"""
def __init__(self):
"""Class representing the inputs for Ted"""
super(TedInputs, self).__init__()
# Inputs: Assign object attribute variables from the input Pandas DataFrame
self.chemical_name = pd.Series([], dtype="object", name="chemical_name")
# application parameters for min/max application scenarios
self.crop_min = pd.Series([], dtype="object", name="crop")
self.app_method_min = pd.Series([], dtype="object", name="app_method_min")
self.app_rate_min = pd.Series([], dtype="float", name="app_rate_min")
self.num_apps_min = pd.Series([], dtype="int", name="num_apps_min")
self.app_interval_min = pd.Series([], dtype="int", name="app_interval_min")
self.droplet_spec_min = pd.Series([], dtype="object", name="droplet_spec_min")
        self.boom_hgt_min = pd.Series([], dtype="object", name="boom_hgt_min")
        self.pest_incorp_depth_min = pd.Series([], dtype="object", name="pest_incorp_depth_min")
self.crop_max = pd.Series([], dtype="object", name="crop")
self.app_method_max = pd.Series([], dtype="object", name="app_method_max")
self.app_rate_max = pd.Series([], dtype="float", name="app_rate_max")
self.num_apps_max = pd.Series([], dtype="int", name="num_app_maxs")
self.app_interval_max = pd.Series([], dtype="int", name="app_interval_max")
self.droplet_spec_max = pd.Series([], dtype="object", name="droplet_spec_max")
        self.boom_hgt_max = pd.Series([], dtype="object", name="boom_hgt_max")
        self.pest_incorp_depth_max = pd.Series([], dtype="object", name="pest_incorp_depth_max")
# physical, chemical, and fate properties of pesticide
self.foliar_diss_hlife = pd.Series([], dtype="float", name="foliar_diss_hlife")
self.aerobic_soil_meta_hlife = pd.Series([], dtype="float", name="aerobic_soil_meta_hlife")
self.frac_retained_mamm = pd.Series([], dtype="float", name="frac_retained_mamm")
self.frac_retained_birds = pd.Series([], dtype="float", name="frac_retained_birds")
self.log_kow = pd.Series([], dtype="float", name="log_kow")
self.koc = pd.Series([], dtype="float", name="koc")
self.solubility = pd.Series([], dtype="float", name="solubility")
self.henry_law_const = pd.Series([], dtype="float", name="henry_law_const")
# bio concentration factors (ug active ing/kg-ww) / (ug active ing/liter)
self.aq_plant_algae_bcf_mean = pd.Series([], dtype="float", name="aq_plant_algae_bcf_mean")
self.aq_plant_algae_bcf_upper = pd.Series([], dtype="float", name="aq_plant_algae_bcf_upper")
self.inv_bcf_mean = pd.Series([], dtype="float", name="inv_bcf_mean")
self.inv_bcf_upper = pd.Series([], dtype="float", name="inv_bcf_upper")
self.fish_bcf_mean = pd.Series([], dtype="float", name="fish_bcf_mean")
self.fish_bcf_upper = pd.Series([], dtype="float", name="fish_bcf_upper")
# bounding water concentrations (ug active ing/liter)
self.water_conc_1 = pd.Series([], dtype="float", name="water_conc_1") # lower bound
self.water_conc_2 = pd.Series([], dtype="float", name="water_conc_2") # upper bound
# health value inputs
# naming convention (based on listing from OPP TED Excel spreadsheet 'inputs' worksheet):
# dbt: dose based toxicity
# cbt: concentration-based toxicity
# arbt: application rate-based toxicity
# 1inmill_mort: 1/million mortality (note initial character is numeral 1, not letter l)
# 1inten_mort: 10% mortality (note initial character is numeral 1, not letter l)
# others are self explanatory
# dose based toxicity(dbt): mammals (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_mamm_1inmill_mort = pd.Series([], dtype="float", name="dbt_mamm_1inmill_mort")
self.dbt_mamm_1inten_mort = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort")
self.dbt_mamm_low_ld50 = pd.Series([], dtype="float", name="dbt_mamm_low_ld50")
        self.dbt_mamm_rat_oral_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_oral_ld50")
self.dbt_mamm_rat_derm_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_derm_ld50")
self.dbt_mamm_rat_inhal_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_inhal_ld50")
self.dbt_mamm_sub_direct = pd.Series([], dtype="float", name="dbt_mamm_sub_direct")
self.dbt_mamm_sub_indirect = pd.Series([], dtype="float", name="dbt_mamm_sub_indirect")
self.dbt_mamm_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_mamm_1inmill_mort_wgt")
self.dbt_mamm_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort_wgt")
self.dbt_mamm_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_low_ld50_wgt")
        self.dbt_mamm_rat_oral_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_oral_ld50_wgt")
self.dbt_mamm_rat_derm_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_derm_ld50_wgt")
self.dbt_mamm_rat_inhal_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_inhal_ld50_wgt")
self.dbt_mamm_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_mamm_sub_direct_wgt")
self.dbt_mamm_sub_indirect_wgt = pd.Series([], dtype="float", name="dbt_mamm_sub_indirect_wgt")
# dose based toxicity(dbt): birds (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_bird_1inmill_mort = pd.Series([], dtype="float", name="dbt_bird_1inmill_mort")
self.dbt_bird_1inten_mort = pd.Series([], dtype="float", name="dbt_bird_1inten_mort")
self.dbt_bird_low_ld50 = pd.Series([], dtype="float", name="dbt_bird_low_ld50")
self.dbt_bird_hc05 = pd.Series([], dtype="float", name="dbt_bird_hc05")
self.dbt_bird_hc50 = pd.Series([], dtype="float", name="dbt_bird_hc50")
self.dbt_bird_hc95 = pd.Series([], dtype="float", name="dbt_bird_hc95")
self.dbt_bird_sub_direct = pd.Series([], dtype="float", name="dbt_bird_sub_direct")
self.dbt_bird_sub_indirect = pd.Series([], dtype="float", name="dbt_bird_sub_indirect")
self.mineau_sca_fact = | pd.Series([], dtype="float", name="mineau_sca_fact") | pandas.Series |
import argparse
import multiprocessing
import os
import random as rn
from typing import List, Tuple, Union
import cv2
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
from tqdm import tqdm
import config
import preprocessing.augment_op as aop
from pe_logger import PELogger
# Set seed to get reproducible augmentations
np.random.seed(1)
rn.seed(2)
logger = PELogger().get_logger()
CONFIG = config.config()
def augment(
df_row: pd.DataFrame,
target_w: int,
target_h: int,
aug_list: List[Union[List[str], str]],
input_dir: str,
output_dir: str,
) -> pd.DataFrame:
"""Apply data augmentation to images listed in given dataframe
Args:
        df_row: single-row dataframe with the image filename and metadata to process
target_w: pixel width to which images will be scaled
target_h: pixel height to which image will be scaled
aug_list: list of augmentations to apply (can be nested)
input_dir: directory containing input images
output_dir: output directory for augmented PNG files
Returns:
dataframe including rows for augmented images
"""
new_df = pd.DataFrame(columns=df_row.columns)
filepath = df_row["png_filename"].squeeze()
series_instance_uid = df_row["SeriesInstanceUID"].squeeze()
sop_instance_uid = df_row["SOPInstanceUID"].squeeze()
im_orig = cv2.imread(os.path.join(input_dir, filepath))
filename = os.path.basename(filepath)
for aug in aug_list:
df_new_row = df_row.copy()
im = im_orig.copy()
if isinstance(aug, list):
for a in aug:
im = getattr(aop, a)(im)
im_aug = im
aug_str = "aug_" + "_".join(aug)
else:
im_aug = getattr(aop, aug)(im)
aug_str = "aug_" + aug
if im_aug.shape[0] != target_h or im_aug.shape[1] != target_w:
im_aug = cv2.resize(im_aug, (target_w, target_h))
filename_aug = filename.replace(filename[-4:], "_" + aug_str + filename[-4:])
output_filename = os.path.join(output_dir, filename_aug)
# Update affected parameters
df_new_row["augmented"] = 1
df_new_row["augment_type"] = aug_str
df_new_row["png_filename"] = filename_aug
df_new_row["SeriesInstanceUID"] = series_instance_uid + "_" + aug_str
df_new_row["SOPInstanceUID"] = sop_instance_uid + "_" + aug_str
assert cv2.imwrite(output_filename, im_aug), (
"Failed to write augmented file" + output_filename
)
new_df = pd.concat([new_df, df_new_row], axis=0, ignore_index=True)
return new_df
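# Illustrative sketch (separate from the pipeline above): how entries of aug_list are
# interpreted by augment(). A plain string names one augment_op function; a nested list
# chains several in order. The dummy ops below are stand-ins, not real augment_op names.
def _example_aug_list_semantics():
    dummy_ops = {'flip': np.fliplr, 'invert': lambda im: 255 - im}
    image = np.zeros((4, 4, 3), dtype=np.uint8)
    results = {}
    for aug in ['flip', ['flip', 'invert']]:
        im = image.copy()
        if isinstance(aug, list):
            for a in aug:
                im = dummy_ops[a](im)
            aug_str = "aug_" + "_".join(aug)
        else:
            im = dummy_ops[aug](im)
            aug_str = "aug_" + aug
        results[aug_str] = im
    return results  # keys: 'aug_flip', 'aug_flip_invert'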
def _set_defaults_for_new_cols(df: pd.DataFrame, target_w: int, target_h: int):
df["rows"] = target_h
df["columns"] = target_w
df["augmented"] = 0
df["augment_type"] = "none"
def _get_num_neg_per_pos_label(df: pd.DataFrame) -> int:
counts = df["label"].value_counts()
assert len(counts) == 2, "Dataset contains invalid labels"
return int(counts[0] / float(counts[1]) + 0.5)
def get_aug_split_for_negative(
df: pd.DataFrame, frac: float = 0.1
) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""Split dataframe's negative slices to two dataframes.
Args:
df: dataframe with image filenames
frac: fraction of series to augment - (i.e. not slices). Defaults to 0.1.
Returns:
Tuple of dataframes: first dataframe contains images not scheduled for augmentation
and the other one in turn contains images that should be augmented.
"""
df_neg = df.loc[df.label == 0]
df_neg_series = df_neg.groupby("SeriesInstanceUID").first()
df_apply_aug_series = df_neg_series.sample(frac=frac)
df_no_aug = df_neg.loc[~df_neg["SeriesInstanceUID"].isin(df_apply_aug_series.index)]
df_apply_aug = df_neg.loc[df_neg["SeriesInstanceUID"].isin(df_apply_aug_series.index)]
return df_no_aug, df_apply_aug
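# Illustrative sketch (separate from the pipeline): the split above samples whole series, not
# individual slices. With a made-up frame, sampling half of the unique SeriesInstanceUID values
# keeps every slice of a sampled series together.
def _example_series_level_split():
    df = pd.DataFrame({'SeriesInstanceUID': ['s1', 's1', 's2', 's2', 's3', 's3'],
                       'label': [0, 0, 0, 0, 0, 0]})
    sampled_series = df.groupby('SeriesInstanceUID').first().sample(frac=0.5)
    df_apply_aug = df.loc[df['SeriesInstanceUID'].isin(sampled_series.index)]
    df_no_aug = df.loc[~df['SeriesInstanceUID'].isin(sampled_series.index)]
    return df_no_aug, df_apply_aug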
def augment_df(
df: pd.DataFrame,
target_w: int,
target_h: int,
aug_list: List[Union[List[str], str]],
num_augs: int,
input_dir: str,
output_dir: str,
) -> pd.DataFrame:
"""Augment slices listed in given dataframe in parallel by
selecting num_augs of augmentations from aug_list.
Returns a dataframe that includes rows for augmented images
"""
df_aug = pd.DataFrame(columns=df.columns)
augs_to_apply = aug_list
if num_augs < len(aug_list):
aug_indices = np.random.choice(np.arange(len(aug_list)), num_augs, replace=False)
augs_to_apply = [aug_list[idx] for idx in aug_indices]
# Process series by series with slices in parallel
series_list = df["SeriesInstanceUID"].unique()
for series in tqdm(series_list, unit="series"):
df_series = df.loc[df["SeriesInstanceUID"] == series]
df_series_aug = Parallel(n_jobs=multiprocessing.cpu_count())(
delayed(augment)(
df_series.iloc[row : row + 1, :],
target_w,
target_h,
augs_to_apply,
input_dir,
output_dir,
)
for row in range(df_series.shape[0])
)
df_aug = | pd.concat([df_aug, *df_series_aug], axis=0, ignore_index=True) | pandas.concat |
#from dqn_env import TrainLine
import sys
sys.path.append('.\subway_system')
from subway_env import TrainLine
from RL_brain import DeepQNetwork
import numpy as np
import matplotlib.pyplot as mplt
import tensorflow as tf
import pandas as pd
import TrainAndRoadCharacter as trc
def plot(r,ylabel):
import matplotlib.pyplot as plt
plt.plot(np.arange(len(r)), r, linewidth=1)
plt.ylabel(ylabel)
plt.xlabel('training episodes')
plt.savefig("./img/"+ylabel+".png")
plt.show()
def draw_mean(r,ylabel):
import matplotlib.pyplot as plt
x_10 = []
temp = []
count = 0
for i in range (len(r)):
temp.append(r[i])
count += 1
if count >= 10:
x_10.append(sum(temp) / 10)
temp = []
count = 0
plt.plot(np.arange(len(x_10)), x_10, linewidth=1)
plt.ylabel('mean' + ylabel)
plt.xlabel('training episodes X10')
plt.savefig("./img/"+'mean' +ylabel+".png")
plt.show()
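# Equivalent numpy sketch (not called elsewhere in this script) of what draw_mean computes:
# a non-overlapping 10-episode average, dropping any incomplete trailing block.
def mean_every_10(values):
    arr = np.asarray(values, dtype=float)
    n_blocks = len(arr) // 10
    return arr[:n_blocks * 10].reshape(n_blocks, 10).mean(axis=1)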
def run_train():
total_step = 0
Max_iteras= 3000
for episode in range(Max_iteras):
        # train for Max_iteras episodes
r1_max = 0
step = 0
r1 = 0
        pl=[]  # position
        vl=[]  # speed
        ul=[]  # acceleration
        al=[]  # action
# initial observation
observation = env.reset()
#env.bef_print()
while True:
# fresh env
#env.render()
# RL choose action based on observation
action = RL.choose_action(observation)
            # force the train up toward the target speed curve at the start of the trip
pos = observation[0] * env.S
veo = observation[1] * env.max_speed
if pos <100 and veo < env.avg_speed:
action = 8
# RL take action and get next observation and reward
            observation_,E,reward, done, action = env.step(action) # action 0-6 is later mapped to [-0.3, 0.3]
r1 = r1 * 0.99 + reward
RL.store_transition(observation, action, reward, observation_)
if (total_step > 5000 and total_step % 32 == 0 ):
RL.learn()
# swap observation
observation = observation_
# o1 =observation
if episode%20==0 or episode==Max_iteras-1:
pl.append(pos)
vl.append(veo)
ul.append(observation[3])
al.append(action)
# break while loop when end of this episode
if done:
                # env.subFilterFactor(Max_iteras)  # reduce the smoothing factor
r.append(r1)
energy.append(E)
print(observation_[2]*env.T,env.TErrorSum,env.filterFactor,RL.epsilon)
RL.increase_epsilon()
tlist.append(observation_[2]*env.T)
                # Curve check: decide whether to save this curve (is the travel distance acceptable, is the trip time close enough); saved as <episode>_CurveData.csv
if r1 > r1_max and episode>1500 and episode%20 == 0:
r1_max =r1
Curve=np.mat([pl,vl,ul,al])
CurveData=pd.DataFrame(data=Curve.T,columns=['s','v','acc','action'])
CurveData.to_csv("./Curve/"+str(episode)+"_CurveData.csv")
if episode==Max_iteras-1:
print(r1)
# f1 = open('datat.txt', 'r+')
# f1.read()
# print(episode, (step + 5)/5, file=f1)
# f1.close()
r.append(r1)
print('Episode finished after {} timesteps'.format((step + 5)/5))
break
# if (5000 > episode >= 4500):
# print(o1)
# f2 = open('vs.txt', 'r+')
# f2.close()
# break
step += 1
total_step += 1
        # finally plot the results
print(episode)
if episode%20 ==0 or episode==Max_iteras-1:
trc.plotSpeedLimitRoadGrad('relative')
mplt.plot(pl,vl)
mplt.savefig("./img/"+str(episode)+"v-s.png")
mplt.show()
mplt.plot(pl,ul)
mplt.savefig("./img/"+str(episode)+"u-s.png")
mplt.show()
draw_mean(al,str(episode)+"action-s")
# mplt.savefig("./img/"+str(episode)+"action-s.png")
# mplt.show()
return
# end of game
if __name__ == "__main__":
print("path:"+sys.path[0])
global r,energy,tlist,RL
tf.reset_default_graph()
env = TrainLine(110)
env.seed(1)
RL = DeepQNetwork(env.n_actions, env.n_features,
learning_rate=0.0001,
                      reward_decay=0.99,    # reward discount factor
                      e_greedy=0.6,  # exploration (epsilon-greedy) threshold
replace_target_iter=512,
memory_size=10000,
batch_size=256,
e_greedy_increment=0.35/3000,
# output_graph=True
)
# RL.LoadModel()
energy = []
r = []
tlist = []
run_train()
RL.plot_cost()
plot(r,'reward')
plot(energy,'energy')
plot(tlist,'time')
draw_mean(r,'reward')
draw_mean(energy,'energy')
draw_mean(tlist,'time')
draw_mean(RL.cost_his,'mean_cost')
rdata = pd.DataFrame(r)
rdata.to_csv("reward.csv")
tdata = pd.DataFrame(tlist)
tdata.to_csv("timeError.csv")
costData = pd.DataFrame(RL.cost_his)
costData.to_csv("costData.csv")
Edata = | pd.DataFrame(energy) | pandas.DataFrame |
# Gist example of IB wrapper from here: https://gist.github.com/robcarver17/f50aeebc2ecd084f818706d9f05c1eb4
#
# Download API from http://interactivebrokers.github.io/#
# (must be at least version 9.73)
#
# Install python API code /IBJts/source/pythonclient $ python3 setup.py install
#
# Note: The test cases, and the documentation refer to a python package called IBApi,
# but the actual package is called ibapi. Go figure.
#
# Get the latest version of the gateway:
# https://www.interactivebrokers.com/en/?f=%2Fen%2Fcontrol%2Fsystemstandalone-ibGateway.php%3Fos%3Dunix
# (for unix: windows and mac users please find your own version)
#
# Run the gateway
#
# user: edemo
# pwd: <PASSWORD>
#
# duration units and bar sizes:
# https://interactivebrokers.github.io/tws-api/historical_bars.html#hd_duration
# limitations:
# https://interactivebrokers.github.io/tws-api/historical_limitations.html
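#
# Rough usage sketch (illustrative only; host/port/clientid must match your own gateway):
#
#   app = TestApp("127.0.0.1", 4001, 10)
#   contract, details = app.get_stock_contract(ticker="SNAP")
#   df = app.get_hist_data_date_range(contract, whatToShow="TRADES",
#                                     barSizeSetting="1 day", start_date="20180101")
#   app.disconnect()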
import os
import time
import pprint
import queue
import datetime
import traceback
from pytz import timezone
from pathlib import Path
import pandas as pd
import numpy as np
from tqdm import tqdm
import seaborn as sns
import matplotlib.pyplot as plt
from ibapi.wrapper import EWrapper
from ibapi.client import EClient
from ibapi.contract import Contract as IBcontract
from threading import Thread
DEFAULT_HISTORIC_DATA_ID = 50
DEFAULT_GET_CONTRACT_ID = 43
DEFAULT_GET_NP_ID = 42
DEFAULT_GET_EARLIEST_ID = 1
DEFAULT_HISTORIC_NEWS_ID = 1001
## marker for when queue is finished
FINISHED = object()
STARTED = object()
TIME_OUT = object()
class finishableQueue(object):
def __init__(self, queue_to_finish):
self._queue = queue_to_finish
self.status = STARTED
def get(self, timeout):
"""
Returns a list of queue elements once timeout is finished, or a FINISHED flag is received in the queue
:param timeout: how long to wait before giving up
:return: list of queue elements
"""
contents_of_queue = []
finished = False
while not finished:
try:
current_element = self._queue.get(timeout=timeout)
if current_element is FINISHED:
finished = True
self.status = FINISHED
else:
contents_of_queue.append(current_element)
## keep going and try and get more data
except queue.Empty:
## If we hit a time out it's most probable we're not getting a finished element any time soon
## give up and return what we have
finished = True
self.status = TIME_OUT
return contents_of_queue
def timed_out(self):
return self.status is TIME_OUT
class TestWrapper(EWrapper):
"""
The wrapper deals with the action coming back from the IB gateway or TWS instance
We override methods in EWrapper that will get called when this action happens, like currentTime
Extra methods are added as we need to store the results in this object
"""
def __init__(self):
self._my_contract_details = {}
self._my_historic_data_dict = {}
self._my_earliest_timestamp_dict = {}
self._my_np_dict = {}
self._my_hn_dict = {}
self._my_na_dict = {}
self._my_errors = queue.Queue()
## error handling code
def init_error(self):
error_queue = queue.Queue()
self._my_errors = error_queue
def get_error(self, timeout=5):
if self.is_error():
try:
return self._my_errors.get(timeout=timeout)
except queue.Empty:
return None
return None
def is_error(self):
an_error_if=not self._my_errors.empty()
return an_error_if
def error(self, id, errorCode, errorString):
        ## Overridden method
errormsg = "IB error id %d errorcode %d string %s" % (id, errorCode, errorString)
self._my_errors.put(errormsg)
## get contract details code
def init_contractdetails(self, reqId):
self._my_contract_details[reqId] = queue.Queue()
return self._my_contract_details[reqId]
def contractDetails(self, reqId, contractDetails):
## overridden method
if reqId not in self._my_contract_details.keys():
self.init_contractdetails(reqId)
self._my_contract_details[reqId].put(contractDetails)
def contractDetailsEnd(self, reqId):
        ## overridden method
if reqId not in self._my_contract_details.keys():
self.init_contractdetails(reqId)
self._my_contract_details[reqId].put(FINISHED)
def init_historicprices(self, tickerid):
self._my_historic_data_dict[tickerid] = queue.Queue()
return self._my_historic_data_dict[tickerid]
def init_earliest_timestamp(self, tickerid):
self._my_earliest_timestamp_dict[tickerid] = queue.Queue()
return self._my_earliest_timestamp_dict[tickerid]
def init_np(self, tickerid):
self._my_np_dict[tickerid] = queue.Queue()
return self._my_np_dict[tickerid]
def init_hn(self, requestId):
self._my_hn_dict[requestId] = queue.Queue()
return self._my_hn_dict[requestId]
def init_na(self, requestId):
self._my_na_dict[requestId] = queue.Queue()
return self._my_na_dict[requestId]
def historicalData(self, tickerid, bar):
        ## Overridden method
## Note I'm choosing to ignore barCount, WAP and hasGaps but you could use them if you like
# pprint.pprint(bar.__dict__)
bardata = (bar.date, bar.open, bar.high, bar.low, bar.close, bar.volume)
historic_data_dict = self._my_historic_data_dict
## Add on to the current data
if tickerid not in historic_data_dict.keys():
self.init_historicprices(tickerid)
historic_data_dict[tickerid].put(bardata)
def headTimestamp(self, tickerid, headTimestamp:str):
## overridden method
if tickerid not in self._my_earliest_timestamp_dict.keys():
self.init_earliest_timestamp(tickerid)
self._my_earliest_timestamp_dict[tickerid].put(headTimestamp)
self._my_earliest_timestamp_dict[tickerid].put(FINISHED)
def newsProviders(self, newsProviders):
## overridden method
tickerid = DEFAULT_GET_NP_ID
if tickerid not in self._my_np_dict.keys():
self.init_np(tickerid)
self._my_np_dict[tickerid].put(newsProviders)
self._my_np_dict[tickerid].put(FINISHED)
def historicalDataEnd(self, tickerid, start:str, end:str):
        ## overridden method
if tickerid not in self._my_historic_data_dict.keys():
self.init_historicprices(tickerid)
self._my_historic_data_dict[tickerid].put(FINISHED)
def historicalNews(self, requestId, time, providerCode, articleId, headline):
newsdata = (time, providerCode, articleId, headline)
newsdict = self._my_hn_dict
if requestId not in newsdict.keys():
self.init_hn(requestId)
newsdict[requestId].put(newsdata)
def historicalNewsEnd(self, requestId, hasMore):
if requestId not in self._my_hn_dict.keys():
self.init_hn(requestId)
if hasMore:
print('more results available')
self._my_hn_dict[requestId].put(FINISHED)
def newsArticle(self, requestId, articleType, articleText):
if requestId not in self._my_na_dict.keys():
self.init_na(requestId)
self._my_na_dict[requestId].put((articleType, articleText))
self._my_na_dict[requestId].put(FINISHED)
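# Minimal sketch of the queue pattern used throughout this file (separate from the IB classes):
# the wrapper thread puts elements followed by the FINISHED marker, and finishableQueue.get
# drains everything until that marker arrives or the timeout expires.
def _example_finished_queue():
    raw_queue = queue.Queue()
    for bar in [("20180101", 1.0), ("20180102", 2.0)]:
        raw_queue.put(bar)
    raw_queue.put(FINISHED)
    fq = finishableQueue(raw_queue)
    contents = fq.get(timeout=1)  # returns both bars once FINISHED is seen
    return contents, fq.timed_out()  # timed_out() is False here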
class TestClient(EClient):
"""
The client method
We don't override native methods, but instead call them from our own wrappers
"""
def __init__(self, wrapper):
## Set up with a wrapper inside
EClient.__init__(self, wrapper)
def resolve_ib_contract(self, ibcontract, reqId=DEFAULT_GET_CONTRACT_ID):
"""
From a partially formed contract, returns a fully fledged version
:returns fully resolved IB contract
"""
## Make a place to store the data we're going to return
contract_details_queue = finishableQueue(self.init_contractdetails(reqId))
print("Getting full contract details from the server... ")
self.reqContractDetails(reqId, ibcontract)
## Run until we get a valid contract(s) or get bored waiting
MAX_WAIT_SECONDS = 3
new_contract_details = contract_details_queue.get(timeout = MAX_WAIT_SECONDS)
while self.wrapper.is_error():
print(self.get_error())
if contract_details_queue.timed_out():
print("Exceeded maximum wait for wrapper to confirm finished - seems to be normal behaviour")
if len(new_contract_details)==0:
print("Failed to get additional contract details: returning unresolved contract")
return ibcontract, new_contract_details
if len(new_contract_details)>1:
print("got multiple contracts; using first one")
new_contract_details = new_contract_details[0]
resolved_ibcontract = new_contract_details.contract
return resolved_ibcontract, new_contract_details
def get_IB_historical_data(self,
ibcontract,
whatToShow="ADJUSTED_LAST",
durationStr="1 Y",
barSizeSetting="1 day",
tickerid=DEFAULT_HISTORIC_DATA_ID,
latest_date=None):
"""
Returns historical prices for a contract, up to latest_date
if latest_date is none, uses todays date
latest_date should be of form %Y%m%d %H:%M:%S %Z
ibcontract is a Contract
:returns list of prices in 4 tuples: Open high low close volume
"""
# set latest_date to today and now if it is None
if latest_date is None:
latest_date = get_latest_date_local()
## Make a place to store the data we're going to return
historic_data_queue = finishableQueue(self.init_historicprices(tickerid))
# Request some historical data. Native method in EClient
self.reqHistoricalData(
tickerid, # tickerId,
ibcontract, # contract,
latest_date, # endDateTime,
durationStr, # durationStr,
barSizeSetting, # barSizeSetting,
whatToShow=whatToShow,
useRTH=1,
formatDate=1,
keepUpToDate=False, # <<==== added for api 9.73.2
chartOptions=[] ## chartOptions not used
)
## Wait until we get a completed data, an error, or get bored waiting
MAX_WAIT_SECONDS = 5
while True:
print("Getting historical data from the server... could take %d seconds to complete " % MAX_WAIT_SECONDS)
historic_data = historic_data_queue.get(timeout=MAX_WAIT_SECONDS)
er = ''
while self.wrapper.is_error():
er = self.get_error()
print(er)
if 'Not connected' in er:
print('sleeping 30s to wait for reconnection; suggest restarting TWS')
time.sleep(30)
if "HMDS query returned no data" in er:
print(historic_data)
print(historic_data is None)
            if historic_data_queue.timed_out() and not er:
print("Exceeded maximum wait for wrapper to confirm finished - seems to be normal behaviour")
# only keep trying if not connected
            if 'Not connected' not in er:
break
# TODO: this is cancelling query early maybe?
self.cancelHistoricalData(tickerid)
# convert to pandas dataframe
# date, open, high, low, close, vol
# already adjusted for splits
if len(historic_data) != 0:
df = pd.DataFrame.from_records(data=historic_data, index='datetime', columns=['datetime', 'open', 'high', 'low', 'close', 'volume'])
df.index = pd.to_datetime(df.index)
if whatToShow not in ['TRADES', 'ADJUSTED_LAST']:
# volume only available for trades
df.drop('volume', axis=1, inplace=True)
return df
else:
return historic_data
def getEarliestTimestamp(self, contract, whatToShow='ADJUSTED_LAST', useRTH=1, formatDate=1, tickerid=DEFAULT_GET_EARLIEST_ID):
# parameters: https://interactivebrokers.github.io/tws-api/classIBApi_1_1EClient.html#a059b5072d1e8e8e96394e53366eb81f3
## Make a place to store the data we're going to return
## Wait until we get a completed data, an error, or get bored waiting
MAX_WAIT_SECONDS = 2
tries = 0
while True:
tries += 1
earliest_timestamp_queue = finishableQueue(self.init_earliest_timestamp(tickerid))
self.reqHeadTimeStamp(tickerid, contract, whatToShow, useRTH, formatDate)
print("Getting earliest timestamp from the server... could take %d seconds to complete " % MAX_WAIT_SECONDS)
earliest = earliest_timestamp_queue.get(timeout=MAX_WAIT_SECONDS)
while self.wrapper.is_error():
er = self.get_error()
print(er)
if 'No head time stamp' in er:
return None
break
if earliest_timestamp_queue.timed_out():
print("Exceeded maximum wait for wrapper to confirm finished - seems to be normal behaviour")
self.cancelHeadTimeStamp(tickerid)
            if len(earliest) != 0:
                break
            if tries == 20:
                return None
return earliest[0] # first element in list
def getNewsProviders(self):
"""
available news providers by default are
[140007057343600: BRFG, Briefing.com General Market Columns,
140007057342704: BRFUPDN, Briefing.com Analyst Actions,
140007057343544: DJNL, Dow Jones Newsletters]
"""
## Make a place to store the data we're going to return
tickerid = DEFAULT_GET_NP_ID
np_queue = finishableQueue(self.init_np(tickerid))
# Request news providers. Native method in EClient
self.reqNewsProviders()
## Wait until we get a completed data, an error, or get bored waiting
MAX_WAIT_SECONDS = 2
print("Getting list of news providers from the server... could take %d seconds to complete " % MAX_WAIT_SECONDS)
nps = np_queue.get(timeout=MAX_WAIT_SECONDS)
while self.wrapper.is_error():
print(self.get_error())
if np_queue.timed_out():
print("Exceeded maximum wait for wrapper to confirm finished - seems to be normal behaviour")
return nps[0] # list within a list
def getHistoricalNews(self, reqId, conId, providerCodes, startDateTime, endDateTime, totalResults):
hn_queue = finishableQueue(self.init_hn(reqId))
self.reqHistoricalNews(reqId, conId, providerCodes, startDateTime, endDateTime, totalResults, historicalNewsOptions=[])
## Wait until we get a completed data, an error, or get bored waiting
MAX_WAIT_SECONDS = 15
print("Getting historical news from the server... could take %d seconds to complete " % MAX_WAIT_SECONDS)
hn = hn_queue.get(timeout=MAX_WAIT_SECONDS)
while self.wrapper.is_error():
print(self.get_error())
if hn_queue.timed_out():
print("Exceeded maximum wait for wrapper to confirm finished - seems to be normal behaviour")
return hn
def getNewsArticle(self, reqId, providerCode, articleId):
na_queue = finishableQueue(self.init_na(reqId))
self.reqNewsArticle(reqId, providerCode, articleId, [])
## Wait until we get a completed data, an error, or get bored waiting
MAX_WAIT_SECONDS = 5
print("Getting historical news from the server... could take %d seconds to complete " % MAX_WAIT_SECONDS)
na = na_queue.get(timeout=MAX_WAIT_SECONDS)
while self.wrapper.is_error():
print(self.get_error())
if na_queue.timed_out():
print("Exceeded maximum wait for wrapper to confirm finished - seems to be normal behaviour")
return na
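# Illustrative sketch (separate from the client classes): the bar tuples collected by
# historicalData are reshaped into a datetime-indexed OHLCV frame in the same way as
# get_IB_historical_data above. The bars below are made up.
def _example_bars_to_frame():
    bars = [("20180102", 170.1, 172.3, 169.8, 172.0, 25000),
            ("20180103", 172.0, 174.6, 171.5, 174.3, 31000)]
    df = pd.DataFrame.from_records(data=bars, index='datetime',
                                   columns=['datetime', 'open', 'high', 'low', 'close', 'volume'])
    df.index = pd.to_datetime(df.index)
    return df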
class TestApp(TestWrapper, TestClient):
def __init__(self, ipaddress, portid, clientid):
TestWrapper.__init__(self)
TestClient.__init__(self, wrapper=self)
self.connect(ipaddress, portid, clientid)
thread = Thread(target = self.run)
thread.start()
setattr(self, "_thread", thread)
self.init_error()
def get_hist_data_date_range(self,
ibcontract,
whatToShow='TRADES',
barSizeSetting='3 mins',
start_date=None,
end_date=None,
tickerid=DEFAULT_HISTORIC_DATA_ID):
"""
gets historic data for date range
if start_date is None, then first finds earliest date available,
and gets all data to there
if end_date is None, will get data to latest possible time
start_date and end_date should be strings in format YYYYMMDD
useful options for whatToShow for stocks can be:
ADJUSTED_LAST (adj for splits and dividends)
TRADES (only adjusted for splits)
BID
ASK
OPTION_IMPLIED_VOLATILITY
HISTORICAL_VOLATILITY
"""
# convert start_date string to datetime date object for comparisons
start_date_datetime_date = pd.to_datetime('1800-01-01').date() # early date so it doesn't match df.index.date below (if not updating data)
if start_date is not None:
# go one day past start date just to make sure we have all data
start_date_datetime_date = (pd.to_datetime(start_date) - pd.Timedelta('1D')).date()
smallbars = ['1 secs', '5 secs', '10 secs', '15 secs', '30 secs', '1 min']
max_step_sizes = {'1 secs': '1800 S', # 30 mins
'5 secs': '3600 S', # 1 hour
'10 secs': '14400 S', # 4 hours
'15 secs': '14400 S', # 4 hours
'30 secs': '28800 S', # 8 hours
'1 min': '1 D',
'2 mins': '2 D',
'3 mins': '1 W',
'5 mins': '1 W',
'10 mins': '1 W',
'15 mins': '1 W',
'20 mins': '1 W',
'30 mins': '1 M',
'1 hour': '1 M',
'2 hours': '1 M',
'3 hours': '1 M',
'4 hours': '1 M',
'8 hours': '1 M',
'1 day': '1 Y',
'1 week': '1 Y',
'1 month': '1 Y'}
# TODO: check if earliest timestamp is nothing or before/after end_date
earliest_timestamp = self.getEarliestTimestamp(ibcontract, whatToShow=whatToShow, tickerid=tickerid)
if earliest_timestamp is not None:
earliest_datestamp = earliest_timestamp[:8]
# if timeout, will return empty list
df = []
if end_date is None:
latest_date = None
else:
            # TODO: need to adapt this to other than mountain time
latest_date = end_date + ' ' + get_close_hour_local() + ':00:00'
# list is returned if there is an error or something?
tries = 0
while type(df) is list:
tries += 1
df = self.get_IB_historical_data(ibcontract,
whatToShow=whatToShow,
durationStr=max_step_sizes[barSizeSetting],
barSizeSetting=barSizeSetting,
tickerid=tickerid,
latest_date=latest_date)
if tries == 10:
                print('tried to get historic data 10x and failed, returning None')
return None
earliest_date = df.index[0]
full_df = df
self.df = full_df
df_dates = df.index.date
# keep going until the same result is returned twice...not perfectly efficient but oh well
previous_earliest_date = None
i = 0
start_time = time.time()
is_list = 0
while previous_earliest_date != earliest_date:
i += 1
print(i)
print(previous_earliest_date)
print(earliest_date)
# TODO: if "HMDS query returned no data" in error lots of times, maybe finish it
df = self.get_IB_historical_data(ibcontract,
whatToShow=whatToShow,
durationStr=max_step_sizes[barSizeSetting],
barSizeSetting=barSizeSetting,
tickerid=tickerid,
latest_date=earliest_date.strftime('%Y%m%d %H:%M:%S'))
if type(df) is list:
is_list += 1
# we've probably hit the earliest time we can get
if earliest_timestamp is not None:
if is_list >= 3 and earliest_date.date().strftime('%Y%m%d') == earliest_datestamp:
print("hit earliest timestamp")
break
if is_list >= 10:
print('hit 10 lists in a row')
break
df_dates = None
continue
else:
is_list = 0
previous_earliest_date = earliest_date
earliest_date = df.index[0]
full_df = pd.concat([df, full_df])
self.df = full_df
df_dates = df.index.date
if df_dates.min() <= start_date_datetime_date:
print('start_date_datetime in dates, ending')
break
# no more than 6 requests every 2s for bars under 30s
# https://interactivebrokers.github.io/tws-api/historical_limitations.html
# TODO: take care of 60 requests per 10 mins
if barSizeSetting in smallbars and i >= 6:
                time_left = max(0.0, 2 - (time.time() - start_time))
                i = 0
                time.sleep(time_left)
                start_time = time.time()
return full_df
def get_stock_contract(self, ticker='SNAP', reqId=DEFAULT_HISTORIC_DATA_ID):
"""
gets resolved IB contract for stocks
assumes ISLAND exchange for now (NASDAQ and maybe others?)
"""
# available sec types: https://interactivebrokers.github.io/tws-api/classIBApi_1_1Contract.html#a4f83111c0ea37a19fe1dae98e3b67456
ibcontract = IBcontract()
ibcontract.secType = 'STK'
# get todays date, format as YYYYMMDD -- need to check this is correct
# today = datetime.datetime.today().strftime('%Y%m%d')
# ibcontract.lastTradeDateOrContractMonth = '20180711'#today
ibcontract.symbol = ticker
ibcontract.exchange = 'ISLAND'
resolved_ibcontract, contract_details = self.resolve_ib_contract(ibcontract=ibcontract, reqId=reqId)
return resolved_ibcontract, contract_details
def get_otc_contract(self, ticker='SNAP', reqId=DEFAULT_HISTORIC_DATA_ID):
"""
gets resolved IB contract for OTC / pink-sheet stocks
uses the ARCAEDGE exchange
"""
# available sec types: https://interactivebrokers.github.io/tws-api/classIBApi_1_1Contract.html#a4f83111c0ea37a19fe1dae98e3b67456
ibcontract = IBcontract()
ibcontract.secType = 'STK'
# get todays date, format as YYYYMMDD -- need to check this is correct
# today = datetime.datetime.today().strftime('%Y%m%d')
# ibcontract.lastTradeDateOrContractMonth = '20180711'#today
ibcontract.symbol = ticker
ibcontract.exchange = 'ARCAEDGE'
resolved_ibcontract, contract_details = self.resolve_ib_contract(ibcontract=ibcontract, reqId=reqId)
return resolved_ibcontract, contract_details
def get_forex_contract(self, main_currency='USD', second_currency='JPY', reqId=DEFAULT_HISTORIC_DATA_ID):
"""
gets resolved IB contract for a forex (CASH) pair on IDEALPRO
note: currently hardcoded to the EUR/GBP pair rather than the main_currency/second_currency arguments
"""
# available sec types: https://interactivebrokers.github.io/tws-api/classIBApi_1_1Contract.html#a4f83111c0ea37a19fe1dae98e3b67456
ibcontract = IBcontract()
ibcontract.symbol = 'EUR'#second_currency
ibcontract.secType = "CASH"
ibcontract.currency = 'GBP'#main_currency
ibcontract.exchange = "IDEALPRO"
resolved_ibcontract, contract_details = self.resolve_ib_contract(ibcontract=ibcontract, reqId=reqId)
return resolved_ibcontract, contract_details
def download_all_history_stock(self, ticker='SNAP', barSizeSetting='3 mins', reqId=DEFAULT_HISTORIC_DATA_ID, what='TRADES', exchange='ISLAND'):
"""
downloads all historical data for a stock including
TRADES or ADJUSTED_LAST
BID
ASK
OPTION_IMPLIED_VOLATILITY
if data already exists, updates and appends to it
'what' parameter can be 'ADJUSTED_LAST' or 'TRADES'.
ADJUSTED_LAST is the dividend-adjusted prices; trades is only split-adjusted
"""
if exchange == 'ISLAND': # NASDAQ / regular stocks
contract, contract_details = self.get_stock_contract(ticker=ticker, reqId=reqId)
elif exchange == 'ARCAEDGE': # OTC / PINK
contract, contract_details = self.get_otc_contract(ticker=ticker, reqId=reqId)
if what == 'TRADES':
folder = '/home/nate/Dropbox/data/ib_full_adj/data/'
elif what == 'ADJUSTED_LAST':
folder = '/home/nate/Dropbox/data/ib_split_adj_only/data/'
trades_start_date = None
bids_start_date = None
asks_start_date = None
opt_vol_start_date = None
tr_mode = 'w'
bid_mode = 'w'
ask_mode = 'w'
opt_vol_mode = 'w'
bss = barSizeSetting.replace(' ', '_')
trades_filename = folder + ticker + '_trades_' + bss + '.h5'
bid_filename = folder + ticker + '_bid_' + bss + '.h5'
ask_filename = folder + ticker + '_ask_' + bss + '.h5'
opt_vol_filename = folder + ticker + '_opt_vol_' + bss + '.h5'
# TODO: provide option for which files to download;
# check each file individually and update individually
if os.path.exists(trades_filename):
print('trades file exists, going to append...')
cur_trades = pd.read_hdf(trades_filename)
latest_trades_datetime = cur_trades.index[-1]
trades_start_date = latest_trades_datetime.strftime('%Y%m%d')
tr_mode = 'r+'
print('latest trades date is', trades_start_date)
if os.path.exists(bid_filename):
print('bids file exists, going to append')
cur_bids = pd.read_hdf(bid_filename)
latest_bids_datetime = cur_bids.index[-1]
bids_start_date = latest_bids_datetime.strftime('%Y%m%d')
bid_mode='r+'
if os.path.exists(ask_filename):
print('asks filename exists, going to append')
cur_asks = pd.read_hdf(ask_filename)
latest_asks_datetime = cur_asks.index[-1]
asks_start_date = latest_asks_datetime.strftime('%Y%m%d')
ask_mode='r+'
if os.path.exists(opt_vol_filename):
print('opt_vol file exists, gonna append')
cur_opt_vol = pd.read_hdf(opt_vol_filename)
latest_opt_vol_datetime = cur_opt_vol.index[-1]
opt_vol_start_date = latest_opt_vol_datetime.strftime('%Y%m%d')
opt_vol_mode = 'r+' # append to existing files, should throw error if they don't exist
end_date = None#'20170401' # smaller amount of data for prototyping/testing
print('\n\n\ngetting trades...\n\n\n')
trades = self.get_hist_data_date_range(contract, barSizeSetting=barSizeSetting, whatToShow=what, end_date=end_date, start_date=trades_start_date, tickerid=reqId)
if trades is not None:
# write or append data
# TODO: function for cleaning up data and remove duplicates, sort data
# TODO: only append things after the latest datetime, and do it for trades, bid, etc separately
# if appending, get next index after latest existing datetime
tr_append = False # need to set option in to_hdf
bid_append = False
ask_append = False
opt_vol_append = False
if tr_mode == 'r+':
next_trades_idx = trades.loc[latest_trades_datetime:]
if next_trades_idx.shape[0] <= 1 or cur_trades.iloc[-1].equals(trades.iloc[-1]):
print('already have all the data I think for trades')
# return
else:
next_trades_idx = next_trades_idx.index[1]
trades = trades.loc[next_trades_idx:]
tr_append=True
trades.to_hdf(trades_filename, key='data', format='table', complevel=9, complib='blosc:lz4', mode=tr_mode, append=tr_append)
else:
trades.to_hdf(trades_filename, key='data', format='table', complevel=9, complib='blosc:lz4', mode=tr_mode, append=tr_append)
print('\n\n\ngetting bids...\n\n\n')
bid = self.get_hist_data_date_range(contract, barSizeSetting=barSizeSetting, whatToShow='BID', end_date=end_date, start_date=bids_start_date, tickerid=reqId)
if bid is not None:
if bid_mode == 'r+':
next_bids_idx = bid.loc[latest_bids_datetime:]
if next_bids_idx.shape[0] <= 1 or cur_bids.iloc[-1].equals(bid.iloc[-1]):
print('already have all bids data I think')
else:
next_bids_idx = next_bids_idx.index[1]
bid = bid.loc[next_bids_idx:]
bid_append=True
bid.to_hdf(bid_filename, key='data', format='table', complevel=9, complib='blosc:lz4', mode=bid_mode, append=bid_append)
else:
bid.to_hdf(bid_filename, key='data', format='table', complevel=9, complib='blosc:lz4', mode=bid_mode, append=bid_append)
print('\n\n\ngetting asks...\n\n\n')
ask = self.get_hist_data_date_range(contract, barSizeSetting=barSizeSetting, whatToShow='ASK', end_date=end_date, start_date=asks_start_date, tickerid=reqId)
if ask is not None:
if ask_mode == 'r+':
next_asks_idx = ask.loc[latest_asks_datetime:]
if next_asks_idx.shape[0] <= 1 or cur_asks.iloc[-1].equals(ask.iloc[-1]):
print('already have all asks data I think')
else:
next_asks_idx = next_asks_idx.index[1]
ask = ask.loc[next_asks_idx:]
ask_append = True
ask.to_hdf(ask_filename, key='data', format='table', complevel=9, complib='blosc:lz4', mode=ask_mode, append=ask_append)
else:
ask.to_hdf(ask_filename, key='data', format='table', complevel=9, complib='blosc:lz4', mode=ask_mode, append=ask_append)
print('\n\n\ngetting opt_vol...\n\n\n')
opt_vol = self.get_hist_data_date_range(contract, barSizeSetting=barSizeSetting, whatToShow='OPTION_IMPLIED_VOLATILITY', end_date=end_date, start_date=opt_vol_start_date, tickerid=reqId)
if opt_vol is not None:
if opt_vol_mode == 'r+':
# TODO: doesn't seem to be working properly for opt_vol, seems to append every time
next_opt_vol_idx = opt_vol.loc[latest_opt_vol_datetime:]
if next_opt_vol_idx.shape[0] <= 1 or cur_opt_vol.iloc[-1].equals(opt_vol.iloc[-1]):
print('already have all opt_vol data I think')
else:
next_opt_vol_idx = next_opt_vol_idx.index[1]
opt_vol = opt_vol.loc[next_opt_vol_idx:]
opt_vol_append = True
opt_vol.to_hdf(opt_vol_filename, key='data', format='table', complevel=9, complib='blosc:lz4', mode=opt_vol_mode, append=opt_vol_append)
else:
opt_vol.to_hdf(opt_vol_filename, key='data', format='table', complevel=9, complib='blosc:lz4', mode=opt_vol_mode, append=opt_vol_append)
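# Example driver (illustrative only; `app` is a hypothetical, already-connected
# instance of this class):
#
#     app.download_all_history_stock(ticker='SNAP',
#                                    barSizeSetting='3 mins',
#                                    what='TRADES',
#                                    exchange='ISLAND')
#
# This creates or appends to the four HDF5 files (trades, bid, ask, opt_vol)
# for the ticker under the configured Dropbox data folder.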
def get_earliest_dates(self, ticker):
contract, contract_details = self.get_stock_contract(ticker=ticker)
for t in ['ADJUSTED_LAST', 'BID', 'ASK', 'OPTION_IMPLIED_VOLATILITY']:
earliest = self.getEarliestTimestamp(contract, whatToShow=t, tickerid=200)
print(t)
print(earliest)
def get_datetime_from_date(date='2018-06-30'):
"""
not sure if I need this anymore...
converts a date to a datetime (end-of-day) for historical data gathering
date should be a string in format YYYY-MM-DD
uses eastern timezone (EDT or EST) by default
TODO: convert eastern to local timezone from machine
"""
tz='US/Eastern'
tz_obj = timezone(tz)
date = datetime.datetime.strptime(date, '%Y-%m-%d')
date = date.replace(hour = 16, minute = 0, second = 0)
date = tz_obj.localize(date)
return date.strftime('%Y%m%d %H:%M:%S %Z')
def get_latest_date_local():
"""
gets the latest date with the machine's local timezone
endDateTime and startDateTime "Uses TWS timezone specified at login."
at least for tick-by-tick data
"""
machines_tz = datetime.datetime.now(datetime.timezone.utc).astimezone().tzname()
latest_date = datetime.datetime.today()
# doesn't work with machines tz in there
latest_date = latest_date.strftime('%Y%m%d %H:%M:%S')# + machines_tz
return latest_date
def get_close_hour_local():
"""
gets closing hour in local machine time (4 pm Eastern)
"""
eastern_tz = timezone('US/Eastern')
eastern_close = datetime.datetime(year=2018, month=6, day=29, hour=16)
eastern_close = eastern_tz.localize(eastern_close)
return str(eastern_close.astimezone().hour)
def get_home_dir(repo_name='scrape_ib'):
cwd = str(Path(__file__).resolve())
cwd_list = cwd.split('/')
repo_position = [i for i, s in enumerate(cwd_list) if s == repo_name]
if len(repo_position) > 1:
print("error! more than one intance of repo name in path")
return None
home_dir = '/'.join(cwd_list[:repo_position[0] + 1]) + '/'
return home_dir
def load_data(ticker='SNAP', barSizeSetting='3 mins', what='TRADES'):
"""
loads historical tick data
"""
if what == 'TRADES':
folder = '/home/nate/Dropbox/data/ib_full_adj/data/'
elif what == 'ADJUSTED_LAST':
folder = '/home/nate/Dropbox/data/ib_split_adj_only/data/'
bss = barSizeSetting.replace(' ', '_')
trades = pd.read_hdf(folder + ticker + '_trades_' + bss + '.h5')
# fill 0 volume with 1
trades.loc[trades['volume'] == 0, 'volume'] = 1
bid = | pd.read_hdf(folder + ticker + '_bid_' + bss + '.h5') | pandas.read_hdf |
'''
<< New Release >>
For stability reasons, R packages have been replaced by recent Python packages (where available) or removed otherwise.
'''
### SCIKIT-SURVIVAL
from sksurv.linear_model import CoxPHSurvivalAnalysis, CoxnetSurvivalAnalysis
from sksurv.ensemble import RandomSurvivalForest
from lifelines import WeibullAFTFitter, LogNormalAFTFitter, LogLogisticAFTFitter
import numpy as np
import pandas as pd
import sys, os, warnings, time
#=================================================================
class sksurvSurvival( object ):
""" A parent class for all survival estimators from sksuv. Particular survival models will inherit from this class."""
# methods
def __init__(self):
exec('1+1') # dummy instruction
def fit(self,X,T,Y):
# Put the data in the proper format # check data type first
y = [(Y.iloc[i,0], T.iloc[i,0]) for i in range(len(Y))]
y = np.array(y, dtype=[('status', 'bool'),('time','<f8')])
# print(self.name)
self.model.fit(X,y)
def predict(self,X, time_horizons):
if self.name in ['CoxPH', 'CoxPHRidge']:
surv = self.model.predict_survival_function(X) #returns StepFunction object
preds_ = np.zeros([np.shape(surv)[0], len(time_horizons)])
for t, eval_time in enumerate(time_horizons):
if eval_time > np.max(surv[0].x): #all have the same maximum surv.x
eval_time = np.max(surv[0].x)
preds_[:, t] = np.asarray([(1. - surv[i](eval_time)) for i in range(len(surv))]) #return cif at self.median_tte
elif self.name in ['RandomSurvForest']:
surv = self.model.predict_survival_function(X) #returns numpy array
surv_times = self.model.event_times_
preds_ = np.zeros([np.shape(surv)[0], len(time_horizons)])
for t, eval_time in enumerate(time_horizons):
tmp_time = np.where(eval_time <= surv_times)[0]
if len(tmp_time) == 0:
preds_[:, t] = 1. - surv[:, 0]
else:
preds_[:, t] = 1. - surv[:, tmp_time[0]]
else:
preds_ = self.model.predict(X)
return float(self.direction)*preds_
#-----------------------------------------------------------------
class CoxPH(sksurvSurvival):
""" Cox proportional hazard model."""
def __init__(self):
super(CoxPH, self).__init__()
# super().__init__()
self.name = 'CoxPH'
self.model = CoxPHSurvivalAnalysis(alpha=0.01) # otherwise an error occurred
self.direction = 1
self.prob_FLAG = True
self.explained = "*Cox proportional model"
self.image_name = "Cox.png"
self.image_size = (500,500)
# ****Model hyper-parameters****
# ((alpha)) : ridge regression penalty. this is not used in CoxPH (c.f. CoxPHRidge)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# None used in CoxPH
def get_hyperparameter_space(self):
return []
#-----------------------------------------------------------------
class CoxPHRidge(sksurvSurvival):
""" Cox proportional hazard model with ridge regression penalty. """
def __init__(self,alpha=10.0):
super(CoxPHRidge, self).__init__()
# super().__init__()
self.alpha = alpha
self.name = 'CoxPHRidge'
self.model = CoxPHSurvivalAnalysis(alpha=self.alpha) #ridge regression penalty
self.direction = 1
self.prob_FLAG = True
self.explained = "*Cox proportional model with ridge regression"
self.image_name = "CoxPHRidge.png"
self.image_size = (500,500)
# ****Model hyper-parameters****
# ((alpha)) : ridge regression penalty.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def get_hyperparameter_space(self):
hyp_ = [{'name': 'CoxPHRidge.alpha', 'type': 'continuous', 'domain': (0.001,10),'dimensionality': 1}]
return hyp_
class RandomSurvForest(sksurvSurvival):
""" Cox proportional hazard model with ridge regression penalty. """
def __init__(self, n_estimators=100):
super(RandomSurvForest, self).__init__()
# super().__init__()
self.n_estimators = n_estimators
self.name = 'RandomSurvForest'
self.model = RandomSurvivalForest(n_estimators=self.n_estimators)
self.direction = 1
self.prob_FLAG = True
self.explained = "*Random Survival Forest"
self.image_name = "RandomSurvForest.png"
self.image_size = (500,500)
# ****Model hyper-parameters****
# ((n_estimators)) : number of trees in the random survival forest.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def get_hyperparameter_space(self):
hyp_ = [{'name': 'RandomSurvForest.n_estimators', 'type': 'continuous', 'domain': (50,500), 'dimensionality': 1}]
return hyp_
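# Minimal usage sketch for these wrappers (illustrative toy data, not from the
# original code):
#
#     import pandas as pd
#     X = pd.DataFrame({'age': [50, 60, 70, 80], 'marker': [1.2, 0.7, 2.1, 1.5]})
#     T = pd.DataFrame({'time': [5., 8., 3., 10.]})  # observed time in first column
#     Y = pd.DataFrame({'event': [1, 0, 1, 1]})      # event indicator in first column
#     model = CoxPHRidge(alpha=1.0)
#     model.fit(X, T, Y)
#     risk_at_5 = model.predict(X, time_horizons=[5.0])
#
# fit() reads the event flag from Y's first column and the time from T's first
# column when building the structured array for scikit-survival.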
#=================================================================
class lifelinesSurvival( object ):
""" A parent class for all survival estimators from sksuv. Particular survival models will inherit from this class."""
# methods
def __init__(self):
exec('1+1') # dummy instruction
def fit(self,X,T,Y):
# Put the data in the proper format # check data type first
df = | pd.concat([X, T, Y], axis=1) | pandas.concat |
#!/usr/bin/env python
"""
manta: microbial association network clustering toolbox.
The script takes a weighted and undirected network as input
and uses this to generate network clusters.
Moreover, it can generate a Cytoscape-compatible layout (with optional taxonomy input).
Detailed explanations are available in the headers of each file.
manta uses the file extension to import networks. Taxonomy tables should be given as tab-delimited files.
These tables can be used to generate a layout for cyjson files.
Other file formats do not export layout coordinates.
manta generates a scoring matrix and uses agglomerative clustering to assign cluster identities.
The scoring matrix is generated through a procedure involving network flow.
Nodes that cluster separately are removed and combined with identified clusters later on.
It is highly likely that networks will not converge neatly, as most real-world networks are unbalanced.
In that case, manta will apply the network flow procedure on a subset of the network.
The network flow procedure relies on the following assumption:
positions in the scoring matrix that are mostly positive throughout permutations, should have only positive values added.
The same is assumed for negative positions.
The ratio defines which positions are considered mostly positive or mostly negative.
Default numeric parameters:
-min Minimum number of clusters. Default: 2.
-ms Minimum cluster size as fraction of network size. Default: 0.2.
-max Maximum number of clusters. Default: 4.
-limit The limit defines the minimum percentage decrease in error per iteration.
If iterations do not decrease the error anymore, the matrix is considered converged. Default: 2.
-perm Number of permutation iterations for network subsetting during partial iterations. Default: number of nodes.
-subset Fraction of edges that are used for subsetting if the input graph is not balanced. Default: 0.8.
-ratio Fraction of scores that need to be positive or negative for edge scores to be considered stable. Default: 0.8.
-scale Edge scale used to separate out weak cluster assignments.
The larger the edge scale, the larger the weak cluster. Default: 0.8.
-rel Number of permutation iterations for reliability estimates.
By default, this number is estimated from the number of dyadic pairs.
-e Fraction of edges to rewire for reliability tests. Default: 0.1.
For demo purposes, we included a network generated from oral samples of bats.
This data was downloaded from QIITA: https://qiita.ucsd.edu/study/description/11815
<NAME>. et al. (2018). Associations between Afrotropical bats, parasites, and microbial symbionts. bioRxiv, 340109.
"""
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__status__ = 'Development'
__license__ = 'Apache 2.0'
import networkx as nx
import sys
import os
import argparse
import manta
from manta.cluster import cluster_graph
from manta.reliability import perm_clusters
from manta.cyjson import write_cyjson, read_cyjson
from manta.layout import generate_layout
import numpy as np
import pandas as pd
import logging.handlers
from pbr.version import VersionInfo
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# handler to sys.stdout
sh = logging.StreamHandler(sys.stdout)
sh.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
sh.setFormatter(formatter)
logger.addHandler(sh)
# handler to file
# only handler with 'w' mode, rest is 'a'
# once this handler is started, the file writing is cleared
# other handlers append to the file
logpath = "\\".join(os.getcwd().split("\\")[:-1]) + '\\manta.log'
# filelog path is one folder above manta
# pyinstaller creates a temporary folder, so log would be deleted
fh = logging.handlers.RotatingFileHandler(maxBytes=500,
filename=logpath, mode='a')
fh.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
def set_manta():
"""This parser gets input settings for running the *manta* clustering algorithm.
Apart from the parameters specified by cluster_graph,
it requires an input format that can be read by networkx."""
parser = argparse.ArgumentParser(
description='Run the microbial association network clustering algorithm.'
'Exporting as .cyjs allows for import into Cytoscape with '
'a cluster- and phylogeny-informed layout.')
parser.add_argument('-i', '--input_graph',
dest='graph',
help='Input network file. The format is detected based on the extension; \n'
'at the moment, .graphml, .txt (weighted edgelist), .gml and .cyjs are accepted. \n'
'If you set -i to "demo", a demo dataset will be loaded.',
default=None,
required=False)
parser.add_argument('-o', '--output_graph',
dest='fp',
help='Output network file. Specify full file path without extension.',
default=None, required=False)
parser.add_argument('-f', '--file_type',
dest='f',
help='Format of output network file. Default: cyjs.\n'
'The csv format exports cluster assignments as a csv table.',
choices=['gml', 'graphml', 'cyjs', 'csv'],
default='cyjs')
parser.add_argument('-tax', '--taxonomy_table',
dest='tax',
help='Filepath to tab-delimited taxonomy table. '
'This table is used to calculate edge weights during layout calculation. '
'If the taxonomy table is already included as node properties in the input network,'
'these node properties are used instead. ',
default=None)
parser.add_argument('--layout', dest='layout', action='store_true',
help='With this flag, layout coordinates are calculated for the network. '
'Only compatible with .cyjs output. ', required=False),
parser.set_defaults(layout=False)
parser.add_argument('-min', '--min_clusters',
dest='min', type=int,
required=False,
help='Minimum number of clusters. Default: 2.',
default=2)
parser.add_argument('-ms', '--min_size',
dest='ms', type=float,
required=False,
help='Minimum cluster size as fraction of network size divided by cluster number. Default: 0.2.',
default=0.2)
parser.add_argument('-max', '--max_clusters',
dest='max', type=int,
required=False,
help='Maximum number of clusters. Default: 4.',
default=4)
parser.add_argument('-limit', '--convergence_limit',
dest='limit', type=float,
required=False,
help='The limit defines the minimum percentage decrease in error per iteration.'
' If iterations do not decrease the error anymore, the matrix is considered converged. '
'Default: 2.',
default=2)
parser.add_argument('-iter', '--iterations',
dest='iter', type=int,
required=False,
help='Number of iterations to repeat if convergence is not reached. Default: 20.',
default=20)
parser.add_argument('-perm', '--permutation',
dest='perm', type=int,
required=False,
help='Number of permutation iterations for '
'network subsetting during partial iterations. Default: number of nodes.',
default=None)
parser.add_argument('-subset', '--subset_fraction',
dest='subset', type=float,
required=False,
help='Fraction of edges that are used for subsetting'
' if the input graph is not balanced. Default: 0.8.',
default=0.8)
parser.add_argument('-ratio', '--stability_ratio',
dest='ratio', type=float,
required=False,
help='Fraction of scores that need to be positive or negative'
'for edge scores to be considered stable. Default: 0.8.',
default=0.8)
parser.add_argument('-scale', '--edgescale',
dest='edgescale', type=float,
required=False,
help='Edge scale used to separate out weak cluster assignments. '
'The larger the edge scale, the larger the weak cluster. Default: 0.8.',
default=0.8)
parser.add_argument('-cr', '--cluster_reliability', dest='cr',
action='store_true',
help='If flagged, reliability of cluster assignment is computed. ', required=False)
parser.set_defaults(cr=False)
parser.add_argument('-rel', '--reliability_permutations',
dest='rel', type=int,
required=False,
help='Number of permutation iterations for reliability estimates. \n '
'By default, this is 20. \n',
default=20)
parser.add_argument('-e', '--error',
dest='error', type=int,
required=False,
help='Fraction of edges to rewire for reliability tests. Default: 0.1.',
default=0.1)
parser.add_argument('-dir', '--direction',
dest='direction',
action='store_true',
required=False,
help='If flagged, directed graphs are not converted to undirected after import. ',
default=False)
parser.add_argument('-b', '--binary',
dest='bin',
action='store_true',
required=False,
default=False,
help='If flagged, edge weights are converted to 1 and -1. ')
parser.add_argument('-v', '--verbose',
dest='verbose',
required=False,
action='store_true',
help='If flagged, provides additional details on progress. ',
default=False)
parser.add_argument('-version', '--version',
dest='version',
required=False,
help='Version number.',
action='store_true',
default=False)
return parser
def main():
args = set_manta().parse_args(sys.argv[1:])
args = vars(args)
if args['version']:
info = VersionInfo('manta')
logger.info('Version ' + info.version_string())
exit(0)
if args['graph'] != 'demo':
filename = args['graph'].split(sep=".")
extension = filename[len(filename)-1]
# see if the file can be detected
# if not, try appending current working directory and then read.
if not os.path.isfile(args['graph']):
if os.path.isfile(os.getcwd() + '/' + args['graph']):
args['graph'] = os.getcwd() + '/' + args['graph']
else:
logger.error('Could not find the specified file. Is your file path correct?')
exit()
try:
if extension == 'graphml':
network = nx.read_graphml(args['graph'])
elif extension == 'txt':
network = nx.read_weighted_edgelist(args['graph'])
elif extension == 'gml':
network = nx.read_gml(args['graph'])
elif extension == 'cyjs':
network = read_cyjson(args['graph'])
else:
logger.warning('Format not accepted. '
'Please specify the filename including extension (e.g. test.graphml).', exc_info=True)
exit()
except Exception:
logger.error('Could not import network file!', exc_info=True)
exit()
# first need to convert network to undirected
elif args['graph'] == 'demo':
path = os.path.dirname(manta.__file__)
path = path + '//demo.graphml'
network = nx.read_graphml(path)
if args['direction']:
if extension == 'txt':
logger.warning('Directed networks from edge lists not supported, use graphml or cyjs! ')
exit()
else:
network = nx.to_undirected(network)
if args['bin']:
orig_edges = dict()
# store original edges for export
for edge in network.edges:
orig_edges[edge] = network.edges[edge]['weight']
network.edges[edge]['weight'] = np.sign(network.edges[edge]['weight'])
if any(value == 0 for value in
nx.get_edge_attributes(network, 'weight').values()):
logger.error("Some edges in the network have a weight of exactly 0. \n"
"Such edges cannot be clustered. Try converting weights to 1 and -1. ")
weight_properties = nx.get_edge_attributes(network, 'weight')
if len(weight_properties) == 0:
logger.error("The imported network has no 'weight' edge property. \n"
"Please make sure you are formatting the network correctly. ")
results = cluster_graph(network, limit=args['limit'], max_clusters=args['max'],
min_clusters=args['min'], min_cluster_size=args['ms'],
iterations=args['iter'], subset=args['subset'],
ratio=args['ratio'], edgescale=args['edgescale'],
permutations=args['perm'], verbose=args['verbose'])
graph = results[0]
if args['cr']:
perm_clusters(graph=graph, limit=args['limit'], max_clusters=args['max'],
min_clusters=args['min'], min_cluster_size=args['ms'],
iterations=args['iter'], ratio=args['ratio'],
partialperms=args['perm'], relperms=args['rel'], subset=args['subset'],
error=args['error'], verbose=args['verbose'])
layout = None
if args['bin']:
for edge in network.edges:
network.edges[edge]['weight'] = orig_edges[edge]
if args['layout']:
layout = generate_layout(graph, args['tax'])
if args['fp']:
if args['f'] == 'graphml':
nx.write_graphml(graph, args['fp'] + '.graphml')
elif args['f'] == 'csv':
node_keys = graph.nodes[list(graph.nodes)[0]].keys()
properties = {}
for key in node_keys:
properties[key] = nx.get_node_attributes(graph, key)
data = | pd.DataFrame(properties) | pandas.DataFrame |
import datetime
import glob
import pandas as pd
import xarray as xr
import matplotlib.pyplot as plt
from src.data.observations import OpenAQDownloader
from src.data.utils import Location
from src.constants import ROOT_DIR
from src.workflow import Workflow
from pathlib import Path
variable = "no2"
station_id = "US007"
# Get CAMS forecast and Corrected CAMS Forecast into predictions dataframe
for day in range(1, 32):
time_0 = datetime.datetime.utcnow()
w = Workflow(
variable=variable,
date=datetime.datetime(year=2021, month=8, day=day),
model=Path("/home/pereza/datos/cams") / "config_inceptiontime_depth6.yml",
data_dir=Path("/home/pereza/datos/cams"),
stations_csv=ROOT_DIR / "data/external/stations.csv",
station_id="US007",
)
w.run()
time_1 = datetime.datetime.utcnow()
total_time = time_1 - time_0
print(total_time.total_seconds())
files = glob.glob(f'/home/pereza/datos/cams/*/{variable}_*_{station_id}_*.csv')
predictions = []
for file in files:
predictions.append(pd.read_csv(file, index_col=0))
predictions_df = | pd.concat(predictions) | pandas.concat |
from arche import arche, SH_URL
from arche.arche import Arche
from arche.rules.result import Level
from conftest import create_result
import pandas as pd
import pytest
def test_target_equals_source():
with pytest.raises(ValueError) as excinfo:
Arche(source="0/0/1", target="0/0/1")
assert (
str(excinfo.value)
== "'target' is equal to 'source'. Data to compare should have different sources."
)
def test_target_items(mocker, get_job_items):
mocker.patch("arche.Arche.get_items", return_value=get_job_items)
arche = Arche("source", target="target")
assert arche.target_items is get_job_items
assert arche._target_items is get_job_items
assert arche.target_items is get_job_items
def test_target_items_none(mocker):
arche = Arche("source")
assert arche.target_items is None
def test_arche_df(get_df):
a = Arche(source=get_df, target=get_df)
pd.testing.assert_frame_equal(a.source_items.df, get_df)
pd.testing.assert_frame_equal(a.target_items.df, get_df)
schema_dummies = [
{"$schema": "http://json-schema.org/draft-07/schema"},
{"$schema": "http://json-schema.org/draft-06/schema"},
]
@pytest.mark.parametrize(
"passed_schema_source, set_schema_source, expected_schema",
[
(schema_dummies[0], None, schema_dummies[0]),
(None, schema_dummies[1], schema_dummies[1]),
(schema_dummies[1], schema_dummies[0], schema_dummies[0]),
(None, None, None),
],
)
def test_schema(passed_schema_source, set_schema_source, expected_schema):
arche = Arche("source", schema=passed_schema_source)
assert arche._schema == passed_schema_source
assert arche.schema_source == passed_schema_source
if set_schema_source:
arche.schema = set_schema_source
assert arche.schema_source == set_schema_source
assert arche.schema == expected_schema
@pytest.mark.parametrize(
"source, start, count, filters, expand", [("112358/13/21", 1, 50, None, False)]
)
def test_get_items(mocker, get_raw_items, source, start, count, filters, expand):
mocker.patch(
"arche.readers.items.JobItems.fetch_data",
return_value=get_raw_items,
autospec=True,
)
items = Arche.get_items(
source=source, start=start, count=count, filters=filters, expand=expand
)
assert items.key == source
assert items.count == count
assert items.filters == filters
assert items.expand == expand
assert items.start_index == start
def test_get_items_from_iterable(get_cloud_items):
items = Arche.get_items(
get_cloud_items, start=None, count=None, filters=None, expand=True
)
assert items.raw == get_cloud_items
@pytest.mark.parametrize(
"source, count, filters, expand", [("112358/collections/s/pages", 5, None, True)]
)
def test_get_items_from_collection(
mocker, get_raw_items, source, count, filters, expand
):
mocker.patch(
"arche.readers.items.CollectionItems.fetch_data",
return_value=get_raw_items,
autospec=True,
)
items = Arche.get_items(
source=source, count=count, start=0, filters=filters, expand=expand
)
assert items.key == source
assert items.count == 5
assert items.filters == filters
assert items.expand == expand
def test_get_items_start():
with pytest.raises(ValueError) as excinfo:
Arche.get_items(
source="112358/collections/s/pages",
count=1,
start=1,
filters=None,
expand=None,
)
assert str(excinfo.value) == "Collections API does not support 'start' parameter"
def test_get_items_from_bad_source():
with pytest.raises(ValueError) as excinfo:
Arche.get_items(source="bad_key", count=1, start=1, filters=None, expand=None)
assert str(excinfo.value) == f"'bad_key' is not a valid job or collection key"
def test_arche_dataframe(mocker):
a = Arche(
source=pd.DataFrame({"c": [0, 1]}),
schema={"properties": {"c": {"type": "integer"}}},
target=pd.DataFrame({"c": [1, 1]}),
)
mocker.patch("arche.report.Report.write_details", autospec=True)
a.report_all()
executed = [
"Garbage Symbols",
"Fields Coverage",
"Scraped Fields",
"Boolean Fields",
"JSON Schema Validation",
"Tags",
"Compare Price Was And Now",
"Uniqueness",
"Duplicated Items",
"Coverage For Scraped Categories",
"Category Coverage Difference",
"Compare Prices For Same Urls",
"Compare Names Per Url",
"Compare Prices For Same Names",
]
for e in executed:
assert a.report.results.get(e)
assert a.report.results.get("JSON Schema Validation").errors is None
assert (
a.report.results.get("JSON Schema Validation").info[0].summary
== "2 items were checked, 0 error(s)"
)
assert (
Arche(
pd.DataFrame({"_key": ["0", "1"], "c": [0, 1]}),
schema={"properties": {"c": {"type": "string"}}},
).report_all()
is None
)
def test_arche_dataframe_data_warning(caplog):
Arche(pd.DataFrame())
assert "Pandas stores `NA` (missing)" in caplog.text
def test_report_all(mocker, get_cloud_items):
mocked_write_summaries = mocker.patch(
"arche.report.Report.write_summaries", autospec=True
)
# autospec and classmethod bug https://github.com/python/cpython/pull/11613
mocked_write = mocker.patch("arche.report.Report.write", autospec=False)
source = pd.DataFrame(get_cloud_items)
source["b"] = True
a = Arche(source=source, target= | pd.DataFrame(get_cloud_items[:2]) | pandas.DataFrame |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.patches import Patch
from scipy import linalg, stats
from scipy.interpolate import interp1d
from scipy.ndimage import gaussian_filter
from scipy.optimize import minimize
import os
os.makedirs("plots", exist_ok=True)
filename_pk = "../config/plots/pk_individual/pk_individual_alphameans.csv"
filename_xi = "../config/plots/xi_individual/xi_individual_alphameans.csv"
df_pk = | pd.read_csv(filename_pk) | pandas.read_csv |
from distutils.version import LooseVersion
from warnings import catch_warnings
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
import pandas as pd
from pandas import (
DataFrame,
HDFStore,
Index,
MultiIndex,
Series,
_testing as tm,
bdate_range,
concat,
date_range,
isna,
read_hdf,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
tables,
)
from pandas.io.pytables import Term
pytestmark = pytest.mark.single
def test_select_columns_in_where(setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_select_with_dups(setup_path):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=["A"])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# dups across dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["A"]]
result = store.select("df", columns=["A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["B", "A"]]
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
expected = df.loc[:, ["B", "A"]]
expected = concat([expected, expected])
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_select(setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select("df", [("columns=['A', 'B']")])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# all a data columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["C", "D"])
expected = df[df.A > 0].reindex(columns=["C", "D"])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(setup_path):
with | ensure_clean_store(setup_path) | pandas.tests.io.pytables.common.ensure_clean_store |
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def object_creation():
s = pd.Series([1, np.nan])
dates = pd.date_range('20130101', periods=2)
df = pd.DataFrame(np.random.randn(2, 3), index=dates, columns=list('ABC'))
df2 = pd.DataFrame({'A': pd.Timestamp('20130102'),
'B': pd.Series(1, index=list(range(2)), dtype='float32'),
'C': np.array([3] * 2, dtype='int32'),
'D': pd.Categorical(['test', 'train'])})
print(df2.dtypes)
return df
def viewing_data():
print(df.head())
print(df.tail(1))
print(df.index)
print(df.columns)
# DataFrame.to_numpy() can be an expensive operation when df has columns with different data types
print(df.to_numpy())
print(df.describe())
print(df.T)
print(df.sort_index(axis=1, ascending=False))
print(df.sort_values(by='B'))
def selection():
# Getting
print(df['A']) # Selecting a single column. Equivalent to df.A
print(df[:2]) # Selecting via [], which slices the rows
print(df[:'20130102'])
# Selection by label
print(df.loc['20130101'])
print(df.loc[:, ['A', 'B']])
# Selection by position
print(df.iloc[1])
print(df.iloc[:1, 1:2])
print(df.iloc[[0, 1], [0, 2]])
print(df.iat[1, 1]) # For getting fast access to a scalar
# Boolean indexing
print(df[df['A'] > 0])
print(df[df > 0])
df2 = df.copy()
df2['D'] = ['one', 'two']
print(df2[df2['D'].isin(['two'])])
# Setting
df.at['20130101', 'A'] = 0
df.iat[0, 1] = 0
df.loc[:, 'C'] = np.array([5] * len(df))
print(df)
df2 = df.copy()
df2[df2 > 0] = -df2
print(df2)
def missing_data():
# pandas uses np.nan to represent missing data
df1 = df.reindex(index=df.index[:2], columns=list(df.columns) + ['D'])
df1.loc[:df.index[0], 'D'] = 1
print(df1)
print(df1.dropna(how='any'))
print(df1.fillna(value=5))
print(pd.isna(df1))
def operations():
print(df.mean()) # operations in general exclude missing data
print(df.mean(1)) # same operation on the other axis
s = pd.Series([1, np.nan], index=df.index).shift(1)
print(df)
print(df.sub(s, axis='index'))
print(df.apply(np.cumsum))
print(df.apply(lambda x: x.max() - x.min()))
print(pd.Series(np.random.randint(0, 7, size=10)).value_counts()) # histogramming
print(pd.Series(['Aaba', np.nan]).str.lower())
def merge():
# Adding a column to a DataFrame is relatively fast. However, adding a row requires a copy, and may be expensive.
print(pd.concat([df[:1], df[1:]]))
left = pd.DataFrame({'key': ['foo', 'foo'], 'lval': [1, 2]})
right = pd.DataFrame({'key': ['foo', 'foo'], 'rval': [4, 5]})
print(pd.merge(left, right, on='key')) # join, SQL style merges
def grouping():
df0 = pd.DataFrame({'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
print(df0.groupby(['A', 'B']).sum())
def reshaping():
tuples = list(zip(*[['bar', 'bar', 'baz', 'baz'], ['one', 'two', 'one', 'two']]))
index = pd.MultiIndex.from_tuples(tuples, names=['first', 'second'])
df0 = pd.DataFrame(np.random.randn(4, 2), index=index, columns=['A', 'B'])
print(df0)
stacked = df0.stack()
print(stacked)
print(stacked.unstack()) # by default unstacks the last level
print(stacked.unstack(0))
df0 = pd.DataFrame({'A': ['one', 'one', 'two', 'three'] * 3,
'B': ['A', 'B', 'C'] * 4,
'C': ['foo', 'foo', 'foo', 'bar', 'bar', 'bar'] * 2,
'D': np.random.randn(12),
'E': np.random.randn(12)})
print(pd.pivot_table(df0, values='D', index=['A', 'B'], columns=['C']))
def categoricals():
df0 = pd.DataFrame({"id": [1, 2, 3, 4, 5, 6], "raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df0["grade"] = df0["raw_grade"].astype("category") # convert to a categorical data type
print(df0["grade"])
df0["grade"].cat.categories = ["very good", "good", "very bad"] # rename the categories
print(df0["grade"])
# Reorder the categories and simultaneously add the missing categories
df0["grade"] = df0["grade"].cat.set_categories(["very bad", "bad", "medium", "good", "very good"])
print(df0["grade"])
# Sorting is per order in the categories, not lexical order
print(df0.sort_values(by="grade"))
def plotting():
index = | pd.date_range('1/1/2000', periods=1000) | pandas.date_range |
#!/usr/bin/python3
import sys
from glob import glob
from pandas.io.parsers import read_csv
import igraph as ig
from leidenalg import find_partition_temporal, ModularityVertexPartition
from re import compile
import numpy as np
if (len(sys.argv) < 3):
print("usage: ./leiden.py path/to/outputdir interslice_weight")
sys.exit()
pth = sys.argv[1]
tar = pth + "/digest.csv"
isw = float(sys.argv[2])
fls = sorted(glob(pth + "/*_mat.csv"))
rgx = compile(r"^.*/([^/]+)_mat.csv$")
def mkgraph(fl):
al = read_csv(fl)
mt = al.values[:,2:]
mt[np.isnan(mt)] = 0
G = ig.Graph.Adjacency((mt > 0).tolist())
G.es['weight'] = mt[mt.nonzero()]
G.vs['id'] = al.columns[2:].tolist()
return G
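# Assumed layout of each *_mat.csv (inferred from the [:, 2:] slicing above,
# not documented in the original): two leading metadata columns followed by a
# square weighted adjacency matrix whose column names are the vertex ids, e.g.
#
#     ,label,nodeA,nodeB
#     0,nodeA,0.0,0.3
#     1,nodeB,0.1,0.0
#
# mkgraph() keeps the non-zero entries as directed, weighted edges.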
def extractids(fl):
al = | read_csv(fl) | pandas.io.parsers.read_csv |
import math
import subprocess
import einops as eo
from loguru import logger
import numpy as np
import pandas as pd
from PIL import Image
from scipy.signal import savgol_filter
import torch
from torch import optim, nn
from collections import Counter
from pytti import (
format_input,
set_t,
print_vram_usage,
freeze_vram_usage,
vram_usage_mode,
)
from pytti.AudioParse import SpectralAudioParser
from pytti.Image.differentiable_image import DifferentiableImage
from pytti.Image.PixelImage import PixelImage
from pytti.Notebook import tqdm, make_hbox
# from pytti.rotoscoper import update_rotoscopers
from pytti.rotoscoper import ROTOSCOPERS
from pytti.Transforms import (
animate_2d,
zoom_3d,
animate_video_source,
)
# deprecate this
from labellines import labelLines
from IPython import display
def unpack_dict(D, n=2):
"""
Given a dictionary D and a number n, return a tuple of n dictionaries,
each containing the same keys as D and values corresponding to those
values of D at the corresponding index
:param D: a dictionary
:param n: number of dictionaries to unpack into, defaults to 2 (optional)
:return: A tuple of dictionaries.
"""
ds = [{k: V[i] for k, V in D.items()} for i in range(n)]
return tuple(ds)
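# Illustrative example (hypothetical values): for D = {"a": (1, 2), "b": (3, 4)}
# and n=2, unpack_dict(D) returns ({"a": 1, "b": 3}, {"a": 2, "b": 4}); the
# i-th output dict collects the i-th element of every value in D.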
# this only gets used in the plot_losses method below.
# deprecate (tensorboard)
def smooth_dataframe(df, window_size):
"""applies a moving average filter to the columns of df"""
smoothed_df = pd.DataFrame().reindex_like(df)
for key in df.columns:
smoothed_df[key] = savgol_filter(df[key], window_size, 2, mode="nearest")
return smoothed_df
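# Note: scipy's savgol_filter needs window_size to exceed the polynomial order
# (2 here) and, in most SciPy versions, to be odd; plot_losses below calls it
# as smooth_dataframe(dfs[0], 17).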
class DirectImageGuide:
"""
Image guide that uses an optimizer and torch autograd to optimize an image representation
Based on the BigGan+CLIP algorithm by advadnoun (https://twitter.com/advadnoun)
image_rep: (DifferentiableImage) image representation
embedder: (Module) image embedder
optimizer: (Class) optimizer class to use. Defaults to Adam
all other arguments are passed as kwargs to the optimizer.
"""
def __init__(
self,
image_rep: DifferentiableImage,
embedder: nn.Module,
optimizer: optim.Optimizer = None,
lr: float = None,
# null_update=True,
params=None,
writer=None,
fig=None,
axs=None,
base_name=None,
OUTPATH=None, # <<<<<<<<<<<<<<
#####################
video_frames=None, # # only need this to pass to animate_video_source
optical_flows=None,
stabilization_augs=None,
last_frame_semantic=None,
semantic_init_prompt=None,
init_augs=None,
**optimizer_params,
# pretty sure passing in optimizer_params isn't being used anywhere
# We pass in the optimizer object itself anyway... why not just give it
# initialize it with `**optimizer_params`` before passing it to this?
):
self.image_rep = image_rep
self.embedder = embedder
if lr is None:
lr = image_rep.lr
optimizer_params["lr"] = lr
self.optimizer_params = optimizer_params
if optimizer is None:
self.optimizer = optim.Adam(image_rep.parameters(), **optimizer_params)
else:
self.optimizer = optimizer
self.dataframe = []
self.audio_parser = None
if params is not None:
if params.input_audio and params.input_audio_filters:
self.audio_parser = SpectralAudioParser(
params.input_audio,
params.input_audio_offset,
params.frames_per_second,
params.input_audio_filters,
)
# else:
# self.audio_parser = None
# self.null_update = null_update
self.params = params
self.writer = writer
self.OUTPATH = OUTPATH
self.base_name = base_name
self.fig = fig
self.axs = axs
self.video_frames = video_frames
self.optical_flows = optical_flows
# if stabilization_augs is None:
# stabilization_augs = []
self.stabilization_augs = stabilization_augs
self.last_frame_semantic = last_frame_semantic
self.semantic_init_prompt = semantic_init_prompt
# if init_augs is None:
# init_augs = []
self.init_augs = init_augs
def run_steps(
self,
n_steps,
prompts,
interp_prompts,
loss_augs,
stop=-math.inf,
interp_steps=0,
i_offset=0,
skipped_steps=0,
gradient_accumulation_steps: int = 1,
):
"""
runs the optimizer
prompts: (ClipPrompt list) list of prompts
n_steps: (positive integer) steps to run
returns: the number of steps run
"""
for i in tqdm(range(n_steps)):
# not a huge fan of this.
# currently need it for PixelImage.encode_image
# TO DO: all that stuff we just moved around:
# let's attach it to a "Renderer" class,
# and here we can check if the DirectImageGuide was
# initialized with a renderer or not, and call self.renderer.update()
# if appropriate
# if not self.null_update:
# self.update(i + i_offset, i + skipped_steps)
self.update(
model=self,
img=self.image_rep,
i=i + i_offset,
stage_i=i + skipped_steps,
params=self.params,
writer=self.writer,
fig=self.fig,
axs=self.axs,
base_name=self.base_name,
optical_flows=self.optical_flows,
video_frames=self.video_frames,
stabilization_augs=self.stabilization_augs,
last_frame_semantic=self.last_frame_semantic,
embedder=self.embedder,
init_augs=self.init_augs,
semantic_init_prompt=self.semantic_init_prompt,
)
losses = self.train(
i + skipped_steps,
prompts,
interp_prompts,
loss_augs,
interp_steps=interp_steps,
gradient_accumulation_steps=gradient_accumulation_steps,
)
if losses["TOTAL"] <= stop:
break
return i + 1
def set_optim(self, opt=None):
if opt is not None:
self.optimizer = opt
else:
self.optimizer = optim.Adam(
self.image_rep.parameters(), **self.optimizer_params
)
def clear_dataframe(self):
"""
The .dataframe attribute is just a list of pd.DataFrames that
are tracking losses for the current scene. I wanna say one
for each prompt. To do: flush all that out and let tensorboard handle it.
"""
self.dataframe = []
# deprecate (tensorboard)
def plot_losses(self, axs):
def plot_dataframe(df, ax, legend=False):
keys = list(df)
keys.sort(reverse=True, key=lambda k: df[k].iloc[-1])
ax.clear()
df[keys].plot(ax=ax, legend=legend)
if legend:
ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
ax.tick_params(
labelbottom=True,
labeltop=False,
labelleft=True,
labelright=False,
bottom=True,
top=False,
left=True,
right=False,
)
last_x = df.last_valid_index()
lines = ax.get_lines()
colors = [l.get_color() for l in lines]
labels = [l.get_label() for l in lines]
ax.relim()
ax.autoscale_view()
labelLines(ax.get_lines(), align=False)
return dict(zip(labels, colors))
dfs = self.dataframe[:]
if dfs != []:
dfs[0] = smooth_dataframe(dfs[0], 17)
for i, (df, ax) in enumerate(zip(dfs, axs)):
if len(df.index) < 2:
return False
# m = df.apply(lambda col: col.first_valid_index())
# print(m)
# print(df.lookup(m, m.index))
# rel_loss = (df-df.lookup(m, m.index))
if not df.empty:
plot_dataframe(df, ax, legend=i == 0)
ax.set_ylabel("Loss")
ax.set_xlabel("Step")
return True
def train(
self,
i,
prompts,
interp_prompts,
loss_augs,
interp_steps=0,
save_loss=True,
gradient_accumulation_steps: int = 1,
):
"""
steps the optimizer
promts: (ClipPrompt list) list of prompts
"""
self.optimizer.zero_grad()
z = self.image_rep.decode_training_tensor()
# logger.debug(z.shape) # [1, 3, height, width]
losses = []
aug_losses = {
aug: aug(format_input(z, self.image_rep, aug), self.image_rep)
for aug in loss_augs
}
image_augs = self.image_rep.image_loss()
image_losses = {aug: aug(self.image_rep) for aug in image_augs}
# losses_accumulator, losses_raw_accumulator = Counter(), Counter()
losses, losses_raw = [], [] # just... don't care
total_loss = 0
if self.embedder is not None:
for mb_i in range(gradient_accumulation_steps):
# logger.debug(mb_i)
image_embeds, offsets, sizes = self.embedder(self.image_rep, input=z)
t = 1
interp_losses = [0]
if i < interp_steps:
t = i / interp_steps
interp_losses = [
prompt(
format_input(image_embeds, self.embedder, prompt),
format_input(offsets, self.embedder, prompt),
format_input(sizes, self.embedder, prompt),
)[0]
* (1 - t)
for prompt in interp_prompts
]
prompt_losses = {
prompt: prompt(
format_input(image_embeds, self.embedder, prompt),
format_input(offsets, self.embedder, prompt),
format_input(sizes, self.embedder, prompt),
)
for prompt in prompts
}
losses, losses_raw = zip(
*map(unpack_dict, [prompt_losses, aug_losses, image_losses])
# *map(unpack_dict, [prompt_losses])
)
# logger.debug(losses)
losses = list(losses)
# logger.debug(losses)
# losses = Counter(losses)
# logger.debug(losses)
losses_raw = list(losses_raw)
# losses_raw = Counter(losses_raw)
# losses_accumulator += losses
# losses_raw_accumulator += losses_raw
for v in prompt_losses.values():
v[0].mul_(t)
total_loss_mb = sum(map(lambda x: sum(x.values()), losses)) + sum(
interp_losses
)
total_loss_mb /= gradient_accumulation_steps
# total_loss_mb.backward()
total_loss_mb.backward(retain_graph=True)
# total_loss += total_loss_mb # this is causing it to break
# total_loss = total_loss_mb
# losses = [{k:v} for k,v in losses_accumulator.items()]
# losses_raw = [{k:v} for k,v in losses_raw_accumulator.items()]
losses_raw.append({"TOTAL": total_loss}) # this needs to be fixed
self.optimizer.step()
self.image_rep.update()
self.optimizer.zero_grad()
# if t != 0:
# for v in prompt_losses.values():
# v[0].div_(t)
if save_loss:
if not self.dataframe:
self.dataframe = [
pd.DataFrame({str(k): float(v) for k, v in loss.items()}, index=[i])
for loss in losses_raw
]
for df in self.dataframe:
df.index.name = "Step"
else:
for j, (df, loss) in enumerate(zip(self.dataframe, losses_raw)):
frames = [df] + [
pd.DataFrame(
{str(k): float(v) for k, v in loss.items()}, index=[i]
)
]
self.dataframe[j] = | pd.concat(frames, ignore_index=False) | pandas.concat |
# Copyright (c) 2018, NVIDIA CORPORATION.
import pickle
import warnings
from numbers import Number
import numpy as np
import pandas as pd
import pyarrow as pa
from numba import cuda, njit
import nvstrings
import rmm
import cudf
import cudf._lib as libcudf
from cudf._lib.stream_compaction import nunique as cpp_unique_count
from cudf.core.buffer import Buffer
from cudf.utils import cudautils, ioutils, utils
from cudf.utils.dtypes import is_categorical_dtype, is_scalar, np_to_pa_dtype
from cudf.utils.utils import buffers_from_pyarrow
class Column(object):
"""An immutable structure for storing data and mask for a column.
This should be considered as the physical layer that provides
container operations on the data and mask.
These operations work on each data element as plain-old-data.
Any logical operations are implemented in subclasses of *TypedColumnBase*.
Attributes
----------
_data : Buffer
The data buffer
_mask : Buffer
The validity mask
_null_count : int
Number of null values in the mask.
These attributes are exported in the properties (e.g. *data*, *mask*,
*null_count*).
"""
@classmethod
def _concat(cls, objs, dtype=None):
from cudf.core.series import Series
from cudf.core.column import (
StringColumn,
CategoricalColumn,
NumericalColumn,
)
if len(objs) == 0:
dtype = pd.api.types.pandas_dtype(dtype)
if dtype.type in (np.object_, np.str_):
return StringColumn(data=nvstrings.to_device([]), null_count=0)
elif is_categorical_dtype(dtype):
return CategoricalColumn(
data=as_column(Buffer.null(np.dtype("int8"))),
null_count=0,
ordered=False,
)
else:
return as_column(Buffer.null(dtype))
# If all columns are `NumericalColumn` with different dtypes,
# we cast them to a common dtype.
# Notice, we can always cast pure null columns
not_null_cols = list(filter(lambda o: len(o) != o.null_count, objs))
if len(not_null_cols) > 0 and (
len(
[
o
for o in not_null_cols
if not isinstance(o, NumericalColumn)
or np.issubdtype(o.dtype, np.datetime64)
]
)
== 0
):
col_dtypes = [o.dtype for o in not_null_cols]
# Use NumPy to find a common dtype
common_dtype = np.find_common_type(col_dtypes, [])
# Cast all columns to the common dtype
for i in range(len(objs)):
objs[i] = objs[i].astype(common_dtype)
# Find the first non-null column:
head = objs[0]
for i, obj in enumerate(objs):
if len(obj) != obj.null_count:
head = obj
break
for i, obj in enumerate(objs):
# Check that all columns are the same type:
if not objs[i].is_type_equivalent(head):
# if all null, cast to appropriate dtype
if len(obj) == obj.null_count:
from cudf.core.column import column_empty_like
objs[i] = column_empty_like(
head, dtype=head.dtype, masked=True, newsize=len(obj)
)
# Handle categories for categoricals
if all(isinstance(o, CategoricalColumn) for o in objs):
cats = (
Series(Column._concat([o.categories for o in objs]))
.drop_duplicates()
._column
)
objs = [
o.cat()._set_categories(cats, is_unique=True) for o in objs
]
head = objs[0]
for obj in objs:
if not (obj.is_type_equivalent(head)):
raise ValueError("All series must be of same type")
# Handle strings separately
if all(isinstance(o, StringColumn) for o in objs):
objs = [o._data for o in objs]
return StringColumn(data=nvstrings.from_strings(*objs))
# Filter out inputs that have 0 length
objs = [o for o in objs if len(o) > 0]
nulls = sum(o.null_count for o in objs)
newsize = sum(map(len, objs))
mem = rmm.device_array(shape=newsize, dtype=head.data.dtype)
data = Buffer.from_empty(mem, size=newsize)
# Allocate output mask only if there's nulls in the input objects
mask = None
if nulls:
mask = Buffer(utils.make_mask(newsize))
col = head.replace(data=data, mask=mask, null_count=nulls)
# Perform the actual concatenation
if newsize > 0:
col = libcudf.concat._column_concat(objs, col)
return col
@staticmethod
def from_mem_views(data_mem, mask_mem=None, null_count=None, name=None):
"""Create a Column object from a data device array (or nvstrings
object), and an optional mask device array
"""
from cudf.core.column import column
if isinstance(data_mem, nvstrings.nvstrings):
return column.build_column(
name=name,
buffer=data_mem,
dtype=np.dtype("object"),
null_count=null_count,
)
else:
data_buf = Buffer(data_mem)
mask = None
if mask_mem is not None:
mask = Buffer(mask_mem)
return column.build_column(
name=name,
buffer=data_buf,
dtype=data_mem.dtype,
mask=mask,
null_count=null_count,
)
def __init__(self, data, mask=None, null_count=None, name=None):
"""
Parameters
----------
data : Buffer
The code values
mask : Buffer; optional
The validity mask
null_count : int; optional
The number of null values in the mask.
"""
# Forces Column content to be contiguous
if not data.is_contiguous():
data = data.as_contiguous()
assert mask is None or mask.is_contiguous()
self._data = data
self._mask = mask
self._name = name
if mask is None:
null_count = 0
else:
# check that mask length is sufficient
assert mask.size * utils.mask_bitsize >= len(self)
self._update_null_count(null_count)
def equals(self, other):
if self is other:
return True
if other is None or len(self) != len(other):
return False
if len(self) == 1:
val = self[0] == other[0]
# when self is a multiindex we need to check all values
if isinstance(val, np.ndarray):
return val.all()
return bool(val)
return self.unordered_compare("eq", other).min()
def _update_null_count(self, null_count=None):
assert null_count is None or null_count >= 0
if null_count is None:
if self._mask is not None:
nnz = libcudf.cudf.count_nonzero_mask(
self._mask.mem, size=len(self)
)
null_count = len(self) - nnz
if null_count == 0:
self._mask = None
else:
null_count = 0
assert 0 <= null_count <= len(self)
if null_count == 0:
# Remove mask if null_count is zero
self._mask = None
self._null_count = null_count
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
def serialize(self):
header = {"null_count": self._null_count}
frames = []
header["type"] = pickle.dumps(type(self))
header["dtype"] = self._dtype.str
header["data_buffer"], data_frames = self._data.serialize()
header["data_frame_count"] = len(data_frames)
frames.extend(data_frames)
if self._mask:
header["mask_buffer"], mask_frames = self._mask.serialize()
header["mask_frame_count"] = len(mask_frames)
else:
header["mask_buffer"] = []
header["mask_frame_count"] = 0
mask_frames = {}
frames.extend(mask_frames)
header["frame_count"] = len(frames)
return header, frames
@classmethod
def deserialize(cls, header, frames):
data_nframe = header["data_frame_count"]
mask_nframe = header["mask_frame_count"]
data_typ = pickle.loads(header["data_buffer"]["type"])
data = data_typ.deserialize(
header["data_buffer"], frames[:data_nframe]
)
if header["mask_buffer"]:
mask_typ = pickle.loads(header["mask_buffer"]["type"])
mask = mask_typ.deserialize(
header["mask_buffer"],
frames[data_nframe : data_nframe + mask_nframe],
)
else:
mask = None
return data, mask
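# Illustrative sketch (assumption): a serialize / deserialize round trip for a
# hypothetical column `col`:
#
#     header, frames = col.serialize()
#     data, mask = type(col).deserialize(header, frames)
#     rebuilt = col.replace(data=data, mask=mask,
#                           null_count=header["null_count"])
#
# `header` carries the pickled column type, the dtype string and the frame
# counts; `frames` holds the data buffer frames followed by the mask frames.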
def _get_mask_as_column(self):
from cudf.core.column import NumericalColumn
data = Buffer(cudautils.ones(len(self), dtype=np.bool_))
mask = NumericalColumn(
data=data, mask=None, null_count=0, dtype=np.bool_
)
if self._mask is not None:
mask = mask.set_mask(self._mask).fillna(False)
return mask
def __sizeof__(self):
n = self._data.__sizeof__()
if self._mask:
n += self._mask.__sizeof__()
return n
def __len__(self):
return self._data.size
@property
def dtype(self):
return self._data.dtype
@property
def data(self):
"""Data buffer
"""
return self._data
@property
def mask(self):
"""Validity mask buffer
"""
return self._mask
def set_mask(self, mask, null_count=None):
"""Create new Column by setting the mask
This will override the existing mask. The returned Column will
reference the same data buffer as this Column.
Parameters
----------
mask : 1D array-like of numpy.uint8
The null-mask. Valid values are marked as ``1``; otherwise ``0``.
The mask bit given the data index ``idx`` is computed as::
(mask[idx // 8] >> (idx % 8)) & 1
null_count : int, optional
The number of null values.
If None, it is calculated automatically.
"""
if not isinstance(mask, Buffer):
mask = Buffer(mask)
if mask.dtype not in (np.dtype(np.uint8), np.dtype(np.int8)):
msg = "mask must be of byte; but got {}".format(mask.dtype)
raise ValueError(msg)
return self.replace(mask=mask, null_count=null_count)
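# Worked example of the bit layout documented above (illustrative only): a
# 10-row column needs a 2-byte mask. With mask = [0b11111101, 0b00000011],
# row 1 is null because (mask[0] >> 1) & 1 == 0, while row 9 is valid because
# (mask[1] >> 1) & 1 == 1.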
def allocate_mask(self, all_valid=True):
"""Return a new Column with a newly allocated mask buffer.
If ``all_valid`` is True, the new mask is set to all valid.
If ``all_valid`` is False, the new mask is set to all null.
"""
nelem = len(self)
mask_sz = utils.calc_chunk_size(nelem, utils.mask_bitsize)
mask = rmm.device_array(mask_sz, dtype=utils.mask_dtype)
if nelem > 0:
cudautils.fill_value(mask, 0xFF if all_valid else 0)
return self.set_mask(mask=mask, null_count=0 if all_valid else nelem)
def to_gpu_array(self, fillna=None):
"""Get a dense numba device array for the data.
Parameters
----------
fillna : scalar, 'pandas', or None
See *fillna* in ``.to_array``.
Notes
-----
if ``fillna`` is ``None``, null values are skipped. Therefore, the
output size could be smaller.
"""
return self.to_dense_buffer(fillna=fillna).to_gpu_array()
def to_array(self, fillna=None):
"""Get a dense numpy array for the data.
Parameters
----------
fillna : scalar, 'pandas', or None
Defaults to None, which will skip null values.
If it equals "pandas", null values are filled with NaNs.
Non integral dtype is promoted to np.float64.
Notes
-----
if ``fillna`` is ``None``, null values are skipped. Therefore, the
output size could be smaller.
"""
return self.to_dense_buffer(fillna=fillna).to_array()
@property
def valid_count(self):
"""Number of non-null values"""
return len(self) - self._null_count
@property
def null_count(self):
"""Number of null values"""
return self._null_count
@property
def has_null_mask(self):
"""A boolean indicating whether a null-mask is needed"""
return self._mask is not None
@property
def nullmask(self):
"""The gpu buffer for the null-mask
"""
if self.has_null_mask:
return self._mask
else:
raise ValueError("Column has no null mask")
def _replace_defaults(self):
params = {
"data": self.data,
"mask": self.mask,
"name": self.name,
"null_count": self.null_count,
}
return params
def copy_data(self):
"""Copy the column with a new allocation of the data but not the mask,
which is shared by the new column.
"""
return self.replace(data=self.data.copy())
def copy(self, deep=True):
"""Columns are immutable, so a deep copy produces a copy of the
underlying data and mask and a shallow copy creates a new column and
copies the references of the data and mask.
"""
if deep:
return libcudf.copying.copy_column(self)
else:
params = self._replace_defaults()
return type(self)(**params)
def replace(self, **kwargs):
"""Replace attributes of the class and return a new Column.
Valid keywords are valid parameters for ``self.__init__``.
Any omitted keywords will be defaulted to the corresponding
attributes in ``self``.
"""
params = self._replace_defaults()
params.update(kwargs)
if "mask" in kwargs and "null_count" not in kwargs:
del params["null_count"]
return type(self)(**params)
def view(self, newcls, **kwargs):
"""View the underlying column data differently using a subclass of
*TypedColumnBase*.
Parameters
----------
newcls : TypedColumnBase
The logical view to be used
**kwargs :
Additional parameters for instantiating an instance of *newcls*.
Valid keywords are valid parameters for ``newcls.__init__``.
Any omitted keywords will be defaulted to the corresponding
attributes in ``self``.
"""
params = Column._replace_defaults(self)
params.update(kwargs)
if "mask" in kwargs and "null_count" not in kwargs:
del params["null_count"]
return newcls(**params)
def element_indexing(self, index):
"""Default implementation for indexing to an element
Raises
------
``IndexError`` if out-of-bound
"""
index = np.int32(index)
if index < 0:
index = len(self) + index
if index > len(self) - 1:
raise IndexError
val = self.data[index] # this can raise IndexError
if isinstance(val, nvstrings.nvstrings):
val = val.to_host()[0]
valid = (
cudautils.mask_get.py_func(self.nullmask, index)
if self.has_null_mask
else True
)
return val if valid else None
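# Illustrative sketch (assumption): for a hypothetical 4-row column `col`
# whose last row is null:
#
#     col.element_indexing(-1)   # -> None (index -1 resolves to len(col) - 1)
#     col.element_indexing(0)    # -> first value (or None if that row is null)
#     col.element_indexing(7)    # -> IndexError, out of bounds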
def __getitem__(self, arg):
from cudf.core.column import column
if isinstance(arg, Number):
arg = int(arg)
return self.element_indexing(arg)
elif isinstance(arg, slice):
# compute mask slice
if self.null_count > 0:
if arg.step is not None and arg.step != 1:
raise NotImplementedError(arg)
# slicing data
subdata = self.data[arg]
# slicing mask
if self.dtype == "object":
data_size = self.data.size()
else:
data_size = self.data.size
bytemask = cudautils.expand_mask_bits(
data_size, self.mask.to_gpu_array()
)
submask = Buffer(cudautils.compact_mask_bytes(bytemask[arg]))
col = self.replace(data=subdata, mask=submask)
return col
else:
newbuffer = self.data[arg]
return self.replace(data=newbuffer)
else:
arg = column.as_column(arg)
if len(arg) == 0:
arg = column.as_column([], dtype="int32")
if pd.api.types.is_integer_dtype(arg.dtype):
return self.take(arg.data.mem)
if pd.api.types.is_bool_dtype(arg.dtype):
return self.apply_boolean_mask(arg)
raise NotImplementedError(type(arg))
def __setitem__(self, key, value):
"""
Set the value of self[key] to value.
If value and self are of different types,
value is coerced to self.dtype
"""
from cudf.core import column
if isinstance(key, slice):
key_start, key_stop, key_stride = key.indices(len(self))
if key_stride != 1:
raise NotImplementedError("Stride not supported in slice")
nelem = abs(key_stop - key_start)
else:
key = column.as_column(key)
if pd.api.types.is_bool_dtype(key.dtype):
if not len(key) == len(self):
raise ValueError(
"Boolean mask must be of same length as column"
)
key = column.as_column(cudautils.arange(len(self)))[key]
nelem = len(key)
if is_scalar(value):
if is_categorical_dtype(self.dtype):
from cudf.core.column import CategoricalColumn
from cudf.core.buffer import Buffer
from cudf.utils.cudautils import fill_value
data = rmm.device_array(nelem, dtype="int8")
fill_value(data, self._encode(value))
value = CategoricalColumn(
data=Buffer(data),
categories=self._categories,
ordered=False,
)
elif value is None:
value = column.column_empty(nelem, self.dtype, masked=True)
else:
to_dtype = pd.api.types.pandas_dtype(self.dtype)
value = utils.scalar_broadcast_to(value, nelem, to_dtype)
value = column.as_column(value).astype(self.dtype)
if len(value) != nelem:
msg = (
f"Size mismatch: cannot set value "
f"of size {len(value)} to indexing result of size "
f"{nelem}"
)
raise ValueError(msg)
if isinstance(key, slice):
out = libcudf.copying.copy_range(
self, value, key_start, key_stop, 0
)
else:
try:
out = libcudf.copying.scatter(value, key, self)
except RuntimeError as e:
if "out of bounds" in str(e):
raise IndexError(
f"index out of bounds for column of size {len(self)}"
)
raise
self._data = out.data
self._mask = out.mask
self._update_null_count()
def fillna(self, value):
"""Fill null values with ``value``.
Returns a copy with null filled.
"""
if not self.has_null_mask:
return self
out = cudautils.fillna(
data=self.data.to_gpu_array(),
mask=self.mask.to_gpu_array(),
value=value,
)
return self.replace(data=Buffer(out), mask=None, null_count=0)
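# Illustrative sketch (assumption): after fillna the returned copy is fully
# valid, so the mask is dropped:
#
#     filled = col.fillna(0)
#     assert filled.null_count == 0 and not filled.has_null_mask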
def isnull(self):
"""Identify missing values in a Column.
"""
return libcudf.unaryops.is_null(self)
def isna(self):
"""Identify missing values in a Column. Alias for isnull.
"""
return self.isnull()
def notna(self):
"""Identify non-missing values in a Column.
"""
return libcudf.unaryops.is_not_null(self)
def notnull(self):
"""Identify non-missing values in a Column. Alias for notna.
"""
return self.notna()
def to_dense_buffer(self, fillna=None):
"""Get dense (no null values) ``Buffer`` of the data.
Parameters
----------
fillna : scalar, 'pandas', or None
See *fillna* in ``.to_array``.
Notes
-----
if ``fillna`` is ``None``, null values are skipped. Therefore, the
output size could be smaller.
"""
if isinstance(fillna, Number):
if self.null_count > 0:
return self.fillna(fillna)
elif fillna not in {None, "pandas"}:
raise ValueError("invalid for fillna")
if self.null_count > 0:
if fillna == "pandas":
na_value = self.default_na_value()
# fill nan
return self.fillna(na_value)
else:
return self._copy_to_dense_buffer()
else:
# return a reference for performance reasons, should refactor code
# to explicitly use mem in the future
return self.data
def _invert(self):
"""Internal convenience function for inverting masked array
Returns
-------
Column
Column with the values logically inverted
"""
gpu_mask = self.to_gpu_array()
cudautils.invert_mask(gpu_mask, gpu_mask)
return self.replace(data=Buffer(gpu_mask), mask=None, null_count=0)
def _copy_to_dense_buffer(self):
data = self.data.to_gpu_array()
mask = self.mask.to_gpu_array()
nnz, mem = cudautils.copy_to_dense(data=data, mask=mask)
return Buffer(mem, size=nnz, capacity=mem.size)
def find_first_value(self, value):
"""
Returns offset of first value that matches
"""
# FIXME: Inefficient find in CPU code
arr = self.to_array()
indices = np.argwhere(arr == value)
if not len(indices):
raise ValueError("value not found")
return indices[0, 0]
def find_last_value(self, value):
"""
Returns offset of last value that matches
"""
arr = self.to_array()
indices = np.argwhere(arr == value)
if not len(indices):
raise ValueError("value not found")
return indices[-1, 0]
def append(self, other):
from cudf.core.column import as_column
return Column._concat([self, as_column(other)])
def quantile(self, q, interpolation, exact):
if isinstance(q, Number):
quant = [float(q)]
elif isinstance(q, list) or isinstance(q, np.ndarray):
quant = q
else:
msg = "`q` must be either a single element, list or numpy array"
raise TypeError(msg)
return libcudf.quantile.quantile(self, quant, interpolation, exact)
def take(self, indices, ignore_index=False):
"""Return Column by taking values from the corresponding *indices*.
"""
from cudf.core.column import column_empty_like
# Handle zero size
if indices.size == 0:
return column_empty_like(self, newsize=0)
try:
result = libcudf.copying.gather(self, indices)
except RuntimeError as e:
if "out of bounds" in str(e):
raise IndexError(
f"index out of bounds for column of size {len(self)}"
)
raise
result.name = self.name
return result
def as_mask(self):
"""Convert booleans to bitmask
Returns
-------
device array
"""
return cudautils.compact_mask_bytes(self.to_gpu_array())
@ioutils.doc_to_dlpack()
def to_dlpack(self):
"""{docstring}"""
import cudf.io.dlpack as dlpack
return dlpack.to_dlpack(self)
@property
def _pointer(self):
"""
Return pointer to a view of the underlying data structure
"""
return libcudf.cudf.column_view_pointer(self)
@property
def is_unique(self):
return self.unique_count() == len(self)
@property
def is_monotonic(self):
return self.is_monotonic_increasing
@property
def is_monotonic_increasing(self):
raise (NotImplementedError)
@property
def is_monotonic_decreasing(self):
raise (NotImplementedError)
def get_slice_bound(self, label, side, kind):
"""
Calculate slice bound that corresponds to given label.
Returns leftmost (one-past-the-rightmost if ``side=='right'``) position
of given label.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'ix', 'loc', 'getitem'}
"""
assert kind in ["ix", "loc", "getitem", None]
if side not in ("left", "right"):
raise ValueError(
"Invalid value for side kwarg,"
" must be either 'left' or 'right': %s" % (side,)
)
# TODO: Handle errors/missing keys correctly
# Not currently using `kind` argument.
if side == "left":
return self.find_first_value(label)
if side == "right":
return self.find_last_value(label) + 1
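# Worked example (illustrative only): for a sorted column holding [1, 2, 2, 3],
# get_slice_bound(2, "left", "loc") returns 1 (offset of the first 2) and
# get_slice_bound(2, "right", "loc") returns 3 (offset of the last 2 plus one),
# so slicing [1:3] selects exactly the 2s.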
def sort_by_values(self):
raise NotImplementedError
def _unique_segments(self):
""" Common code for unique, unique_count and value_counts"""
# make dense column
densecol = self.replace(data=self.to_dense_buffer(), mask=None)
# sort the column
sortcol, _ = densecol.sort_by_values()
# find segments
sortedvals = sortcol.data.mem
segs, begins = cudautils.find_segments(sortedvals)
return segs, sortedvals
def unique_count(self, method="sort", dropna=True):
if method != "sort":
msg = "non sort based unique_count() not implemented yet"
raise NotImplementedError(msg)
return cpp_unique_count(self, dropna)
def repeat(self, repeats, axis=None):
assert axis in (None, 0)
return libcudf.filling.repeat([self], repeats)[0]
class TypedColumnBase(Column):
"""Base class for all typed column
e.g. NumericalColumn, CategoricalColumn
This class provides common operations to implement logical view and
type-based operations for the column.
Notes
-----
Not designed to be instantiated directly. Instantiate subclasses instead.
"""
def __init__(self, **kwargs):
dtype = kwargs.pop("dtype")
super(TypedColumnBase, self).__init__(**kwargs)
# Logical dtype
self._dtype = pd.api.types.pandas_dtype(dtype)
@property
def dtype(self):
return self._dtype
def is_type_equivalent(self, other):
"""Is the logical type of the column equal to the other column.
"""
mine = self._replace_defaults()
theirs = other._replace_defaults()
def remove_base(dct):
# removes base attributes in the physical layer.
basekeys = Column._replace_defaults(self).keys()
for k in basekeys:
del dct[k]
remove_base(mine)
remove_base(theirs)
# Check categories via Column.equals(). Pop them off the
# dicts so the == below doesn't try to invoke `__eq__()`
if ("categories" in mine) or ("categories" in theirs):
if "categories" not in mine:
return False
if "categories" not in theirs:
return False
if not mine.pop("categories").equals(theirs.pop("categories")):
return False
return type(self) == type(other) and mine == theirs
def _replace_defaults(self):
params = super(TypedColumnBase, self)._replace_defaults()
params.update(dict(dtype=self._dtype))
return params
def _mimic_inplace(self, result, inplace=False):
"""
If `inplace=True`, used to mimic an inplace operation
by replacing data in ``self`` with data in ``result``.
Otherwise, returns ``result`` unchanged.
"""
if inplace:
self._data = result._data
self._mask = result._mask
self._null_count = result._null_count
else:
return result
def argsort(self, ascending):
_, inds = self.sort_by_values(ascending=ascending)
return inds
def sort_by_values(self, ascending):
raise NotImplementedError
def find_and_replace(self, to_replace, values):
raise NotImplementedError
def dropna(self):
dropped_col = libcudf.stream_compaction.drop_nulls([self])
if not dropped_col:
return column_empty_like(self, newsize=0)
else:
return self.replace(
data=dropped_col[0].data, mask=None, null_count=0
)
def apply_boolean_mask(self, mask):
mask = as_column(mask, dtype="bool")
data = libcudf.stream_compaction.apply_boolean_mask([self], mask)
if not data:
return column_empty_like(self, newsize=0)
else:
return self.replace(
data=data[0].data,
mask=data[0].mask,
null_count=data[0].null_count,
)
def fillna(self, fill_value, inplace):
raise NotImplementedError
def searchsorted(self, value, side="left"):
raise NotImplementedError
def astype(self, dtype, **kwargs):
if is_categorical_dtype(dtype):
return self.as_categorical_column(dtype, **kwargs)
elif pd.api.types.pandas_dtype(dtype).type in (np.str_, np.object_):
return self.as_string_column(dtype, **kwargs)
elif np.issubdtype(dtype, np.datetime64):
return self.as_datetime_column(dtype, **kwargs)
else:
return self.as_numerical_column(dtype, **kwargs)
def as_categorical_column(self, dtype, **kwargs):
if "ordered" in kwargs:
ordered = kwargs["ordered"]
else:
ordered = False
sr = cudf.Series(self)
labels, cats = sr.factorize()
# string columns include null index in factorization; remove:
if (
pd.api.types.pandas_dtype(self.dtype).type in (np.str_, np.object_)
) and self.null_count > 0:
cats = cats.dropna()
labels = labels - 1
return cudf.core.column.CategoricalColumn(
data=labels._column.data,
mask=self.mask,
null_count=self.null_count,
categories=cats._column,
ordered=ordered,
)
def as_numerical_column(self, dtype, **kwargs):
raise NotImplementedError
def as_datetime_column(self, dtype, **kwargs):
raise NotImplementedError
def as_string_column(self, dtype, **kwargs):
raise NotImplementedError
@property
def __cuda_array_interface__(self):
output = {
"shape": (len(self),),
"typestr": self.dtype.str,
"data": (self.data.mem.device_ctypes_pointer.value, True),
"version": 1,
}
if self.has_null_mask:
from types import SimpleNamespace
# Create a simple Python object that exposes the
# `__cuda_array_interface__` attribute here since we need to modify
# some of the attributes from the numba device array
mask = SimpleNamespace(
__cuda_array_interface__={
"shape": (len(self),),
"typestr": "<t1",
"data": (
self.nullmask.mem.device_ctypes_pointer.value,
True,
),
"version": 1,
}
)
output["mask"] = mask
return output
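# Illustrative sketch (assumption, not part of the original source): the
# interface above allows zero-copy consumption by other GPU libraries, e.g.
#
#     import numba.cuda, cupy
#     dev_view = numba.cuda.as_cuda_array(col)   # Numba device array view
#     cp_view = cupy.asarray(col)                # CuPy array via the same protocol
#
# Consumers that ignore the optional "mask" entry will see raw values in the
# null slots.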
def column_empty_like(column, dtype=None, masked=False, newsize=None):
"""Allocate a new column like the given *column*
"""
if dtype is None:
dtype = column.dtype
row_count = len(column) if newsize is None else newsize
categories = None
if is_categorical_dtype(dtype):
categories = column.cat().categories
dtype = column.data.dtype
return column_empty(row_count, dtype, masked, categories=categories)
def column_empty_like_same_mask(column, dtype):
"""Create a new empty Column with the same length and the same mask.
Parameters
----------
dtype : np.dtype like
The dtype of the data buffer.
"""
result = column_empty_like(column, dtype)
if column.has_null_mask:
result = result.set_mask(column.mask)
return result
def column_empty(row_count, dtype, masked, categories=None):
"""Allocate a new column like the given row_count and dtype.
"""
dtype = pd.api.types.pandas_dtype(dtype)
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
# mixed floating and integer types coexist in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import timedelta
import itertools
import warnings
import numpy as np
import pandas as pd
import ruptures as rpt
from covsirphy.util.error import SubsetNotFoundError, UnExpectedValueError, deprecate
from covsirphy.util.error import NotRegisteredMainError, NotRegisteredExtraError
from covsirphy.util.term import Term
from covsirphy.cleaning.cbase import CleaningBase
from covsirphy.cleaning.country_data import CountryData
from covsirphy.cleaning.japan_data import JapanData
from covsirphy.cleaning.jhu_data import JHUData
from covsirphy.cleaning.oxcgrt import OxCGRTData
from covsirphy.cleaning.pcr_data import PCRData
from covsirphy.cleaning.population import PopulationData
from covsirphy.cleaning.vaccine_data import VaccineData
from covsirphy.cleaning.mobility_data import MobilityData
class DataHandler(Term):
"""
Data handler for analysis.
Args:
country (str): country name
province (str or None): province name
kwargs: arguments of DataHandler.register()
"""
# Deprecated
__NAME_COUNTRY = "CountryData"
__NAME_JAPAN = "JapanData"
# Extra datasets {str: class}
__NAME_OXCGRT = "OxCGRTData"
__NAME_PCR = "PCRData"
__NAME_VACCINE = "VaccineData"
__NAME_MOBILE = "MobilityData"
EXTRA_DICT = {
__NAME_COUNTRY: CountryData,
__NAME_JAPAN: JapanData,
__NAME_OXCGRT: OxCGRTData,
__NAME_PCR: PCRData,
__NAME_VACCINE: VaccineData,
__NAME_MOBILE: MobilityData,
}
def __init__(self, country, province=None, **kwargs):
# Details of the area name
self._area_dict = {"country": str(country), "province": str(province or self.UNKNOWN)}
# Main dataset before complement
main_cols = [self.DATE, self.C, self.CI, self.F, self.R, self.S]
self._main_raw = pd.DataFrame(columns=main_cols)
# Main dataset After complement
self._main_df = pd.DataFrame(columns=main_cols)
# Extra dataset
self._extra_df = pd.DataFrame(columns=[self.DATE])
# Population
self._population = None
# Complement
self._jhu_data = None
self._complemented = None
self._comp_dict = {}
# Date
self._first_date = None
self._last_date = None
self._today = None
# Register datasets: date and main columns will be set internally if main data available
self.register(**kwargs)
@property
def main_satisfied(self):
"""
bool: all main datasets were registered or not
"""
return not self._main_raw.empty
@property
def complemented(self):
"""
bool or str: whether complemented or not and the details
Raises:
NotRegisteredMainError: no information because JHUData was not registered
"""
if not self.main_satisfied:
raise NotRegisteredMainError(".register(jhu_data)")
return self._complemented
@property
@deprecate("DataHandler.population property", version="2.19.1-lambda")
def population(self):
"""
int: population value
Raises:
NotRegisteredMainError: no information because JHUData was not registered
"""
if self._population is None:
raise NotRegisteredMainError(".register(jhu_data)")
return self._population
@property
def first_date(self):
"""
str or None: the first date of the records
"""
return self._first_date.strftime(self.DATE_FORMAT)
@property
def last_date(self):
"""
str or None: the last date of the records
"""
return self._last_date.strftime(self.DATE_FORMAT)
@property
def today(self):
"""
str or None: reference date to determine whether a phase is a past phase or a future phase
"""
return self._today.strftime(self.DATE_FORMAT)
def register(self, jhu_data=None, population_data=None, extras=None):
"""
Register datasets.
Args:
jhu_data (covsirphy.JHUData or None): object of records
population_data (covsirphy.PopulationData or None): PopulationData object (deprecated)
extras (list[covsirphy.CleaningBase] or None): extra datasets
Raises:
TypeError: non-data cleaning instance was included
UnExpectedValueError: instance of un-expected data cleaning class was included as an extra dataset
"""
# Main: JHUData
if jhu_data is not None:
self._ensure_instance(jhu_data, JHUData, name="jhu_data")
try:
self._main_raw = jhu_data.subset(**self._area_dict, recovered_min=0)
except SubsetNotFoundError as e:
raise e from None
self._jhu_data = jhu_data
self.switch_complement(whether=True)
# Main: PopulationData
if population_data is not None:
warnings.warn(
".register(population_data) was deprecated because population values are included in JHUData.",
DeprecationWarning,
stacklevel=2
)
self._ensure_instance(population_data, PopulationData, name="population_data")
self._population = population_data.value(**self._area_dict)
# Extra datasets
if extras is not None:
self._register_extras(extras)
def _register_extras(self, extras):
"""
Verify the extra datasets.
Args:
extras (list[covsirphy.CleaningBase]): extra datasets
Raises:
TypeError: non-data cleaning instance was included as an extra dataset
UnExpectedValueError: instance of un-expected data cleaning class was included as an extra dataset
"""
self._ensure_list(extras, name="extras")
# Verify the datasets
for (i, extra_data) in enumerate(extras, start=1):
statement = f"{self.num2str(i)} extra dataset"
# Check the data is a data cleaning class
self._ensure_instance(extra_data, CleaningBase, name=statement)
# Check the data can be accepted as an extra dataset
if isinstance(extra_data, (CountryData, JapanData)):
warnings.warn(
".register(extras=[CountryData, JapanData]) was deprecated because its role is played by the other classes.",
DeprecationWarning,
stacklevel=2
)
if isinstance(extra_data, tuple(self.EXTRA_DICT.values())):
continue
raise UnExpectedValueError(
name=statement, value=type(extra_data), candidates=list(self.EXTRA_DICT.keys()))
# Register the datasets
extra_df = self._extra_df.set_index(self.DATE)
for (extra_data, data_class) in itertools.product(extras, self.EXTRA_DICT.values()):
if isinstance(extra_data, data_class):
try:
subset_df = extra_data.subset(**self._area_dict)
except TypeError:
subset_df = extra_data.subset(country=self._area_dict["country"])
except SubsetNotFoundError:
continue
extra_df = extra_df.combine_first(subset_df.set_index(self.DATE))
self._extra_df = extra_df.reset_index()
def recovery_period(self):
"""
Return representative value of recovery period of all countries.
Raises:
NotRegisteredMainError: JHUData was not registered
Returns:
int or None: recovery period [days]
"""
if self._jhu_data is None:
raise NotRegisteredMainError(".register(jhu_data)")
return self._jhu_data.recovery_period
def records_main(self):
"""
Return records of the main datasets as a dataframe from the first date to the last date.
Raises:
NotRegisteredMainError: JHUData was not registered
SubsetNotFoundError: failed in subsetting because of lack of data
Returns:
pandas.DataFrame:
Index
reset index
Columns:
- Date (pd.Timestamp): Observation date
- Confirmed (int): the number of confirmed cases
- Infected (int): the number of currently infected cases
- Fatal (int): the number of fatal cases
- Recovered (int): the number of recovered cases ( > 0)
- Susceptible (int): the number of susceptible cases
"""
if self._main_df.empty:
raise NotRegisteredMainError(".register(jhu_data)")
df = self._main_df.copy()
df = df.loc[(df[self.DATE] >= self._first_date) & (df[self.DATE] <= self._last_date)]
return df.reset_index(drop=True)
def switch_complement(self, whether=None, **kwargs):
"""
Switch whether perform auto complement or not.
Args:
whether (bool): if True and necessary, the number of cases will be complemented
kwargs: the other arguments of JHUData.subset_complement()
"""
if not whether:
df = self._main_raw.copy()
self._main_df = df.loc[df[self.R] > 0].reset_index(drop=True)
self._complemented = False
return
self._comp_dict.update(kwargs)
if self._jhu_data is None:
return
self._main_df, self._complemented = self._jhu_data.records(**self._area_dict, **self._comp_dict)
self.timepoints()
def show_complement(self, **kwargs):
"""
Show the details of complement that was (or will be) performed for the records.
Args:
kwargs: keyword arguments of JHUDataComplementHandler() i.e. control factors of complement
Raises:
NotRegisteredMainError: JHUData was not registered
Returns:
pandas.DataFrame: as the same as JHUData.show_complement()
"""
if self._jhu_data is None:
raise NotRegisteredMainError(".register(jhu_data)")
comp_dict = self._comp_dict.copy()
comp_dict.update(kwargs)
return self._jhu_data.show_complement(
start_date=self._first_date, end_date=self._last_date, **self._area_dict, **comp_dict)
def timepoints(self, first_date=None, last_date=None, today=None):
"""
Set the range of data and reference date to determine past/future of phases.
Args:
first_date (str or None): the first date of the records or None (min date of main dataset)
last_date (str or None): the last date of the records or None (max date of main dataset)
today (str or None): reference date to determine whether a phase is a past phase or a future phase
Raises:
NotRegisteredMainError: JHUData was not registered
SubsetNotFoundError: failed in subsetting because of lack of data
Note:
When @today is None, the reference date will be the same as @last_date (or max date).
"""
df = self._main_df.copy()
first_date = self._ensure_date(
first_date, name="first_date", default=self._first_date or df[self.DATE].min())
last_date = self._ensure_date(
last_date, name="last_date", default=self._last_date or df[self.DATE].max())
today = self._ensure_date(today, name="today", default=min(self._today or last_date, last_date))
# Check the order of dates
self._ensure_date_order(df[self.DATE].min(), first_date, name="first_date")
self._ensure_date_order(last_date, df[self.DATE].max(), name="the last date before changing")
self._ensure_date_order(first_date, today, name="today")
self._ensure_date_order(today, last_date, name="last_date")
# Set timepoints
self._first_date = first_date
self._last_date = last_date
self._today = today
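# Illustrative usage sketch (assumption; the date strings are hypothetical
# examples):
#
#     handler.timepoints(first_date="01Apr2020", last_date="01Sep2020",
#                        today="01Aug2020")
#
# After this call first_date <= today <= last_date holds and records_main()
# only returns rows inside that window.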
def records_extras(self):
"""
Return records of the extra datasets as a dataframe.
Raises:
NotRegisteredMainError: JHUData was not registered
NotRegisteredExtraError: no extra datasets were registered
Returns:
pandas.DataFrame:
Index
reset index
Columns:
- Date(pd.Timestamp): Observation date
- columns defined in the extra datasets
"""
if self._main_df.empty:
raise NotRegisteredMainError(".register(jhu_data)")
if self._extra_df.empty:
raise NotRegisteredExtraError(
".register(jhu_data, extras=[...])", message="with extra datasets")
# Get all subset
df = self._extra_df.copy()
# Remove columns which is included in the main datasets
unused_set = set(self._main_df.columns) - set([self.DATE])
df = df.loc[:, ~df.columns.isin(unused_set)]
# Data cleaning
df = df.set_index(self.DATE).resample("D").last()
df = df.fillna(method="ffill").fillna(0)
# Subsetting by dates
df = df.loc[self._first_date: self._last_date]
# Convert float values to integer if values will not be changed
for col in df.columns:
converted2int = df[col].astype(np.int64)
if np.array_equal(converted2int, df[col]):
df[col] = converted2int
return df.reset_index()
def _records(self, main=True, extras=True):
"""
Return records of the datasets as a dataframe.
Args:
main (bool): whether include main datasets or not
extras (bool): whether include extra datasets or not
Raises:
NotRegisteredMainError: JHUData was not registered
SubsetNotFoundError: failed in subsetting because of lack of data
NotRegisteredExtraError: @extras is True and no extra datasets were registered
ValueError: both of @main and @extras were False
Returns:
pandas.DataFrame:
Index
reset index
Columns:
- Date(pd.Timestamp): Observation date
- if @main is True,
- Confirmed(int): the number of confirmed cases
- Infected(int): the number of currently infected cases
- Fatal(int): the number of fatal cases
- Recovered (int): the number of recovered cases ( > 0)
- Susceptible(int): the number of susceptible cases
- if @extra is True,
- columns defined in the extra datasets
"""
if main and extras:
main_df = self.records_main()
extra_df = self.records_extras()
return main_df.merge(extra_df, on=self.DATE)
if main:
return self.records_main()
if extras:
return self.records_extras()
raise ValueError("Either @main or @extras must be True.")
def records(self, main=True, extras=True, past=True, future=True):
"""
Return records of the datasets as a dataframe.
Args:
main (bool): whether include main datasets or not
extras (bool): whether include extra datasets or not
past (bool): whether include past records or not
future (bool): whether include future records or not
Raises:
NotRegisteredMainError: JHUData was not registered
SubsetNotFoundError: failed in subsetting because of lack of data
NotRegisteredExtraError: @extras is True and no extra datasets were registered
ValueError: both of @main and @extras were False, or both of @past and @future were False
Returns:
pandas.DataFrame:
Index
reset index
Columns:
- Date(pd.Timestamp): Observation date
- if @main is True,
- Confirmed(int): the number of confirmed cases
- Infected(int): the number of currently infected cases
- Fatal(int): the number of fatal cases
- Recovered (int): the number of recovered cases ( > 0)
- Susceptible(int): the number of susceptible cases
- if @extra is True,
- columns defined in the extra datasets
"""
if past and future:
return self._records(main=main, extras=extras)
if not past and not future:
raise ValueError("Either @past or @future must be True.")
df = self._records(main=main, extras=extras).set_index(self.DATE)
if past:
return df.loc[:self._today].reset_index()
if future:
return df.loc[self._today + timedelta(days=1):].reset_index()
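# Illustrative usage sketch (assumption): splitting the registered records at
# the reference date `today`:
#
#     past_df = handler.records(main=True, extras=False, past=True, future=False)
#     future_df = handler.records(main=True, extras=False, past=False, future=True)
#
# Rows up to and including `today` land in past_df; later rows in future_df.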
def records_all(self):
"""
Return registered all records of the datasets as a dataframe.
Raises:
NotRegisteredMainError: JHUData was not registered
SubsetNotFoundError: failed in subsetting because of lack of data
Returns:
pandas.DataFrame:
Index
reset index
Columns:
- Date(pd.Timestamp): Observation date
- Confirmed(int): the number of confirmed cases
- Infected(int): the number of currently infected cases
- Fatal(int): the number of fatal cases
- Recovered (int): the number of recovered cases ( > 0)
- Susceptible(int): the number of susceptible cases
- columns defined in the extra datasets
"""
try:
return self.records(main=True, extras=True, past=True, future=True)
except NotRegisteredExtraError:
return self.records(main=True, extras=False, past=True, future=True)
def estimate_delay(self, indicator, target, min_size=7, use_difference=False, delay_name="Period Length"):
"""
Estimate the average day [days] between the indicator and the target.
We assume that the indicator impact on the target value with delay.
All results will be returned with a dataframe.
Args:
indicator (str): indicator name, a column of any registered datasets
target (str): target name, a column of any registered datasets
min_size (int): minimum size of the delay period
use_difference (bool): if True, use first discrete difference of target
delay_name (str): column name of delay in the output dataframe
Raises:
NotRegisteredMainError: JHUData was not registered
SubsetNotFoundError: failed in subsetting because of lack of data
UserWarning: failed in calculating and returned the default value (recovery period)
Returns:
pandas.DataFrame:
Index
reset index
Columns
- (int or float): column defined by @indicator
- (int or float): column defined by @target
- (int): column defined by @delay_name [days]
Note:
- We use change point analysis of ruptures package. Refer to the documentation.
https://centre-borelli.github.io/ruptures-docs/
- When failed in calculation, recovery period will be returned after raising UserWarning.
"""
output_cols = [target, indicator, delay_name]
# Create dataframe with indicator and target
record_df = self.records_all()
self._ensure_list(
[indicator, target], candidates=record_df.columns.tolist(), name="indicator and target")
if use_difference:
record_df[target] = record_df[target].diff()
pivot_df = record_df.pivot_table(values=indicator, index=target)
run_df = pivot_df.copy()
# Convert index (target) to serial numbers
serial_df = pd.DataFrame(np.arange(1, run_df.index.max() + 1, 1))
serial_df.index += 1
run_df = run_df.join(serial_df, how="outer")
series = run_df.reset_index(drop=True).iloc[:, 0].dropna()
# Detection with Ruptures using indicator values
warnings.simplefilter("ignore", category=RuntimeWarning)
algorithm = rpt.Pelt(model="rbf", jump=1, min_size=min_size)
try:
results = algorithm.fit_predict(series.values, pen=0.5)
except ValueError:
default_delay = self.recovery_period()
warnings.warn(
f"Delay days could not be estimated and delay set to default: {default_delay} [days]",
UserWarning, stacklevel=2)
return pd.DataFrame(columns=output_cols)
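# Illustrative usage sketch (assumption; the column names are hypothetical and
# must exist in the registered main/extra datasets):
#
#     delay_df = handler.estimate_delay(indicator="Stringency_index",
#                                       target="Confirmed", use_difference=True)
#     delay_df["Period Length"].describe()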
"""
This script visualises the prevention parameters of the first and second COVID-19 waves.
Arguments:
----------
-f:
Filename of samples dictionary to be loaded. Default location is ~/data/interim/model_parameters/COVID19_SEIRD/calibrations/national/
Returns:
--------
Example use:
------------
"""
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2020 by <NAME>, BIOMATH, Ghent University. All Rights Reserved."
# ----------------------
# Load required packages
# ----------------------
import json
import argparse
import datetime
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.transforms import offset_copy
from covid19model.models import models
from covid19model.data import mobility, sciensano, model_parameters
from covid19model.models.time_dependant_parameter_fncs import ramp_fun
from covid19model.visualization.output import _apply_tick_locator
from covid19model.visualization.utils import colorscale_okabe_ito, moving_avg
# covid 19 specific parameters
plt.rcParams.update({
"axes.prop_cycle": plt.cycler('color',
list(colorscale_okabe_ito.values())),
})
# -----------------------
# Handle script arguments
# -----------------------
parser = argparse.ArgumentParser()
parser.add_argument("-n", "--n_samples", help="Number of samples used to visualise model fit", default=100, type=int)
parser.add_argument("-k", "--n_draws_per_sample", help="Number of binomial draws per sample drawn used to visualize model fit", default=1, type=int)
args = parser.parse_args()
#################################################
## PART 1: Comparison of total number of cases ##
#################################################
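# NOTE (editorial assumption): `df_sciensano` used below is expected to have
# been loaded earlier, e.g. through the `sciensano` module imported above, as
# a dataframe of daily case counts per age group indexed by date.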
youth = moving_avg((df_sciensano['C_0_9']+df_sciensano['C_10_19']).to_frame())
cases_youth_nov21 = youth[youth.index == pd.to_datetime('2020-11-21')].values
cases_youth_rel = moving_avg((df_sciensano['C_0_9']+df_sciensano['C_10_19']).to_frame())/cases_youth_nov21*100
work = moving_avg((df_sciensano['C_20_29']+df_sciensano['C_30_39']+df_sciensano['C_40_49']+df_sciensano['C_50_59']).to_frame())
cases_work_nov21 = work[work.index == pd.to_datetime('2020-11-21')].values
cases_work_rel = work/cases_work_nov21*100
old = moving_avg((df_sciensano['C_60_69']+df_sciensano['C_70_79']+df_sciensano['C_80_89']+df_sciensano['C_90+']).to_frame())
cases_old_nov21 = old[old.index == pd.to_datetime('2020-11-21')].values
cases_old_rel = old/cases_old_nov21*100
fig,ax=plt.subplots(figsize=(12,4.3))
ax.plot(df_sciensano.index, cases_youth_rel, linewidth=1.5, color='black')
ax.plot(df_sciensano.index, cases_work_rel, linewidth=1.5, color='orange')
ax.plot(df_sciensano.index, cases_old_rel, linewidth=1.5, color='blue')
ax.axvspan(pd.to_datetime('2020-11-21'), pd.to_datetime('2020-12-18'), color='black', alpha=0.2)
ax.axvspan(pd.to_datetime('2021-01-09'), pd.to_datetime('2021-02-15'), color='black', alpha=0.2)
ax.set_xlim([pd.to_datetime('2020-11-05'), pd.to_datetime('2021-02-01')])
ax.set_ylim([0,320])
ax.set_ylabel('Relative number of cases as compared\n to November 16th, 2020 (%)')
#ax.set_xticks([pd.to_datetime('2020-11-16'), pd.to_datetime('2020-12-18'), pd.to_datetime('2021-01-04')])
ax.legend(['$[0,20[$','$[20,60[$','$[60,\infty[$'], bbox_to_anchor=(1.05, 1), loc='upper left')
ax = _apply_tick_locator(ax)
ax.set_yticks([0,100,200,300])
ax.grid(False)
plt.tight_layout()
plt.show()
def crosscorr(datax, datay, lag=0):
""" Lag-N cross correlation.
Parameters
----------
lag : int, default 0
datax, datay : pandas.Series objects of equal length
Returns
----------
crosscorr : float
"""
return datax.corr(datay.shift(lag))
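# Illustrative usage sketch (assumption, not in the original script):
#
#     crosscorr(cases_youth_rel.squeeze(), cases_work_rel.squeeze(), lag=3)
#
# datax.corr(datay.shift(lag)) compares datax at time t with datay at time
# t - lag, which is what the loop below scans over lag_series.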
lag_series = range(-15,8)
covariance_youth_work = []
covariance_youth_old = []
covariance_work_old = []
for lag in lag_series:
covariance_youth_work.append(crosscorr(cases_youth_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),cases_work_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),lag=lag))
covariance_youth_old.append(crosscorr(cases_youth_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),cases_old_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),lag=lag))
covariance_work_old.append(crosscorr(cases_work_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),cases_old_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),lag=lag))
covariances = [covariance_youth_work, covariance_youth_old, covariance_work_old]
for i in range(3):
n = len(covariances[i])
k = max(covariances[i])
idx=np.argmax(covariances[i])
tau = lag_series[idx]
sig = 2/np.sqrt(n-abs(k))
if k >= sig:
print(tau, k, True)
else:
print(tau, k, False)
fig,(ax1,ax2)=plt.subplots(nrows=2,ncols=1,figsize=(15,10))
# First part
ax1.plot(df_sciensano.index, cases_youth_rel, linewidth=1.5, color='black')
ax1.plot(df_sciensano.index, cases_work_rel, linewidth=1.5, color='orange')
ax1.plot(df_sciensano.index, cases_old_rel, linewidth=1.5, color='blue')
ax1.axvspan(pd.to_datetime('2020-11-21'), pd.to_datetime('2020-12-18'), color='black', alpha=0.2)
ax1.axvspan(pd.to_datetime('2021-01-09'), pd.to_datetime('2021-02-15'), color='black', alpha=0.2)
ax1.set_xlim([pd.to_datetime('2020-11-05'), pd.to_datetime('2021-02-01')])
ax1.set_ylim([0,300])
ax1.set_ylabel('Relative number of cases as compared\n to November 16th, 2020 (%)')
#ax.set_xticks([pd.to_datetime('2020-11-16'), pd.to_datetime('2020-12-18'), pd.to_datetime('2021-01-04')])
ax1.legend(['$[0,20[$','$[20,60[$','$[60,\infty[$'], bbox_to_anchor=(1.05, 1), loc='upper left')
ax1 = _apply_tick_locator(ax1)
# Second part
ax2.scatter(lag_series, covariance_youth_work, color='black',alpha=0.6,linestyle='None',facecolors='none', s=30, linewidth=1)
ax2.scatter(lag_series, covariance_youth_old, color='black',alpha=0.6, linestyle='None',facecolors='none', s=30, linewidth=1, marker='s')
ax2.scatter(lag_series, covariance_work_old, color='black',alpha=0.6, linestyle='None',facecolors='none', s=30, linewidth=1, marker='D')
ax2.legend(['$[0,20[$ vs. $[20,60[$', '$[0,20[$ vs. $[60,\infty[$', '$[20,60[$ vs. $[60, \infty[$'], bbox_to_anchor=(1.05, 1), loc='upper left')
ax2.plot(lag_series, covariance_youth_work, color='black', linestyle='--', linewidth=1)
ax2.plot(lag_series, covariance_youth_old, color='black',linestyle='--', linewidth=1)
ax2.plot(lag_series, covariance_work_old, color='black',linestyle='--', linewidth=1)
ax2.axvline(0,linewidth=1, color='black')
ax2.grid(False)
ax2.set_ylabel('lag-$\\tau$ cross correlation (-)')
ax2.set_xlabel('$\\tau$ (days)')
plt.tight_layout()
plt.show()
fig,ax = plt.subplots(figsize=(15,5))
ax.scatter(lag_series, covariance_youth_work, color='black',alpha=0.6,linestyle='None',facecolors='none', s=30, linewidth=1)
ax.scatter(lag_series, covariance_youth_old, color='black',alpha=0.6, linestyle='None',facecolors='none', s=30, linewidth=1, marker='s')
ax.scatter(lag_series, covariance_work_old, color='black',alpha=0.6, linestyle='None',facecolors='none', s=30, linewidth=1, marker='D')
ax.legend(['$[0,20[$ vs. $[20,60[$', '$[0,20[$ vs. $[60,\infty[$', '$[20,60[$ vs. $[60, \infty[$'], bbox_to_anchor=(1.05, 1), loc='upper left')
ax.plot(lag_series, covariance_youth_work, color='black', linestyle='--', linewidth=1)
ax.plot(lag_series, covariance_youth_old, color='black',linestyle='--', linewidth=1)
ax.plot(lag_series, covariance_work_old, color='black',linestyle='--', linewidth=1)
ax.axvline(0,linewidth=1, color='black')
ax.grid(False)
ax.set_ylabel('lag-$\\tau$ cross correlation (-)')
ax.set_xlabel('$\\tau$ (days)')
plt.tight_layout()
plt.show()
#####################################################
## PART 1: Calibration robustness figure of WAVE 1 ##
#####################################################
n_calibrations = 6
n_prevention = 3
conf_int = 0.05
# -------------------------
# Load samples dictionaries
# -------------------------
samples_dicts = [
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-15.json')), # 2020-04-04
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-13.json')), # 2020-04-15
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-23.json')), # 2020-05-01
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-18.json')), # 2020-05-15
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-21.json')), # 2020-06-01
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-22.json')) # 2020-07-01
]
warmup = int(samples_dicts[0]['warmup'])
# Start of data collection
start_data = '2020-03-15'
# First datapoint used in inference
start_calibration = '2020-03-15'
# Last datapoint used in inference
end_calibrations = ['2020-04-04', '2020-04-15', '2020-05-01', '2020-05-15', '2020-06-01', '2020-07-01']
# Start- and enddate of plotfit
start_sim = start_calibration
end_sim = '2020-07-14'
# ---------
# Load data
# ---------
# Contact matrices
initN, Nc_home, Nc_work, Nc_schools, Nc_transport, Nc_leisure, Nc_others, Nc_total = model_parameters.get_interaction_matrices(dataset='willem_2012')
Nc_all = {'total': Nc_total, 'home':Nc_home, 'work': Nc_work, 'schools': Nc_schools, 'transport': Nc_transport, 'leisure': Nc_leisure, 'others': Nc_others}
levels = initN.size
# Google Mobility data
df_google = mobility.get_google_mobility_data(update=False)
# ---------------------------------
# Time-dependant parameter function
# ---------------------------------
# Extract build contact matrix function
from covid19model.models.time_dependant_parameter_fncs import make_contact_matrix_function, ramp_fun
contact_matrix_4prev, all_contact, all_contact_no_schools = make_contact_matrix_function(df_google, Nc_all)
# Define policy function
def policies_wave1_4prev(t, states, param, l , tau, prev_schools, prev_work, prev_rest, prev_home):
# Convert tau and l to dates
tau_days = pd.Timedelta(tau, unit='D')
l_days = pd.Timedelta(l, unit='D')
# Define key dates of first wave
t1 = pd.Timestamp('2020-03-15') # start of lockdown
t2 = pd.Timestamp('2020-05-15') # gradual re-opening of schools (assume 50% of nominal scenario)
t3 = pd.Timestamp('2020-07-01') # start of summer holidays
t4 = pd.Timestamp('2020-09-01') # end of summer holidays
# Define key dates of second wave
t5 = pd.Timestamp('2020-10-19') # lockdown (1)
t6 = pd.Timestamp('2020-11-02') # lockdown (2)
t7 = pd.Timestamp('2020-11-16') # schools re-open
t8 = pd.Timestamp('2020-12-18') # Christmas holiday starts
t9 = pd.Timestamp('2021-01-04') # Christmas holiday ends
t10 = pd.Timestamp('2021-02-15') # Spring break starts
t11 = pd.Timestamp('2021-02-21') # Spring break ends
t12 = pd.Timestamp('2021-04-05')
"""SQL io tests
The SQL tests are broken down in different classes:
- `PandasSQLTest`: base class with common methods for all test classes
- Tests for the public API (only tests with sqlite3)
- `_TestSQLApi` base class
- `TestSQLApi`: test the public API with sqlalchemy engine
- `TestSQLiteFallbackApi`: test the public API with a sqlite DBAPI
connection
- Tests for the different SQL flavors (flavor specific type conversions)
- Tests for the sqlalchemy mode: `_TestSQLAlchemy` is the base class with
common methods, `_TestSQLAlchemyConn` tests the API with a SQLAlchemy
Connection object. The different tested flavors (sqlite3, MySQL,
PostgreSQL) derive from the base class
- Tests for the fallback mode (`TestSQLiteFallback`)
"""
import csv
from datetime import date, datetime, time
from io import StringIO
import sqlite3
import warnings
import numpy as np
import pytest
from pandas.core.dtypes.common import is_datetime64_dtype, is_datetime64tz_dtype
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
concat,
date_range,
isna,
to_datetime,
to_timedelta,
)
import pandas._testing as tm
import pandas.io.sql as sql
from pandas.io.sql import read_sql_query, read_sql_table
try:
import sqlalchemy
import sqlalchemy.schema
import sqlalchemy.sql.sqltypes as sqltypes
from sqlalchemy.ext import declarative
from sqlalchemy.orm import session as sa_session
SQLALCHEMY_INSTALLED = True
except ImportError:
SQLALCHEMY_INSTALLED = False
SQL_STRINGS = {
"create_iris": {
"sqlite": """CREATE TABLE iris (
"SepalLength" REAL,
"SepalWidth" REAL,
"PetalLength" REAL,
"PetalWidth" REAL,
"Name" TEXT
)""",
"mysql": """CREATE TABLE iris (
`SepalLength` DOUBLE,
`SepalWidth` DOUBLE,
`PetalLength` DOUBLE,
`PetalWidth` DOUBLE,
`Name` VARCHAR(200)
)""",
"postgresql": """CREATE TABLE iris (
"SepalLength" DOUBLE PRECISION,
"SepalWidth" DOUBLE PRECISION,
"PetalLength" DOUBLE PRECISION,
"PetalWidth" DOUBLE PRECISION,
"Name" VARCHAR(200)
)""",
},
"insert_iris": {
"sqlite": """INSERT INTO iris VALUES(?, ?, ?, ?, ?)""",
"mysql": """INSERT INTO iris VALUES(%s, %s, %s, %s, "%s");""",
"postgresql": """INSERT INTO iris VALUES(%s, %s, %s, %s, %s);""",
},
"create_test_types": {
"sqlite": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TEXT,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" REAL,
"IntCol" INTEGER,
"BoolCol" INTEGER,
"IntColWithNull" INTEGER,
"BoolColWithNull" INTEGER
)""",
"mysql": """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` DATETIME,
`IntDateCol` INTEGER,
`IntDateOnlyCol` INTEGER,
`FloatCol` DOUBLE,
`IntCol` INTEGER,
`BoolCol` BOOLEAN,
`IntColWithNull` INTEGER,
`BoolColWithNull` BOOLEAN
)""",
"postgresql": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TIMESTAMP,
"DateColWithTz" TIMESTAMP WITH TIME ZONE,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" DOUBLE PRECISION,
"IntCol" INTEGER,
"BoolCol" BOOLEAN,
"IntColWithNull" INTEGER,
"BoolColWithNull" BOOLEAN
)""",
},
"insert_test_types": {
"sqlite": {
"query": """
INSERT INTO types_test_data
VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"mysql": {
"query": """
INSERT INTO types_test_data
VALUES("%s", %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"postgresql": {
"query": """
INSERT INTO types_test_data
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"DateColWithTz",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
},
"read_parameters": {
"sqlite": "SELECT * FROM iris WHERE Name=? AND SepalLength=?",
"mysql": 'SELECT * FROM iris WHERE `Name`="%s" AND `SepalLength`=%s',
"postgresql": 'SELECT * FROM iris WHERE "Name"=%s AND "SepalLength"=%s',
},
"read_named_parameters": {
"sqlite": """
SELECT * FROM iris WHERE Name=:name AND SepalLength=:length
""",
"mysql": """
SELECT * FROM iris WHERE
`Name`="%(name)s" AND `SepalLength`=%(length)s
""",
"postgresql": """
SELECT * FROM iris WHERE
"Name"=%(name)s AND "SepalLength"=%(length)s
""",
},
"create_view": {
"sqlite": """
CREATE VIEW iris_view AS
SELECT * FROM iris
"""
},
}
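# Illustrative example of how the flavor-keyed templates above are consumed by the
# fixtures below ("cursor" stands for any DBAPI cursor; sqlite flavor shown):
#
#   ins = SQL_STRINGS["insert_iris"]["sqlite"]
#   cursor.execute(ins, (5.1, 3.5, 1.4, 0.2, "Iris-setosa"))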
class MixInBase:
def teardown_method(self, method):
# if setup fails, there may not be a connection to close.
if hasattr(self, "conn"):
for tbl in self._get_all_tables():
self.drop_table(tbl)
self._close_conn()
class MySQLMixIn(MixInBase):
def drop_table(self, table_name):
cur = self.conn.cursor()
cur.execute(f"DROP TABLE IF EXISTS {sql._get_valid_mysql_name(table_name)}")
self.conn.commit()
def _get_all_tables(self):
cur = self.conn.cursor()
cur.execute("SHOW TABLES")
return [table[0] for table in cur.fetchall()]
def _close_conn(self):
from pymysql.err import Error
try:
self.conn.close()
except Error:
pass
class SQLiteMixIn(MixInBase):
def drop_table(self, table_name):
self.conn.execute(
f"DROP TABLE IF EXISTS {sql._get_valid_sqlite_name(table_name)}"
)
self.conn.commit()
def _get_all_tables(self):
c = self.conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
return [table[0] for table in c.fetchall()]
def _close_conn(self):
self.conn.close()
class SQLAlchemyMixIn(MixInBase):
def drop_table(self, table_name):
sql.SQLDatabase(self.conn).drop_table(table_name)
def _get_all_tables(self):
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
table_list = meta.tables.keys()
return table_list
def _close_conn(self):
pass
class PandasSQLTest:
"""
Base class with common private methods for SQLAlchemy and fallback cases.
"""
def _get_exec(self):
if hasattr(self.conn, "execute"):
return self.conn
else:
return self.conn.cursor()
@pytest.fixture(params=[("data", "iris.csv")])
def load_iris_data(self, datapath, request):
import io
iris_csv_file = datapath(*request.param)
if not hasattr(self, "conn"):
self.setup_connect()
self.drop_table("iris")
self._get_exec().execute(SQL_STRINGS["create_iris"][self.flavor])
with io.open(iris_csv_file, mode="r", newline=None) as iris_csv:
r = csv.reader(iris_csv)
next(r) # skip header row
ins = SQL_STRINGS["insert_iris"][self.flavor]
for row in r:
self._get_exec().execute(ins, row)
def _load_iris_view(self):
self.drop_table("iris_view")
self._get_exec().execute(SQL_STRINGS["create_view"][self.flavor])
def _check_iris_loaded_frame(self, iris_frame):
pytype = iris_frame.dtypes[0].type
row = iris_frame.iloc[0]
assert issubclass(pytype, np.floating)
tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def _load_test1_data(self):
columns = ["index", "A", "B", "C", "D"]
data = [
(
"2000-01-03 00:00:00",
0.980268513777,
3.68573087906,
-0.364216805298,
-1.15973806169,
),
(
"2000-01-04 00:00:00",
1.04791624281,
-0.0412318367011,
-0.16181208307,
0.212549316967,
),
(
"2000-01-05 00:00:00",
0.498580885705,
0.731167677815,
-0.537677223318,
1.34627041952,
),
(
"2000-01-06 00:00:00",
1.12020151869,
1.56762092543,
0.00364077397681,
0.67525259227,
),
]
self.test_frame1 = DataFrame(data, columns=columns)
def _load_test2_data(self):
df = DataFrame(
dict(
A=[4, 1, 3, 6],
B=["asd", "gsq", "ylt", "jkl"],
C=[1.1, 3.1, 6.9, 5.3],
D=[False, True, True, False],
E=["1990-11-22", "1991-10-26", "1993-11-26", "1995-12-12"],
)
)
df["E"] = to_datetime(df["E"])
self.test_frame2 = df
def _load_test3_data(self):
columns = ["index", "A", "B"]
data = [
("2000-01-03 00:00:00", 2 ** 31 - 1, -1.987670),
("2000-01-04 00:00:00", -29, -0.0412318367011),
("2000-01-05 00:00:00", 20000, 0.731167677815),
("2000-01-06 00:00:00", -290867, 1.56762092543),
]
self.test_frame3 = DataFrame(data, columns=columns)
def _load_raw_sql(self):
self.drop_table("types_test_data")
self._get_exec().execute(SQL_STRINGS["create_test_types"][self.flavor])
ins = SQL_STRINGS["insert_test_types"][self.flavor]
data = [
{
"TextCol": "first",
"DateCol": "2000-01-03 00:00:00",
"DateColWithTz": "2000-01-01 00:00:00-08:00",
"IntDateCol": 535852800,
"IntDateOnlyCol": 20101010,
"FloatCol": 10.10,
"IntCol": 1,
"BoolCol": False,
"IntColWithNull": 1,
"BoolColWithNull": False,
},
{
"TextCol": "first",
"DateCol": "2000-01-04 00:00:00",
"DateColWithTz": "2000-06-01 00:00:00-07:00",
"IntDateCol": 1356998400,
"IntDateOnlyCol": 20101212,
"FloatCol": 10.10,
"IntCol": 1,
"BoolCol": False,
"IntColWithNull": None,
"BoolColWithNull": None,
},
]
for d in data:
self._get_exec().execute(
ins["query"], [d[field] for field in ins["fields"]]
)
def _count_rows(self, table_name):
result = (
self._get_exec()
.execute(f"SELECT count(*) AS count_1 FROM {table_name}")
.fetchone()
)
return result[0]
def _read_sql_iris(self):
iris_frame = self.pandasSQL.read_query("SELECT * FROM iris")
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_parameter(self):
query = SQL_STRINGS["read_parameters"][self.flavor]
params = ["Iris-setosa", 5.1]
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_named_parameter(self):
query = SQL_STRINGS["read_named_parameters"][self.flavor]
params = {"name": "Iris-setosa", "length": 5.1}
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _to_sql(self, method=None):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=method)
assert self.pandasSQL.has_table("test_frame1")
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
# Nuke table
self.drop_table("test_frame1")
def _to_sql_empty(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1.iloc[:0], "test_frame1")
def _to_sql_fail(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
assert self.pandasSQL.has_table("test_frame1")
msg = "Table 'test_frame1' already exists"
with pytest.raises(ValueError, match=msg):
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
self.drop_table("test_frame1")
def _to_sql_replace(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
# Add to table again
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="replace")
assert self.pandasSQL.has_table("test_frame1")
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
self.drop_table("test_frame1")
def _to_sql_append(self):
# Nuke table just in case
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
# Add to table again
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="append")
assert self.pandasSQL.has_table("test_frame1")
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
self.drop_table("test_frame1")
def _to_sql_method_callable(self):
check = [] # used to double check function below is really being used
def sample(pd_table, conn, keys, data_iter):
check.append(1)
data = [dict(zip(keys, row)) for row in data_iter]
conn.execute(pd_table.table.insert(), data)
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=sample)
assert self.pandasSQL.has_table("test_frame1")
assert check == [1]
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
# Nuke table
self.drop_table("test_frame1")
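    # Public-API counterpart of the callable insertion method tested above (illustrative):
    #   df.to_sql("table_name", conn, method=sample)   # user-supplied callable
    #   df.to_sql("table_name", conn, method="multi")  # built-in multi-row INSERT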
def _roundtrip(self):
self.drop_table("test_frame_roundtrip")
self.pandasSQL.to_sql(self.test_frame1, "test_frame_roundtrip")
result = self.pandasSQL.read_query("SELECT * FROM test_frame_roundtrip")
result.set_index("level_0", inplace=True)
# result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def _execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = self.pandasSQL.execute("SELECT * FROM iris")
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def _to_sql_save_index(self):
df = DataFrame.from_records(
[(1, 2.1, "line1"), (2, 1.5, "line2")], columns=["A", "B", "C"], index=["A"]
)
self.pandasSQL.to_sql(df, "test_to_sql_saves_index")
ix_cols = self._get_index_columns("test_to_sql_saves_index")
assert ix_cols == [["A"]]
def _transaction_test(self):
with self.pandasSQL.run_transaction() as trans:
trans.execute("CREATE TABLE test_trans (A INT, B TEXT)")
class DummyException(Exception):
pass
# Make sure when transaction is rolled back, no rows get inserted
ins_sql = "INSERT INTO test_trans (A,B) VALUES (1, 'blah')"
try:
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
raise DummyException("error")
except DummyException:
# ignore raised exception
pass
res = self.pandasSQL.read_query("SELECT * FROM test_trans")
assert len(res) == 0
# Make sure when transaction is committed, rows do get inserted
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
res2 = self.pandasSQL.read_query("SELECT * FROM test_trans")
assert len(res2) == 1
# -----------------------------------------------------------------------------
# -- Testing the public API
class _TestSQLApi(PandasSQLTest):
"""
Base class to test the public API.
From this two classes are derived to run these tests for both the
sqlalchemy mode (`TestSQLApi`) and the fallback mode
(`TestSQLiteFallbackApi`). These tests are run with sqlite3. Specific
tests for the different sql flavours are included in `_TestSQLAlchemy`.
Notes:
flavor can always be passed even in SQLAlchemy mode,
should be correctly ignored.
we don't use drop_table because that isn't part of the public api
"""
flavor = "sqlite"
mode: str
def setup_connect(self):
self.conn = self.connect()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
def load_test_data_and_sql(self):
self._load_iris_view()
self._load_test1_data()
self._load_test2_data()
self._load_test3_data()
self._load_raw_sql()
def test_read_sql_iris(self):
iris_frame = sql.read_sql_query("SELECT * FROM iris", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_sql_view(self):
iris_frame = sql.read_sql_query("SELECT * FROM iris_view", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_to_sql(self):
sql.to_sql(self.test_frame1, "test_frame1", self.conn)
assert sql.has_table("test_frame1", self.conn)
def test_to_sql_fail(self):
sql.to_sql(self.test_frame1, "test_frame2", self.conn, if_exists="fail")
assert sql.has_table("test_frame2", self.conn)
msg = "Table 'test_frame2' already exists"
with pytest.raises(ValueError, match=msg):
sql.to_sql(self.test_frame1, "test_frame2", self.conn, if_exists="fail")
def test_to_sql_replace(self):
sql.to_sql(self.test_frame1, "test_frame3", self.conn, if_exists="fail")
# Add to table again
sql.to_sql(self.test_frame1, "test_frame3", self.conn, if_exists="replace")
assert sql.has_table("test_frame3", self.conn)
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame3")
assert num_rows == num_entries
def test_to_sql_append(self):
sql.to_sql(self.test_frame1, "test_frame4", self.conn, if_exists="fail")
# Add to table again
sql.to_sql(self.test_frame1, "test_frame4", self.conn, if_exists="append")
assert sql.has_table("test_frame4", self.conn)
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows("test_frame4")
assert num_rows == num_entries
def test_to_sql_type_mapping(self):
sql.to_sql(self.test_frame3, "test_frame5", self.conn, index=False)
result = sql.read_sql("SELECT * FROM test_frame5", self.conn)
tm.assert_frame_equal(self.test_frame3, result)
def test_to_sql_series(self):
s = Series(np.arange(5, dtype="int64"), name="series")
sql.to_sql(s, "test_series", self.conn, index=False)
s2 = sql.read_sql_query("SELECT * FROM test_series", self.conn)
tm.assert_frame_equal(s.to_frame(), s2)
def test_roundtrip(self):
sql.to_sql(self.test_frame1, "test_frame_roundtrip", con=self.conn)
result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn)
# HACK!
result.index = self.test_frame1.index
result.set_index("level_0", inplace=True)
result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def test_roundtrip_chunksize(self):
sql.to_sql(
self.test_frame1,
"test_frame_roundtrip",
con=self.conn,
index=False,
chunksize=2,
)
result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn)
tm.assert_frame_equal(result, self.test_frame1)
def test_execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = sql.execute("SELECT * FROM iris", con=self.conn)
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def test_date_parsing(self):
# Test date parsing in read_sql
# No Parsing
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn)
assert not issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates=["DateCol"]
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
assert df.DateCol.tolist() == [
pd.Timestamp(2000, 1, 3, 0, 0, 0),
pd.Timestamp(2000, 1, 4, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
parse_dates={"DateCol": "%Y-%m-%d %H:%M:%S"},
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
assert df.DateCol.tolist() == [
pd.Timestamp(2000, 1, 3, 0, 0, 0),
pd.Timestamp(2000, 1, 4, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates=["IntDateCol"]
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
assert df.IntDateCol.tolist() == [
pd.Timestamp(1986, 12, 25, 0, 0, 0),
pd.Timestamp(2013, 1, 1, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates={"IntDateCol": "s"}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
assert df.IntDateCol.tolist() == [
pd.Timestamp(1986, 12, 25, 0, 0, 0),
pd.Timestamp(2013, 1, 1, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
parse_dates={"IntDateOnlyCol": "%Y%m%d"},
)
assert issubclass(df.IntDateOnlyCol.dtype.type, np.datetime64)
assert df.IntDateOnlyCol.tolist() == [
pd.Timestamp("2010-10-10"),
pd.Timestamp("2010-12-12"),
]
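    # Summary of the parse_dates forms exercised above: a list of column names,
    # a dict mapping column -> strftime format (e.g. {"DateCol": "%Y-%m-%d %H:%M:%S"}),
    # or a dict mapping column -> epoch unit (e.g. {"IntDateCol": "s"}).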
def test_date_and_index(self):
# Test case where same column appears in parse_date and index_col
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
index_col="DateCol",
parse_dates=["DateCol", "IntDateCol"],
)
assert issubclass(df.index.dtype.type, np.datetime64)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_timedelta(self):
# see #6921
df = to_timedelta(Series(["00:00:01", "00:00:03"], name="foo")).to_frame()
with tm.assert_produces_warning(UserWarning):
df.to_sql("test_timedelta", self.conn)
result = sql.read_sql_query("SELECT * FROM test_timedelta", self.conn)
tm.assert_series_equal(result["foo"], df["foo"].astype("int64"))
def test_complex_raises(self):
df = DataFrame({"a": [1 + 1j, 2j]})
msg = "Complex datatypes not supported"
with pytest.raises(ValueError, match=msg):
df.to_sql("test_complex", self.conn)
@pytest.mark.parametrize(
"index_name,index_label,expected",
[
# no index name, defaults to 'index'
(None, None, "index"),
# specifying index_label
(None, "other_label", "other_label"),
# using the index name
("index_name", None, "index_name"),
# has index name, but specifying index_label
("index_name", "other_label", "other_label"),
# index name is integer
(0, None, "0"),
# index name is None but index label is integer
(None, 0, "0"),
],
)
def test_to_sql_index_label(self, index_name, index_label, expected):
temp_frame = DataFrame({"col1": range(4)})
temp_frame.index.name = index_name
query = "SELECT * FROM test_index_label"
sql.to_sql(temp_frame, "test_index_label", self.conn, index_label=index_label)
frame = sql.read_sql_query(query, self.conn)
assert frame.columns[0] == expected
def test_to_sql_index_label_multiindex(self):
temp_frame = DataFrame(
{"col1": range(4)},
index=MultiIndex.from_product([("A0", "A1"), ("B0", "B1")]),
)
# no index name, defaults to 'level_0' and 'level_1'
sql.to_sql(temp_frame, "test_index_label", self.conn)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[0] == "level_0"
assert frame.columns[1] == "level_1"
# specifying index_label
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label=["A", "B"],
)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["A", "B"]
# using the index name
temp_frame.index.names = ["A", "B"]
sql.to_sql(temp_frame, "test_index_label", self.conn, if_exists="replace")
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["A", "B"]
# has index name, but specifying index_label
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label=["C", "D"],
)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["C", "D"]
msg = "Length of 'index_label' should match number of levels, which is 2"
with pytest.raises(ValueError, match=msg):
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label="C",
)
def test_multiindex_roundtrip(self):
df = DataFrame.from_records(
[(1, 2.1, "line1"), (2, 1.5, "line2")],
columns=["A", "B", "C"],
index=["A", "B"],
)
df.to_sql("test_multiindex_roundtrip", self.conn)
result = sql.read_sql_query(
"SELECT * FROM test_multiindex_roundtrip", self.conn, index_col=["A", "B"]
)
tm.assert_frame_equal(df, result, check_index_type=True)
def test_integer_col_names(self):
df = DataFrame([[1, 2], [3, 4]], columns=[0, 1])
sql.to_sql(df, "test_frame_integer_col_names", self.conn, if_exists="replace")
def test_get_schema(self):
create_sql = sql.get_schema(self.test_frame1, "test", con=self.conn)
assert "CREATE" in create_sql
def test_get_schema_dtypes(self):
float_frame = DataFrame({"a": [1.1, 1.2], "b": [2.1, 2.2]})
dtype = sqlalchemy.Integer if self.mode == "sqlalchemy" else "INTEGER"
create_sql = sql.get_schema(
float_frame, "test", con=self.conn, dtype={"b": dtype}
)
assert "CREATE" in create_sql
assert "INTEGER" in create_sql
def test_get_schema_keys(self):
frame = DataFrame({"Col1": [1.1, 1.2], "Col2": [2.1, 2.2]})
create_sql = sql.get_schema(frame, "test", con=self.conn, keys="Col1")
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("Col1")'
assert constraint_sentence in create_sql
# multiple columns as key (GH10385)
create_sql = sql.get_schema(
self.test_frame1, "test", con=self.conn, keys=["A", "B"]
)
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("A", "B")'
assert constraint_sentence in create_sql
def test_chunksize_read(self):
df = DataFrame(np.random.randn(22, 5), columns=list("abcde"))
df.to_sql("test_chunksize", self.conn, index=False)
        # reading the full query in one go
res1 = sql.read_sql_query("select * from test_chunksize", self.conn)
# reading the query in chunks with read_sql_query
res2 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_query(
"select * from test_chunksize", self.conn, chunksize=5
):
res2 = concat([res2, chunk], ignore_index=True)
assert len(chunk) == sizes[i]
i += 1
tm.assert_frame_equal(res1, res2)
        # reading the table in chunks with read_sql_table
if self.mode == "sqlalchemy":
res3 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_table("test_chunksize", self.conn, chunksize=5):
res3 = concat([res3, chunk], ignore_index=True)
assert len(chunk) == sizes[i]
i += 1
tm.assert_frame_equal(res1, res3)
def test_categorical(self):
# GH8624
# test that categorical gets written correctly as dense column
df = DataFrame(
{
"person_id": [1, 2, 3],
"person_name": ["<NAME>", "<NAME>", "<NAME>"],
}
)
df2 = df.copy()
df2["person_name"] = df2["person_name"].astype("category")
df2.to_sql("test_categorical", self.conn, index=False)
res = sql.read_sql_query("SELECT * FROM test_categorical", self.conn)
tm.assert_frame_equal(res, df)
def test_unicode_column_name(self):
# GH 11431
df = DataFrame([[1, 2], [3, 4]], columns=["\xe9", "b"])
df.to_sql("test_unicode", self.conn, index=False)
def test_escaped_table_name(self):
# GH 13206
df = DataFrame({"A": [0, 1, 2], "B": [0.2, np.nan, 5.6]})
df.to_sql("d1187b08-4943-4c8d-a7f6", self.conn, index=False)
res = sql.read_sql_query("SELECT * FROM `d1187b08-4943-4c8d-a7f6`", self.conn)
tm.assert_frame_equal(res, df)
@pytest.mark.single
@pytest.mark.skipif(not SQLALCHEMY_INSTALLED, reason="SQLAlchemy not installed")
class TestSQLApi(SQLAlchemyMixIn, _TestSQLApi):
"""
Test the public API as it would be used directly
    Tests for `read_sql_table` are included here, as this is specific to the
sqlalchemy mode.
"""
flavor = "sqlite"
mode = "sqlalchemy"
def connect(self):
return sqlalchemy.create_engine("sqlite:///:memory:")
def test_read_table_columns(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, "test_frame", self.conn)
cols = ["A", "B"]
result = sql.read_sql_table("test_frame", self.conn, columns=cols)
assert result.columns.tolist() == cols
def test_read_table_index_col(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, "test_frame", self.conn)
result = sql.read_sql_table("test_frame", self.conn, index_col="index")
assert result.index.names == ["index"]
result = sql.read_sql_table("test_frame", self.conn, index_col=["A", "B"])
assert result.index.names == ["A", "B"]
result = sql.read_sql_table(
"test_frame", self.conn, index_col=["A", "B"], columns=["C", "D"]
)
assert result.index.names == ["A", "B"]
assert result.columns.tolist() == ["C", "D"]
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
iris_frame1 = sql.read_sql_table("iris", self.conn)
iris_frame2 = sql.read_sql("iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
def test_not_reflect_all_tables(self):
# create invalid table
qry = """CREATE TABLE invalid (x INTEGER, y UNKNOWN);"""
self.conn.execute(qry)
qry = """CREATE TABLE other_table (x INTEGER, y INTEGER);"""
self.conn.execute(qry)
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
sql.read_sql_table("other_table", self.conn)
sql.read_sql_query("SELECT * FROM other_table", self.conn)
# Verify some things
assert len(w) == 0
def test_warning_case_insensitive_table_name(self):
# see gh-7815
#
        # We can't test that this warning is triggered, as the database
# configuration would have to be altered. But here we test that
# the warning is certainly NOT triggered in a normal case.
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# This should not trigger a Warning
self.test_frame1.to_sql("CaseSensitive", self.conn)
# Verify some things
assert len(w) == 0
def _get_index_columns(self, tbl_name):
from sqlalchemy.engine import reflection
insp = reflection.Inspector.from_engine(self.conn)
ixs = insp.get_indexes("test_index_saved")
ixs = [i["column_names"] for i in ixs]
return ixs
def test_sqlalchemy_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame(
{"time": to_datetime(["201412120154", "201412110254"], utc=True)}
)
db = sql.SQLDatabase(self.conn)
table = sql.SQLTable("test_type", db, frame=df)
# GH 9086: TIMESTAMP is the suggested type for datetimes with timezones
assert isinstance(table.table.c["time"].type, sqltypes.TIMESTAMP)
def test_database_uri_string(self):
# Test read_sql and .to_sql method with a database URI (GH10654)
test_frame1 = self.test_frame1
# db_uri = 'sqlite:///:memory:' # raises
# sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) near
# "iris": syntax error [SQL: 'iris']
with tm.ensure_clean() as name:
db_uri = "sqlite:///" + name
table = "iris"
test_frame1.to_sql(table, db_uri, if_exists="replace", index=False)
test_frame2 = sql.read_sql(table, db_uri)
test_frame3 = sql.read_sql_table(table, db_uri)
query = "SELECT * FROM iris"
test_frame4 = sql.read_sql_query(query, db_uri)
tm.assert_frame_equal(test_frame1, test_frame2)
tm.assert_frame_equal(test_frame1, test_frame3)
tm.assert_frame_equal(test_frame1, test_frame4)
# using driver that will not be installed on Travis to trigger error
# in sqlalchemy.create_engine -> test passing of this error to user
try:
# the rest of this test depends on pg8000's being absent
import pg8000 # noqa
pytest.skip("pg8000 is installed")
except ImportError:
pass
db_uri = "postgresql+pg8000://user:pass@host/dbname"
with pytest.raises(ImportError, match="pg8000"):
sql.read_sql("select * from table", db_uri)
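    # Illustrative URI forms accepted as the con argument (any SQLAlchemy URL works):
    #   "sqlite:///path/to/file.db"
    #   "postgresql+psycopg2://user:password@host:5432/dbname"
    #   "mysql+pymysql://user:password@host/dbname"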
def _make_iris_table_metadata(self):
sa = sqlalchemy
metadata = sa.MetaData()
iris = sa.Table(
"iris",
metadata,
sa.Column("SepalLength", sa.REAL),
sa.Column("SepalWidth", sa.REAL),
sa.Column("PetalLength", sa.REAL),
sa.Column("PetalWidth", sa.REAL),
sa.Column("Name", sa.TEXT),
)
return iris
def test_query_by_text_obj(self):
# WIP : GH10846
name_text = sqlalchemy.text("select * from iris where name=:name")
iris_df = sql.read_sql(name_text, self.conn, params={"name": "Iris-versicolor"})
all_names = set(iris_df["Name"])
assert all_names == {"Iris-versicolor"}
def test_query_by_select_obj(self):
# WIP : GH10846
iris = self._make_iris_table_metadata()
name_select = sqlalchemy.select([iris]).where(
iris.c.Name == sqlalchemy.bindparam("name")
)
iris_df = sql.read_sql(name_select, self.conn, params={"name": "Iris-setosa"})
all_names = set(iris_df["Name"])
assert all_names == {"Iris-setosa"}
class _EngineToConnMixin:
"""
A mixin that causes setup_connect to create a conn rather than an engine.
"""
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
super().load_test_data_and_sql()
engine = self.conn
conn = engine.connect()
self.__tx = conn.begin()
self.pandasSQL = sql.SQLDatabase(conn)
self.__engine = engine
self.conn = conn
yield
self.__tx.rollback()
self.conn.close()
self.conn = self.__engine
self.pandasSQL = sql.SQLDatabase(self.__engine)
# XXX:
# super().teardown_method(method)
@pytest.mark.single
class TestSQLApiConn(_EngineToConnMixin, TestSQLApi):
pass
@pytest.mark.single
class TestSQLiteFallbackApi(SQLiteMixIn, _TestSQLApi):
"""
Test the public sqlite connection fallback API
"""
flavor = "sqlite"
mode = "fallback"
def connect(self, database=":memory:"):
return sqlite3.connect(database)
def test_sql_open_close(self):
# Test if the IO in the database still work if the connection closed
# between the writing and reading (as in many real situations).
with tm.ensure_clean() as name:
conn = self.connect(name)
sql.to_sql(self.test_frame3, "test_frame3_legacy", conn, index=False)
conn.close()
conn = self.connect(name)
result = sql.read_sql_query("SELECT * FROM test_frame3_legacy;", conn)
conn.close()
tm.assert_frame_equal(self.test_frame3, result)
@pytest.mark.skipif(SQLALCHEMY_INSTALLED, reason="SQLAlchemy is installed")
def test_con_string_import_error(self):
conn = "mysql://root@localhost/pandas_nosetest"
msg = "Using URI string without sqlalchemy installed"
with pytest.raises(ImportError, match=msg):
sql.read_sql("SELECT * FROM iris", conn)
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
msg = "Execution failed on sql 'iris': near \"iris\": syntax error"
with pytest.raises(sql.DatabaseError, match=msg):
sql.read_sql("iris", self.conn)
def test_safe_names_warning(self):
# GH 6798
df = DataFrame([[1, 2], [3, 4]], columns=["a", "b "]) # has a space
# warns on create table with spaces in names
with tm.assert_produces_warning():
sql.to_sql(df, "test_frame3_legacy", self.conn, index=False)
def test_get_schema2(self):
# without providing a connection object (available for backwards comp)
create_sql = sql.get_schema(self.test_frame1, "test")
assert "CREATE" in create_sql
def _get_sqlite_column_type(self, schema, column):
for col in schema.split("\n"):
if col.split()[0].strip('""') == column:
return col.split()[1]
raise ValueError(f"Column {column} not found")
def test_sqlite_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame(
{"time": to_datetime(["201412120154", "201412110254"], utc=True)}
)
db = sql.SQLiteDatabase(self.conn)
table = sql.SQLiteTable("test_type", db, frame=df)
schema = table.sql_schema()
assert self._get_sqlite_column_type(schema, "time") == "TIMESTAMP"
# -----------------------------------------------------------------------------
# -- Database flavor specific tests
class _TestSQLAlchemy(SQLAlchemyMixIn, PandasSQLTest):
"""
Base class for testing the sqlalchemy backend.
Subclasses for specific database types are created below. Tests that
deviate for each flavor are overwritten there.
"""
flavor: str
@pytest.fixture(autouse=True, scope="class")
def setup_class(cls):
cls.setup_import()
cls.setup_driver()
conn = cls.connect()
conn.connect()
def load_test_data_and_sql(self):
self._load_raw_sql()
self._load_test1_data()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
@classmethod
def setup_import(cls):
# Skip this test if SQLAlchemy not available
if not SQLALCHEMY_INSTALLED:
pytest.skip("SQLAlchemy not installed")
@classmethod
def setup_driver(cls):
raise NotImplementedError()
@classmethod
def connect(cls):
raise NotImplementedError()
def setup_connect(self):
try:
self.conn = self.connect()
self.pandasSQL = sql.SQLDatabase(self.conn)
# to test if connection can be made:
self.conn.connect()
except sqlalchemy.exc.OperationalError:
pytest.skip(f"Can't connect to {self.flavor} server")
def test_read_sql(self):
self._read_sql_iris()
def test_read_sql_parameter(self):
self._read_sql_iris_parameter()
def test_read_sql_named_parameter(self):
self._read_sql_iris_named_parameter()
def test_to_sql(self):
self._to_sql()
def test_to_sql_empty(self):
self._to_sql_empty()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replace(self):
self._to_sql_replace()
def test_to_sql_append(self):
self._to_sql_append()
def test_to_sql_method_multi(self):
self._to_sql(method="multi")
def test_to_sql_method_callable(self):
self._to_sql_method_callable()
def test_create_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
{"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}
)
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, "temp_frame")
assert temp_conn.has_table("temp_frame")
def test_drop_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
{"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}
)
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, "temp_frame")
assert temp_conn.has_table("temp_frame")
pandasSQL.drop_table("temp_frame")
assert not temp_conn.has_table("temp_frame")
def test_roundtrip(self):
self._roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_read_table(self):
iris_frame = sql.read_sql_table("iris", con=self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_table_columns(self):
iris_frame = sql.read_sql_table(
"iris", con=self.conn, columns=["SepalLength", "SepalLength"]
)
tm.equalContents(iris_frame.columns.values, ["SepalLength", "SepalLength"])
def test_read_table_absent_raises(self):
msg = "Table this_doesnt_exist not found"
with pytest.raises(ValueError, match=msg):
sql.read_sql_table("this_doesnt_exist", con=self.conn)
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
assert issubclass(df.BoolCol.dtype.type, np.bool_)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
# Bool column with NA values becomes object
assert issubclass(df.BoolColWithNull.dtype.type, np.object)
def test_bigint(self):
# int64 should be converted to BigInteger, GH7433
df = DataFrame(data={"i64": [2 ** 62]})
df.to_sql("test_bigint", self.conn, index=False)
result = sql.read_sql_table("test_bigint", self.conn)
tm.assert_frame_equal(df, result)
def test_default_date_load(self):
df = sql.read_sql_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so shouldn't parse, but
# MySQL SHOULD be converted.
assert issubclass(df.DateCol.dtype.type, np.datetime64)
def test_datetime_with_timezone(self):
# edge case that converts postgresql datetime with time zone types
# to datetime64[ns,psycopg2.tz.FixedOffsetTimezone..], which is ok
# but should be more natural, so coerce to datetime64[ns] for now
def check(col):
# check that a column is either datetime64[ns]
# or datetime64[ns, UTC]
if is_datetime64_dtype(col.dtype):
# "2000-01-01 00:00:00-08:00" should convert to
# "2000-01-01 08:00:00"
assert col[0] == Timestamp("2000-01-01 08:00:00")
# "2000-06-01 00:00:00-07:00" should convert to
# "2000-06-01 07:00:00"
assert col[1] == Timestamp("2000-06-01 07:00:00")
elif is_datetime64tz_dtype(col.dtype):
assert str(col.dt.tz) == "UTC"
# "2000-01-01 00:00:00-08:00" should convert to
# "2000-01-01 08:00:00"
# "2000-06-01 00:00:00-07:00" should convert to
# "2000-06-01 07:00:00"
# GH 6415
expected_data = [
Timestamp("2000-01-01 08:00:00", tz="UTC"),
Timestamp("2000-06-01 07:00:00", tz="UTC"),
]
expected = Series(expected_data, name=col.name)
tm.assert_series_equal(col, expected)
else:
raise AssertionError(
f"DateCol loaded with incorrect type -> {col.dtype}"
)
# GH11216
df = pd.read_sql_query("select * from types_test_data", self.conn)
if not hasattr(df, "DateColWithTz"):
pytest.skip("no column with datetime with time zone")
# this is parsed on Travis (linux), but not on macosx for some reason
# even with the same versions of psycopg2 & sqlalchemy, possibly a
# Postgresql server version difference
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
df = pd.read_sql_query(
"select * from types_test_data", self.conn, parse_dates=["DateColWithTz"]
)
if not hasattr(df, "DateColWithTz"):
pytest.skip("no column with datetime with time zone")
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
assert str(col.dt.tz) == "UTC"
check(df.DateColWithTz)
df = pd.concat(
list(
pd.read_sql_query(
"select * from types_test_data", self.conn, chunksize=1
)
),
ignore_index=True,
)
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
assert str(col.dt.tz) == "UTC"
expected = sql.read_sql_table("types_test_data", self.conn)
col = expected.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
tm.assert_series_equal(df.DateColWithTz, expected.DateColWithTz)
# xref #7139
# this might or might not be converted depending on the postgres driver
df = sql.read_sql_table("types_test_data", self.conn)
check(df.DateColWithTz)
def test_datetime_with_timezone_roundtrip(self):
# GH 9086
# Write datetimetz data to a db and read it back
# For dbs that support timestamps with timezones, should get back UTC
# otherwise naive data should be returned
expected = DataFrame(
{"A": date_range("2013-01-01 09:00:00", periods=3, tz="US/Pacific")}
)
expected.to_sql("test_datetime_tz", self.conn, index=False)
if self.flavor == "postgresql":
# SQLAlchemy "timezones" (i.e. offsets) are coerced to UTC
expected["A"] = expected["A"].dt.tz_convert("UTC")
else:
# Otherwise, timestamps are returned as local, naive
expected["A"] = expected["A"].dt.tz_localize(None)
result = sql.read_sql_table("test_datetime_tz", self.conn)
tm.assert_frame_equal(result, expected)
result = sql.read_sql_query("SELECT * FROM test_datetime_tz", self.conn)
if self.flavor == "sqlite":
# read_sql_query does not return datetime type like read_sql_table
assert isinstance(result.loc[0, "A"], str)
result["A"] = to_datetime(result["A"])
tm.assert_frame_equal(result, expected)
def test_naive_datetimeindex_roundtrip(self):
# GH 23510
# Ensure that a naive DatetimeIndex isn't converted to UTC
dates = date_range("2018-01-01", periods=5, freq="6H")
expected = DataFrame({"nums": range(5)}, index=dates)
expected.to_sql("foo_table", self.conn, index_label="info_date")
result = sql.read_sql_table("foo_table", self.conn, index_col="info_date")
        # result index will gain a name from a set_index operation; expected index has none
tm.assert_frame_equal(result, expected, check_names=False)
def test_date_parsing(self):
# No Parsing
df = sql.read_sql_table("types_test_data", self.conn)
expected_type = object if self.flavor == "sqlite" else np.datetime64
assert issubclass(df.DateCol.dtype.type, expected_type)
df = sql.read_sql_table("types_test_data", self.conn, parse_dates=["DateCol"])
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={"DateCol": "%Y-%m-%d %H:%M:%S"}
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data",
self.conn,
parse_dates={"DateCol": {"format": "%Y-%m-%d %H:%M:%S"}},
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates=["IntDateCol"]
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={"IntDateCol": "s"}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={"IntDateCol": {"unit": "s"}}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_datetime(self):
df = DataFrame(
{"A": date_range("2013-01-01 09:00:00", periods=3), "B": np.arange(3.0)}
)
df.to_sql("test_datetime", self.conn)
# with read_table -> type information from schema used
result = sql.read_sql_table("test_datetime", self.conn)
result = result.drop("index", axis=1)
tm.assert_frame_equal(result, df)
        # with read_sql -> no type information -> sqlite has no native datetime type
result = sql.read_sql_query("SELECT * FROM test_datetime", self.conn)
result = result.drop("index", axis=1)
if self.flavor == "sqlite":
assert isinstance(result.loc[0, "A"], str)
result["A"] = to_datetime(result["A"])
tm.assert_frame_equal(result, df)
else:
tm.assert_frame_equal(result, df)
def test_datetime_NaT(self):
df = DataFrame(
{"A": date_range("2013-01-01 09:00:00", periods=3), "B": np.arange(3.0)}
)
df.loc[1, "A"] = np.nan
df.to_sql("test_datetime", self.conn, index=False)
# with read_table -> type information from schema used
result = sql.read_sql_table("test_datetime", self.conn)
tm.assert_frame_equal(result, df)
        # with read_sql -> no type information -> sqlite has no native datetime type
result = sql.read_sql_query("SELECT * FROM test_datetime", self.conn)
if self.flavor == "sqlite":
assert isinstance(result.loc[0, "A"], str)
result["A"] = to_datetime(result["A"], errors="coerce")
tm.assert_frame_equal(result, df)
else:
tm.assert_frame_equal(result, df)
def test_datetime_date(self):
# test support for datetime.date
df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"])
df.to_sql("test_date", self.conn, index=False)
res = read_sql_table("test_date", self.conn)
result = res["a"]
expected = to_datetime(df["a"])
# comes back as datetime64
tm.assert_series_equal(result, expected)
def test_datetime_time(self):
# test support for datetime.time
df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"])
df.to_sql("test_time", self.conn, index=False)
res = read_sql_table("test_time", self.conn)
tm.assert_frame_equal(res, df)
# GH8341
# first, use the fallback to have the sqlite adapter put in place
sqlite_conn = TestSQLiteFallback.connect()
sql.to_sql(df, "test_time2", sqlite_conn, index=False)
res = sql.read_sql_query("SELECT * FROM test_time2", sqlite_conn)
ref = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
tm.assert_frame_equal(ref, res) # check if adapter is in place
# then test if sqlalchemy is unaffected by the sqlite adapter
sql.to_sql(df, "test_time3", self.conn, index=False)
if self.flavor == "sqlite":
res = sql.read_sql_query("SELECT * FROM test_time3", self.conn)
ref = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
tm.assert_frame_equal(ref, res)
res = sql.read_sql_table("test_time3", self.conn)
tm.assert_frame_equal(df, res)
def test_mixed_dtype_insert(self):
# see GH6509
s1 = Series(2 ** 25 + 1, dtype=np.int32)
s2 = Series(0.0, dtype=np.float32)
df = DataFrame({"s1": s1, "s2": s2})
# write and read again
df.to_sql("test_read_write", self.conn, index=False)
df2 = sql.read_sql_table("test_read_write", self.conn)
tm.assert_frame_equal(df, df2, check_dtype=False, check_exact=True)
def test_nan_numeric(self):
# NaNs in numeric float column
df = DataFrame({"A": [0, 1, 2], "B": [0.2, np.nan, 5.6]})
df.to_sql("test_nan", self.conn, index=False)
# with read_table
result = sql.read_sql_table("test_nan", self.conn)
tm.assert_frame_equal(result, df)
# with read_sql
result = sql.read_sql_query("SELECT * FROM test_nan", self.conn)
tm.assert_frame_equal(result, df)
def test_nan_fullcolumn(self):
# full NaN column (numeric float column)
df = DataFrame({"A": [0, 1, 2], "B": [np.nan, np.nan, np.nan]})
df.to_sql("test_nan", self.conn, index=False)
# with read_table
result = sql.read_sql_table("test_nan", self.conn)
tm.assert_frame_equal(result, df)
        # with read_sql -> no type info from table -> stays None
df["B"] = df["B"].astype("object")
df["B"] = None
result = sql.read_sql_query("SELECT * FROM test_nan", self.conn)
tm.assert_frame_equal(result, df)
def test_nan_string(self):
# NaNs in string column
df = DataFrame({"A": [0, 1, 2], "B": ["a", "b", np.nan]})
df.to_sql("test_nan", self.conn, index=False)
# NaNs are coming back as None
df.loc[2, "B"] = None
# with read_table
result = sql.read_sql_table("test_nan", self.conn)
tm.assert_frame_equal(result, df)
# with read_sql
result = sql.read_sql_query("SELECT * FROM test_nan", self.conn)
tm.assert_frame_equal(result, df)
def _get_index_columns(self, tbl_name):
from sqlalchemy.engine import reflection
insp = reflection.Inspector.from_engine(self.conn)
ixs = insp.get_indexes(tbl_name)
ixs = [i["column_names"] for i in ixs]
return ixs
def test_to_sql_save_index(self):
self._to_sql_save_index()
def test_transactions(self):
self._transaction_test()
def test_get_schema_create_table(self):
# Use a dataframe without a bool column, since MySQL converts bool to
# TINYINT (which read_sql_table returns as an int and causes a dtype
# mismatch)
self._load_test3_data()
tbl = "test_get_schema_create_table"
create_sql = sql.get_schema(self.test_frame3, tbl, con=self.conn)
blank_test_df = self.test_frame3.iloc[:0]
self.drop_table(tbl)
self.conn.execute(create_sql)
returned_df = sql.read_sql_table(tbl, self.conn)
tm.assert_frame_equal(returned_df, blank_test_df, check_index_type=False)
self.drop_table(tbl)
def test_dtype(self):
cols = ["A", "B"]
data = [(0.8, True), (0.9, None)]
df = DataFrame(data, columns=cols)
df.to_sql("dtype_test", self.conn)
df.to_sql("dtype_test2", self.conn, dtype={"B": sqlalchemy.TEXT})
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
sqltype = meta.tables["dtype_test2"].columns["B"].type
assert isinstance(sqltype, sqlalchemy.TEXT)
msg = "The type of B is not a SQLAlchemy type"
with pytest.raises(ValueError, match=msg):
df.to_sql("error", self.conn, dtype={"B": str})
# GH9083
df.to_sql("dtype_test3", self.conn, dtype={"B": sqlalchemy.String(10)})
meta.reflect()
sqltype = meta.tables["dtype_test3"].columns["B"].type
assert isinstance(sqltype, sqlalchemy.String)
assert sqltype.length == 10
# single dtype
df.to_sql("single_dtype_test", self.conn, dtype=sqlalchemy.TEXT)
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
sqltypea = meta.tables["single_dtype_test"].columns["A"].type
sqltypeb = meta.tables["single_dtype_test"].columns["B"].type
assert isinstance(sqltypea, sqlalchemy.TEXT)
assert isinstance(sqltypeb, sqlalchemy.TEXT)
def test_notna_dtype(self):
cols = {
"Bool": Series([True, None]),
"Date": Series([datetime(2012, 5, 1), None]),
"Int": Series([1, None], dtype="object"),
"Float": Series([1.1, None]),
}
df = DataFrame(cols)
tbl = "notna_dtype_test"
df.to_sql(tbl, self.conn)
returned_df = sql.read_sql_table(tbl, self.conn) # noqa
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
if self.flavor == "mysql":
my_type = sqltypes.Integer
else:
my_type = sqltypes.Boolean
col_dict = meta.tables[tbl].columns
assert isinstance(col_dict["Bool"].type, my_type)
assert isinstance(col_dict["Date"].type, sqltypes.DateTime)
assert isinstance(col_dict["Int"].type, sqltypes.Integer)
assert isinstance(col_dict["Float"].type, sqltypes.Float)
def test_double_precision(self):
V = 1.23456789101112131415
df = DataFrame(
{
"f32": Series([V], dtype="float32"),
"f64": Series([V], dtype="float64"),
"f64_as_f32": Series([V], dtype="float64"),
"i32": Series([5], dtype="int32"),
"i64": Series([5], dtype="int64"),
}
)
df.to_sql(
"test_dtypes",
self.conn,
index=False,
if_exists="replace",
dtype={"f64_as_f32": sqlalchemy.Float(precision=23)},
)
res = sql.read_sql_table("test_dtypes", self.conn)
# check precision of float64
assert np.round(df["f64"].iloc[0], 14) == np.round(res["f64"].iloc[0], 14)
# check sql types
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
col_dict = meta.tables["test_dtypes"].columns
assert str(col_dict["f32"].type) == str(col_dict["f64_as_f32"].type)
assert isinstance(col_dict["f32"].type, sqltypes.Float)
assert isinstance(col_dict["f64"].type, sqltypes.Float)
assert isinstance(col_dict["i32"].type, sqltypes.Integer)
assert isinstance(col_dict["i64"].type, sqltypes.BigInteger)
def test_connectable_issue_example(self):
# This tests the example raised in issue
# https://github.com/pandas-dev/pandas/issues/10104
def foo(connection):
query = "SELECT test_foo_data FROM test_foo_data"
return sql.read_sql_query(query, con=connection)
def bar(connection, data):
data.to_sql(name="test_foo_data", con=connection, if_exists="append")
def main(connectable):
with connectable.connect() as conn:
with conn.begin():
foo_data = conn.run_callable(foo)
conn.run_callable(bar, foo_data)
DataFrame({"test_foo_data": [0, 1, 2]}).to_sql("test_foo_data", self.conn)
main(self.conn)
def test_temporary_table(self):
test_data = "Hello, World!"
expected = DataFrame({"spam": [test_data]})
Base = declarative.declarative_base()
class Temporary(Base):
__tablename__ = "temp_test"
__table_args__ = {"prefixes": ["TEMPORARY"]}
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
spam = sqlalchemy.Column(sqlalchemy.Unicode(30), nullable=False)
Session = sa_session.sessionmaker(bind=self.conn)
session = Session()
with session.transaction:
conn = session.connection()
Temporary.__table__.create(conn)
session.add(Temporary(spam=test_data))
session.flush()
df = sql.read_sql_query(sql=sqlalchemy.select([Temporary.spam]), con=conn)
tm.assert_frame_equal(df, expected)
class _TestSQLAlchemyConn(_EngineToConnMixin, _TestSQLAlchemy):
def test_transactions(self):
        pytest.skip("Nested transaction rollbacks don't work with pandas")
class _TestSQLiteAlchemy:
"""
Test the sqlalchemy backend against an in-memory sqlite database.
"""
flavor = "sqlite"
@classmethod
def connect(cls):
return sqlalchemy.create_engine("sqlite:///:memory:")
@classmethod
def setup_driver(cls):
# sqlite3 is built-in
cls.driver = None
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
# sqlite has no boolean type, so integer type is returned
assert issubclass(df.BoolCol.dtype.type, np.integer)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
# Non-native Bool column with NA values stays as float
assert issubclass(df.BoolColWithNull.dtype.type, np.floating)
def test_default_date_load(self):
df = sql.read_sql_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so shouldn't parse, but
assert not issubclass(df.DateCol.dtype.type, np.datetime64)
def test_bigint_warning(self):
# test no warning for BIGINT (to support int64) is raised (GH7433)
df = DataFrame({"a": [1, 2]}, dtype="int64")
df.to_sql("test_bigintwarning", self.conn, index=False)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
sql.read_sql_table("test_bigintwarning", self.conn)
assert len(w) == 0
class _TestMySQLAlchemy:
"""
    Test the sqlalchemy backend against a MySQL database.
"""
flavor = "mysql"
@classmethod
def connect(cls):
url = "mysql+{driver}://root@localhost/pandas_nosetest"
return sqlalchemy.create_engine(
url.format(driver=cls.driver), connect_args=cls.connect_args
)
@classmethod
def setup_driver(cls):
pymysql = pytest.importorskip("pymysql")
cls.driver = "pymysql"
cls.connect_args = {"client_flag": pymysql.constants.CLIENT.MULTI_STATEMENTS}
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
# MySQL has no real BOOL type (it's an alias for TINYINT)
assert issubclass(df.BoolCol.dtype.type, np.integer)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
# Bool column with NA = int column with NA values => becomes float
assert issubclass(df.BoolColWithNull.dtype.type, np.floating)
def test_read_procedure(self):
import pymysql
# see GH7324. Although it is more an api test, it is added to the
# mysql tests as sqlite does not have stored procedures
df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})
df.to_sql("test_procedure", self.conn, index=False)
proc = """DROP PROCEDURE IF EXISTS get_testdb;
CREATE PROCEDURE get_testdb ()
BEGIN
SELECT * FROM test_procedure;
END"""
connection = self.conn.connect()
trans = connection.begin()
try:
r1 = connection.execute(proc) # noqa
trans.commit()
except pymysql.Error:
trans.rollback()
raise
res1 = sql.read_sql_query("CALL get_testdb();", self.conn)
tm.assert_frame_equal(df, res1)
# test delegation to read_sql_query
res2 = sql.read_sql("CALL get_testdb();", self.conn)
tm.assert_frame_equal(df, res2)
class _TestPostgreSQLAlchemy:
"""
    Test the sqlalchemy backend against a PostgreSQL database.
"""
flavor = "postgresql"
@classmethod
def connect(cls):
url = "postgresql+{driver}://postgres@localhost/pandas_nosetest"
return sqlalchemy.create_engine(url.format(driver=cls.driver))
@classmethod
def setup_driver(cls):
pytest.importorskip("psycopg2")
cls.driver = "psycopg2"
def test_schema_support(self):
        # only test this for postgresql (schemas are not supported in
        # mysql/sqlite)
        df = DataFrame({"col1": [1, 2], "col2": [0.1, 0.2], "col3": ["a", "n"]})
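        # The rest of this test lies outside this excerpt; presumably it writes df
        # into a non-default schema and reads it back, e.g. (hedged sketch only):
        #   df.to_sql("test_schema_other", self.conn, index=False, schema="other")
        #   res = sql.read_sql_table("test_schema_other", self.conn, schema="other")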
# read inventory of all sites
from hydroDL.data import usgs, gageII
from hydroDL import kPath
import pandas as pd
import numpy as np
import time
import os
import matplotlib.pyplot as plt
# read site inventory
workDir = os.path.join(kPath.dirData, 'USGS', 'inventory')
modelDir = os.path.join(workDir, 'modelUsgs2')
fileInvC = os.path.join(workDir, 'inventory_NWIS_sample')
fileInvQ = os.path.join(workDir, 'inventory_NWIS_streamflow')
# look up sample for interested sample sites
fileCountC = os.path.join(workDir, 'count_NWIS_sample')
if os.path.exists(fileCountC):
tabC = pd.read_csv(fileCountC, dtype={'site_no': str})
else:
siteC = usgs.readUsgsText(fileInvC)
codeLst = \
['00915', '00925', '00930', '00935', '00955', '00940', '00945']+\
['00418','00419','39086','39087']+\
['00301','00300','00618','00681','00653']+\
['00010','00530','00094']+\
['00403','00408']
dictTab = dict()
for code in codeLst:
site = siteC.loc[(siteC['parm_cd'] == code) & (siteC['count_nu'] > 1)]
temp = dict(
zip(site['site_no'].tolist(),
site['count_nu'].astype(int).tolist()))
dictTab[code] = temp
    tabC = pd.DataFrame.from_dict(dictTab)
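    # tabC is indexed by site_no with one column per parameter code, holding the
    # sample counts; the original script presumably persists it for reuse on the
    # next run, e.g. tabC.to_csv(fileCountC) (hedged guess, not shown in this excerpt).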
import json
import pandas as pd
import time
import requests
from shapely.geometry import shape
from shapely.geometry import Point
from sqlalchemy import create_engine
path = r'C:\Users\Hamza\OneDrive\startup-where\data\Neighbourhoods.geojson'
yelp_api_key = '<KEY>'
def get_neighbourhoods(path):
'''For each neighbourhood zone, return neighbourhood name, longitude and latitude'''
with open(path) as f:
data = json.load(f)
cols = ['area name', 'longitude', 'latitude']
lst = []
for feature in data['features']:
area_name = feature['properties']['AREA_NAME']
longitude = feature['properties']['LONGITUDE']
latitude = feature['properties']['LATITUDE']
lst.append([area_name, longitude, latitude])
    neighbourhoods_df = pd.DataFrame(lst, columns=cols)
    return neighbourhoods_df
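# Hedged sketch (not from the original script) of how yelp_api_key and the requests
# import above are typically used, via the Yelp Fusion business-search endpoint;
# the coordinates are placeholders:
#
#   headers = {'Authorization': 'Bearer ' + yelp_api_key}
#   params = {'latitude': 43.72, 'longitude': -79.40, 'radius': 1000}
#   resp = requests.get('https://api.yelp.com/v3/businesses/search',
#                       headers=headers, params=params)
#   businesses = resp.json().get('businesses', [])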
# -*- coding: utf-8 -*-
"""
/*------------------------------------------------------*
| Spatial Uncertainty Research Framework |
| |
| Author: <NAME>, UC Berkeley, <EMAIL> |
| |
| Date: 07/11/2019 |
*------------------------------------------------------*/
"""
from __future__ import absolute_import, division, print_function
import os
import json
import pathlib
import random
import numpy as np
import pandas as pd
from scipy import spatial
import seaborn as sns
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from scipy.spatial.distance import squareform, cdist, pdist
# fix random seed for reproducibility
#tf.set_random_seed(1234)
class PrintDot(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs):
if epoch % 100 == 0:
print(epoch)
print('.', end='')
class SpatialNeuralNet:
""" A Neural Net Doing Spatial Predictions. """
    def __init__(self, X=None, Y=None, rawData=None, architecture=None,
                 activation=None, modelType='regression', distScaler=100000.,
                 numNei=10, trainFrac=0.8, testFrac=None, writeTmpData=False,
                 workDir='./tmp', saveFigs=True, plotFigs=True):
'''
X: input
Y: output
rawData: [x1,x2,value]
numNei: number of neighbor to be considered
trainFrac: fraction of data used for training
'''
if architecture is None:
# default architecture
self.architecture = [256, 64, 64, 64, 1]
else:
if len(architecture)<2:
print("Length of NN architecture must be greater than 1")
exit()
self.architecture = architecture
self.activation = activation
self.modelType = modelType
self.numNei = numNei
self.distScaler = distScaler
self.writeTmpData = writeTmpData
self.workDir = workDir
self.saveFigs = saveFigs
self.plotFigs = plotFigs
hasInput = True
if rawData is not None:
self.rawData = rawData
self.processRawData()
elif X is not None:
self.X = X
self.Y = Y
else:
print("No input is provided, assuming the model will be used for predicting only. ")
hasInput = False
if hasInput:
if testFrac is not None: # testFrac dominates
self.trainFrac = 1.0 - testFrac
else: self.trainFrac = trainFrac
self.EPOCHS = 5000
n = self.X.shape[0]
ind = random.sample(range(n),n)
indTrain = ind[0:np.floor(n*trainFrac).astype(int)]
indTest = ind[np.floor(n*trainFrac).astype(int):]
self.train_dataset = self.X[indTrain]
self.test_dataset = self.X[indTest]
if self.Y is not None:
self.train_labels = self.Y[indTrain]
self.test_labels = self.Y[indTest]
self.mean_train_dataset = np.mean(self.train_dataset, axis = 0)
self.std_train_dataset = np.std(self.train_dataset, axis = 0)
self.normed_train_data = self.norm(self.train_dataset)
self.normed_test_data = self.norm(self.test_dataset)
# build model
#self.model = self.build_model()
# train model
#self.train()
# test model
#self.test()
if not os.path.exists(workDir):
pathlib.Path(workDir).mkdir(parents=True, exist_ok=True)
if writeTmpData:
if rawData is not None:
np.savetxt(workDir+'/test_dataset.txt', self.rawData[indTest,:])
np.savetxt(workDir+'/train_dataset.txt', self.rawData[indTrain,:])
def processRawData(self,rawData=None,numColumnsY=1):
numNei = self.numNei
perNei = 2
numPre = 2
# Defining input size, hidden layer size, output size and batch size respectively
n_in, n_h, n_out, batch_size = numNei * perNei + numPre, 10, 1, 1000
if rawData is None:# normally built model
if numColumnsY == 1:
rawData = self.rawData[:,:0-numColumnsY]
rawTarget = self.rawData[:,-numColumnsY:]
self.Y = rawTarget
elif numColumnsY == 0:# no target
rawData = self.rawData
else:
print('SURF currently can not deal with multi-dimensional targets.')
exit()
else:# loaded model
if numColumnsY == 1:
rawTarget = self.rawData[:,-numColumnsY:]
self.Y = rawTarget
elif numColumnsY == 0:# no target
rawData = rawData
else:
print('SURF currently can not deal with multi-dimensional targets.')
exit()
# Create data
coordsAll = np.array(rawData, dtype=np.float32)
kdTree = spatial.KDTree(coordsAll)
data = []
for i in range(rawData.shape[0]):
distance,index = kdTree.query(rawData[i,:],numNei+1) # nearest 10 points
distance = distance[1:]
index = index[1:]
datatmp = rawData[i,:]
for j in range(numNei):
if numColumnsY==1:
datatmp = np.append(np.append(datatmp, distance[j]*self.distScaler), rawTarget[index[j]])
elif numColumnsY==0:
datatmp = np.append(datatmp, distance[j]*self.distScaler)
else:
print('SURF currently can not deal with multi-dimensional targets.')
exit()
data.append(datatmp.tolist())
data = np.array(data)
self.X = data
return data
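    # Note on the layout built above (descriptive comment added for clarity):
    # each output row is the point's own coordinates followed by numNei
    # (distance*distScaler, neighbour_value) pairs when a target column is
    # present, or just the scaled distances when numColumnsY == 0, which is
    # the numPre + numNei*perNei input width assumed at the top of the method.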
def processRawDataLoad(self,rawData=None):
numNei = self.numNei
perNei = 2
numPre = 2
# Defining input size, hidden layer size, output size and batch size respectively
n_in, n_h, n_out, batch_size = numNei * perNei + numPre, 10, 1, 1000
# Create data
coordsAll = np.array(self.rawData[:,0:-1], dtype=np.float32)
rawTarget = self.rawData[:,-1]
kdTree = spatial.KDTree(coordsAll)
data = []
for i in range(rawData.shape[0]):
distance,index = kdTree.query(rawData[i,:],numNei+1) # nearest 10 points
distance = distance[1:]
index = index[1:]
datatmp = rawData[i,:]
for j in range(numNei):
datatmp = np.append(np.append(datatmp, distance[j]*self.distScaler), rawTarget[index[j]])
data.append(datatmp.tolist())
data = np.array(data)
#self.X = data
return data
def norm(self, v):
#return v
return (v - self.mean_train_dataset) / self.std_train_dataset
# Build the model
def build_model(self,numTypes=None):
print("Building the neural network ...\n")
if self.modelType == "classification":
model = self.build_classification_model(numTypes)
return model
else:
archi = []
archi.append(layers.Dense(self.architecture[0], activation=tf.nn.relu, input_shape=[len(self.train_dataset.T)]))
for i in self.architecture[1:-1]:
archi.append(layers.Dense(i, activation=tf.nn.relu))
if self.activation is None:
archi.append(layers.Dense(self.architecture[-1]))
elif self.activation == "sigmoid":
archi.append(layers.Dense(self.architecture[-1], activation=tf.nn.sigmoid)) # for 0~1
else:#
            #TODO: add more activation functions
archi.append(layers.Dense(self.architecture[-1]))
model = keras.Sequential(archi)
#optimizer = tf.train.RMSPropOptimizer(0.001)
#optimizer = tf.train.AdamOptimizer(1e-4)
model.compile(loss='mae', optimizer='adam', metrics=['mae', 'mse'])
self.model = model
return model
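    # Hedged usage sketch (illustrative, not from the original source): with an
    # (n, 3) array of [x, y, value] rows the class appears to be driven as
    #   snn = SpatialNeuralNet(rawData=xyz, numNei=10)
    #   model = snn.build_model()
    #   model.fit(snn.normed_train_data, snn.train_labels, epochs=snn.EPOCHS)
    # where xyz is a hypothetical NumPy array supplied by the caller.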
def load_model(self, modelName):
if os.path.isdir(modelName):
self.modelLoadedModelPath = modelName
else: self.modelLoadedModelPath = self.workDir + '/' + modelName
with open(self.modelLoadedModelPath+'/config.json') as json_file:
m = json.load(json_file)
self.numNei = m['numNei']
self.modelType = m['modelType']
self.model = tf.keras.models.load_model(self.modelLoadedModelPath)
# Check its architecture
self.model.summary()
# Build the classification model
def build_classification_model(self, numTypes):
model = keras.Sequential([
layers.Dense(len(self.train_dataset.T), activation=tf.nn.relu, input_shape=[len(self.train_dataset.T)]),
layers.Dense(len(self.train_dataset.T), activation=tf.nn.relu),
layers.Dense(len(self.train_dataset.T)/2, activation=tf.nn.relu),
layers.Dense(numTypes, activation=tf.nn.softmax)
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
self.model = model
return model
def train_classification_model(self):
self.model.summary()
# The patience parameter is the amount of epochs to check for improvement
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
history = self.model.fit(self.normed_train_data, self.train_labels.astype(int).flatten(), epochs=self.EPOCHS,
validation_split = 0.2, verbose=0, callbacks=[early_stop, PrintDot()])
hist = | pd.DataFrame(history.history) | pandas.DataFrame |
# --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file
path
data = | pd.read_csv(path) | pandas.read_csv |
import pandas as pd
import glob
def import_all():
path = './data/'
allFiles = glob.glob(path +'/*.csv')
frame = pd.DataFrame()
list_ = []
for file_ in allFiles:
df = pd.read_csv(file_,index_col=None,header=0,low_memory=False)
list_.append(df)
frame = | pd.concat(list_) | pandas.concat |
import base64
import numpy as np
import os
import pandas as pd
import streamlit as st
from streamlit.uploaded_file_manager import UploadedFile
import streamlit.components.v1 as components
import json
from datetime import datetime
from pathlib import Path
from .repo import get_all_commits
DATE_COLUMN = 'last_updated'
### COINGECKO
@st.experimental_memo
def load_coingecko_data():
# coin_file_path = "streamlit_app/coin_info/coin_socials.json"
coin_file_path = './data/coin_socials.json'
pkl_path = './data/merged_on_name_cg_agg.pkl'
print(f"reading file from {coin_file_path}")
try:
with open(coin_file_path, "r") as file:
jj = json.load(file)
coin_social_data_df = pd.DataFrame.from_dict(jj,orient = "index")
print(f"Read df with {coin_social_data_df.shape} rows ")
# lowercase = lambda x: str(x).lower()
# data.rename(lowercase, axis='columns', inplace=True)
coin_social_data_df[DATE_COLUMN] = pd.to_datetime(coin_social_data_df[DATE_COLUMN])
# Add in repo info from electricCapital
additional_repo_info_df = pd.read_pickle(pkl_path)
# st.dataframe(additional_repo_info_df)
# print("Shape")
coin_social_data_df_merged = pd.merge(coin_social_data_df, additional_repo_info_df, left_on = 'name', right_on= 'name_coingecko', how = 'left')
return coin_social_data_df_merged
except Exception as e:
# Notify the reader that the data was successfully loaded.
st.sidebar.error('Error loading data :(')
return None
@st.experimental_memo
def get_one_token_latest(coin_choice):
import pandas as pd
from pycoingecko import CoinGeckoAPI
import time
pkl_path = './data/merged_on_name_cg_agg.pkl'
# coin_data = {}
cg = CoinGeckoAPI()
coin_json = cg.get_coin_by_id(id=coin_choice, localization = False)
required_cols = ['id', 'symbol', 'name', 'asset_platform_id', 'platforms',
'block_time_in_minutes', 'hashing_algorithm', 'categories',
'public_notice', 'additional_notices', 'description', 'links', 'image',
'country_origin', 'genesis_date', 'sentiment_votes_up_percentage',
'sentiment_votes_down_percentage', 'market_cap_rank', 'coingecko_rank',
'coingecko_score', 'developer_score', 'community_score',
'liquidity_score', 'public_interest_score', 'market_data',
'community_data', 'developer_data', 'public_interest_stats',
'status_updates', 'last_updated', 'tickers', 'ico_data',
'contract_address']
df = pd.DataFrame.from_dict({coin_choice: coin_json}, orient="index")
for col in required_cols:
if col not in df.columns:
df[col] = np.nan
additional_repo_info_df = pd.read_pickle(pkl_path)
# st.dataframe(additional_repo_info_df)
# print("Shape")
coin_social_data_df_merged = pd.merge(df, additional_repo_info_df, left_on = 'name', right_on= 'name_coingecko', how = 'left')
return coin_social_data_df_merged
@st.experimental_memo
def get_one_token_latest_market_data(coin_choice):
import time
import datetime
from pycoingecko import CoinGeckoAPI
from_time = "2014-01-01"
element = datetime.datetime.strptime(from_time,"%Y-%m-%d")
from_time = datetime.datetime.timestamp(element)
to_time = time.time()
cg = CoinGeckoAPI()
price_history = cg.get_coin_market_chart_range_by_id(coin_choice, vs_currency = 'usd', from_timestamp=from_time, to_timestamp=to_time)
prices_df = pd.DataFrame(price_history['prices']).rename(columns={0:'time', 1:'price'})
market_caps_df = pd.DataFrame(price_history['market_caps']).rename(columns={0:'time', 1:'market_cap'})
total_volumes_df = pd.DataFrame(price_history['total_volumes']).rename(columns={0:'time', 1:'volume'})
dfs = [prices_df, market_caps_df, total_volumes_df]
for df in dfs:
df['time'] = pd.to_datetime(df.time, unit='ms')
concated_dfs = pd.concat(dfs, axis = 1)[['time', 'price', 'market_cap', 'volume']].iloc[:,2:]
return concated_dfs
def get_description(data, coin_choice):
description_text = data.loc[data.name == str(coin_choice), 'description'].values[0].get("en","")
return description_text
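# Hedged usage note: get_description(data, "bitcoin") is expected to return the
# English entry of the CoinGecko 'description' dict for that coin; the coin
# name here is purely illustrative.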
####GITHUB
@st.cache
def get_repo_data(repo_path, since = None, to = None, first_n_commits = None):
"""
Retrieve commit history from remote source or local .json file
Args:
repo_path: File st.text_input or st.file_uploader
Returns:
pandas.DataFrame: A dataframae containing the commit history
"""
if isinstance(repo_path, UploadedFile):
data = pd.read_json(repo_path, orient="records")
else:
commits = get_all_commits(repo_path, since = since, to = to, first_n_commits = first_n_commits)
if len(commits) == 0:
data = pd.DataFrame({"hash":pd.Series(['NA'], dtype='str'),
"author":pd.Series(['NA'],dtype='str'),
"committed_on":pd.date_range(datetime.now().date(), periods=1, freq='D'),
"authored_on":pd.date_range(datetime.now().date(), periods=1, freq='D'),
"lines_added":pd.Series([0],dtype='int'),
"lines_deleted": | pd.Series([0],dtype='int') | pandas.Series |
# hackathon T - Hacks 3.0
# flask backend of data-cleaning website
import matplotlib.pyplot as plt
#import tensorflow as tf
#from tensorflow.keras import layers
import pandas as pd
import numpy as np
from flask import *
import os
from datetime import *
from subprocess import Popen, PIPE
from math import floor
import converter as con
from flask_ngrok import run_with_ngrok
from meanShift import Mean_Shift
from matplotlib import style
#import seaborn as sns
style.use('ggplot')
from sklearn.model_selection import train_test_split
from datetime import datetime
pd.options.display.max_rows = 10
pd.options.display.float_format = "{:.1f}".format
colors = 10*['g', 'r', 'b', 'c', 'k']
from pyparsing import (
Literal,
Word,
Group,
Forward,
alphas,
alphanums,
Regex,
ParseException,
CaselessKeyword,
Suppress,
delimitedList,
)
import math
import operator
exprStack = []
def push_first(toks):
exprStack.append(toks[0])
def push_unary_minus(toks):
for t in toks:
if t == "-":
exprStack.append("unary -")
else:
break
bnf = None
def BNF():
"""
expop :: '^'
multop :: '*' | '/'
addop :: '+' | '-'
integer :: ['+' | '-'] '0'..'9'+
atom :: PI | E | real | fn '(' expr ')' | '(' expr ')'
factor :: atom [ expop factor ]*
term :: factor [ multop factor ]*
expr :: term [ addop term ]*
"""
global bnf
if not bnf:
# use CaselessKeyword for e and pi, to avoid accidentally matching
# functions that start with 'e' or 'pi' (such as 'exp'); Keyword
# and CaselessKeyword only match whole words
e = CaselessKeyword("E")
pi = CaselessKeyword("PI")
# fnumber = Combine(Word("+-"+nums, nums) +
# Optional("." + Optional(Word(nums))) +
# Optional(e + Word("+-"+nums, nums)))
# or use provided pyparsing_common.number, but convert back to str:
# fnumber = ppc.number().addParseAction(lambda t: str(t[0]))
fnumber = Regex(r"[+-]?\d+(?:\.\d*)?(?:[eE][+-]?\d+)?")
ident = Word(alphas, alphanums + "_$")
plus, minus, mult, div = map(Literal, "+-*/")
lpar, rpar = map(Suppress, "()")
addop = plus | minus
multop = mult | div
expop = Literal("^")
expr = Forward()
expr_list = delimitedList(Group(expr))
# add parse action that replaces the function identifier with a (name, number of args) tuple
def insert_fn_argcount_tuple(t):
fn = t.pop(0)
num_args = len(t[0])
t.insert(0, (fn, num_args))
fn_call = (ident + lpar - Group(expr_list) + rpar).setParseAction(
insert_fn_argcount_tuple
)
atom = (
addop[...]
+ (
(fn_call | pi | e | fnumber | ident).setParseAction(push_first)
| Group(lpar + expr + rpar)
)
).setParseAction(push_unary_minus)
# by defining exponentiation as "atom [ ^ factor ]..." instead of "atom [ ^ atom ]...", we get right-to-left
# exponents, instead of left-to-right that is, 2^3^2 = 2^(3^2), not (2^3)^2.
factor = Forward()
factor <<= atom + (expop + factor).setParseAction(push_first)[...]
term = factor + (multop + factor).setParseAction(push_first)[...]
expr <<= term + (addop + term).setParseAction(push_first)[...]
bnf = expr
return bnf
# map operator symbols to corresponding arithmetic operations
epsilon = 1e-12
opn = {
"+": operator.add,
"-": operator.sub,
"*": operator.mul,
"/": operator.truediv,
"^": operator.pow,
}
fn = {
"sin": math.sin,
"cos": math.cos,
"tan": math.tan,
"exp": math.exp,
"abs": abs,
"trunc": int,
"round": round,
"sgn": lambda a: -1 if a < -epsilon else 1 if a > epsilon else 0,
    # functions with multiple arguments
"multiply": lambda a, b: a * b,
"hypot": math.hypot,
# functions with a variable number of arguments
"all": lambda *a: all(a),
}
def evaluate_stack(s):
op, num_args = s.pop(), 0
if isinstance(op, tuple):
op, num_args = op
if op == "unary -":
return -evaluate_stack(s)
if op in "+-*/^":
# note: operands are pushed onto the stack in reverse order
op2 = evaluate_stack(s)
op1 = evaluate_stack(s)
return opn[op](op1, op2)
elif op == "PI":
return math.pi # 3.1415926535
elif op == "E":
return math.e # 2.718281828
elif op in fn:
# note: args are pushed onto the stack in reverse order
args = reversed([evaluate_stack(s) for _ in range(num_args)])
return fn[op](*args)
elif op[0].isalpha():
raise Exception("invalid identifier '%s'" % op)
else:
# try to evaluate as int first, then as float if int fails
try:
return int(op)
except ValueError:
return float(op)
def test(s):
val = "NA"
exprStack[:] = []
try:
results = BNF().parseString(s, parseAll=True)
val = evaluate_stack(exprStack[:])
except ParseException as pe:
print(s, "failed parse:", str(pe))
except Exception as e:
print(s, "failed eval:", str(e), exprStack)
return val
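# Editor's sketch (assumption: not part of the original app): a quick check of
# the pyparsing-based evaluator above. BNF() builds the grammar once, the parse
# actions push tokens onto exprStack, and evaluate_stack() folds them back into
# a number via test(). Defined only; never called by the app.
def _demo_expression_parser():
    for expr in ["9", "2+3*4", "2^3^2", "sin(PI/2)", "round(3.567)"]:
        print(expr, "=>", test(expr))  # e.g. 2^3^2 => 512 (right-associative)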
def feature_pie(filename, feature1, feature2, class_size = 10):
df = pd.read_csv(filename)
sums = df.groupby(df[feature1])[feature2].sum()
plt.axis('equal')
plt.pie(sums, labels=sums.index, autopct='%1.1f%%', shadow=True, startangle=140)
plt.title("Pie chart on basis of "+feature2)
name = filename.split('.')
plt.savefig(name[0]+".png")
plt.close()
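# Hypothetical call for illustration: feature_pie("static/sales.csv", "region",
# "revenue") would sum revenue per region and write the pie chart to
# static/sales.png (file and column names are made up).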
def feature_scatter(filename, feature1, feature2):
df = pd.read_csv(filename)
plt.axis('equal')
    plt.scatter(df[feature1], df[feature2])
plt.title("Scatter plot between "+feature1+" and "+feature2)
name = filename.split('.')
plt.savefig(name[0]+".png")
plt.close()
def new_feature(filename, com, name):
df = pd.read_csv(filename)
com = com.split(',')
formula = "_"
temp = "_"
for i, c in enumerate(com):
if c == "formula":
formula = com[i+1]
temp = formula
vals = []
i = 0
print(name)
if name != " ":
i = 1
n = len(df)
for j in range(n):
for k, c in enumerate(com):
if k%2 == 0:
if c == "formula":
break
formula = formula.replace(c, str(df.at[j, com[k+1]]))
vals.append(test(formula))
formula = temp
col = len(df.axes[1])
print(vals)
df[name] = vals
"""
if name != " ":
df.insert(col, vals, True)
else:
df.insert(col, vals, True)
"""
del df['Unnamed: 0']
os.remove(filename)
df.to_csv(filename)
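# Format note (inferred from the parsing above, so treat it as an assumption):
# `com` is a comma-separated list of placeholder/column pairs terminated by the
# pair "formula,<expression>", e.g. "a,price,b,qty,formula,a*b" adds a column
# holding price*qty for every row (column names here are made up).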
def disp(filename):
df = pd.read_csv(filename)
n_row = str(len(df))
n_col = str(len(df.axes[1]))
col = []
for c in df.columns:
col.append(c)
types = df.dtypes.tolist()
f = open(filename, "r+")
line0 = f.readline()
line1 = f.readline()
line2 = f.readline()
line3 = f.readline()
line4 = f.readline()
line5 = f.readline()
f.close()
return n_row, n_col, col, types, line0, line1, line2, line3, line4, line5
def stat(filename, feature, func):
df = pd.read_csv(filename)
ans = 0
print(filename,feature,func)
print(df)
if func == "mean":
ans = df[feature].mean()
if func == "max":
ans = df[feature].max()
if func == "min":
ans = df[feature].min()
if func == "sum":
ans = df[feature].sum()
return ans
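# e.g. stat("static/data.csv", "price", "mean") returns the mean of the price
# column; supported func values are "mean", "max", "min" and "sum"
# (file and column names here are illustrative).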
def freq(filename, feature, condition):
df = pd.read_csv(filename)
condition = condition.split(' ')
if condition[0] == "=":
        print(condition[1])
counts = df[feature].value_counts().to_dict()
if condition[1] == 'N/A':
try:
return str(counts['N/A'])
except:
return '0'
try:
return str(counts[int(condition[1])])
except:
return '0'
elif condition[0] == ">":
count = 0
df = pd.read_csv(filename)
n = df.columns.get_loc(feature)
for i in range(len(df)):
            if int(df.iloc[i, n]) > int(condition[1]):
count = count + 1
return str(count)
elif condition[0] == "<":
count = 0
df = pd.read_csv(filename)
n = df.columns.get_loc(feature)
for i in range(len(df)):
            if int(df.iloc[i, n]) < int(condition[1]):
                count = count + 1
        return str(count)
def drop(filename, feature, condition):
df = pd.read_csv(filename)
condition = condition.split(' ')
if condition[0] == "=":
df.drop(df[df[feature] == int(condition[1])].index, inplace = True)
elif condition[0] == ">":
df.drop(df[df[feature] > int(condition[1])].index, inplace = True)
elif condition[0] == "<":
        df.drop(df[df[feature] < int(condition[1])].index, inplace = True)
    # persist the filtered frame; without this the drop has no effect on disk
    df.to_csv(filename, index=False)
def ms(filename, feature1, feature2):
name = filename.split('.')
df = pd.read_csv(filename)
n = df.columns.get_loc(feature1)
mat1 = df.iloc[:, n].values
m = df.columns.get_loc(feature2)
mat2 = df.iloc[:, m].values
combined = np.vstack((mat1, mat2)).T
combined = combined.tolist()
clf = Mean_Shift()
clf.fit(combined)
centroids = clf.centroids
for classification in clf.classifications:
color = colors[classification]
for featureset in clf.classifications[classification]:
plt.scatter(featureset[0], featureset[1], marker='x', color=color, s=150, linewidths=5)
for c in centroids:
plt.scatter(centroids[c][0], centroids[c][1], color='k', marker='*', s=150, linewidths=5)
plt.savefig("static/ms_"+name[0].split('/')[-1]+".png")
plt.close()
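# Hedged note: ms() feeds the two selected columns into the project's own
# Mean_Shift implementation (imported from meanShift above), colours each point
# by its cluster, marks centroids with '*' and writes the plot to
# static/ms_<file>.png; e.g. ms("static/data.csv", "height", "weight") with
# made-up column names.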
def dataDivide(df, percent):
train_df=df.sample(frac=percent,random_state=200) #random state is a seed value
    test_df=df.drop(train_df.index)
return train_df, test_df
def scale(train_df, test_df, scale_factor = 1):
train_df["median_house_value"] /= scale_factor
test_df["median_house_value"] /= scale_factor
return train_df, test_df
def build_model(my_learning_rate):
"""Create and compile a simple linear regression model."""
# Most simple tf.keras models are sequential.
model = tf.keras.models.Sequential()
# Add one linear layer to the model to yield a simple linear regressor.
model.add(tf.keras.layers.Dense(units=1, input_shape=(1,)))
# Compile the model topography into code that TensorFlow can efficiently
# execute. Configure training to minimize the model's mean squared error.
model.compile(optimizer=tf.keras.optimizers.RMSprop(lr=my_learning_rate),
loss="mean_squared_error",
metrics=[tf.keras.metrics.RootMeanSquaredError()])
return model
def train_model(model, df, feature, label, my_epochs,
my_batch_size=None, my_validation_split=0.1):
"""Feed a dataset into the model in order to train it."""
history = model.fit(x=df[feature],
y=df[label],
batch_size=my_batch_size,
epochs=my_epochs,
validation_split=my_validation_split)
# Gather the model's trained weight and bias.
trained_weight = model.get_weights()[0]
trained_bias = model.get_weights()[1]
# The list of epochs is stored separately from the
# rest of history.
epochs = history.epoch
# Isolate the root mean squared error for each epoch.
hist = pd.DataFrame(history.history)
rmse = hist["root_mean_squared_error"]
return epochs, rmse, history.history
def plot_the_loss_curve(epochs, mae_training, mae_validation, filename):
name = filename.split('.')
"""Plot a curve of loss vs. epoch."""
plt.figure()
plt.xlabel("Epoch")
plt.ylabel("Root Mean Squared Error")
plt.plot(epochs[1:], mae_training[1:], label="Training Loss")
plt.plot(epochs[1:], mae_validation[1:], label="Validation Loss")
plt.legend()
# We're not going to plot the first epoch, since the loss on the first epoch
# is often substantially greater than the loss for other epochs.
merged_mae_lists = mae_training[1:] + mae_validation[1:]
highest_loss = max(merged_mae_lists)
lowest_loss = min(merged_mae_lists)
delta = highest_loss - lowest_loss
print(delta)
top_of_y_axis = highest_loss + (delta * 0.05)
bottom_of_y_axis = lowest_loss - (delta * 0.05)
plt.ylim([bottom_of_y_axis, top_of_y_axis])
    plt.savefig("static/nn_"+name[0]+".png")
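# Hedged sketch of how the Keras helpers above appear to fit together (the
# column names follow the California-housing style suggested by
# "median_house_value" and are otherwise assumptions):
#   model = build_model(my_learning_rate=0.01)
#   epochs, rmse, hist = train_model(model, train_df, "median_income",
#                                    "median_house_value", my_epochs=30)
#   plot_the_loss_curve(epochs, hist["root_mean_squared_error"],
#                       hist["val_root_mean_squared_error"], filename)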
app = Flask(__name__)
#app.secret_key = 'maidoublequotesmelikhrhahu'
#run_with_ngrok(app)
@app.route('/', methods=['GET', 'POST'])
def basic():
if request.method == 'POST':
if request.files['file'].filename != '':
f = request.files.get('file')
varrr = "static/"+f.filename
err=f.save(varrr)
name = f.filename.split('.')
ext = name[-1]
name = name[0]
if ext == "csv":
con.csvtojson("static/"+f.filename, "static/"+name+".json")
os.remove("static/"+f.filename)
con.jsontocsv("static/"+name+".json", "static/"+f.filename)
if ext == "json":
con.jsontocsv("static/"+f.filename, "static/"+name+".csv")
elif ext == "xml":
con.xmltocsv("static/"+f.filename, "static/"+name+".csv")
elif ext == "nc":
con.netCDFtocsv("static/"+f.filename, "static/"+name+".csv")
n_row, n_col, col, types, line0, line1, line2, line3, line4, line5 = disp("static/"+name+".csv")
res = make_response(render_template("filedata.html", filename = f.filename, n_row = n_row, n_col = n_col, col = col, types = types, lists = "../static/"+name+".csv?"+str(datetime.now()), convertable=["json", "xml", "nc"]))
res.set_cookie("filename", value=f.filename)
return res
return render_template("upload.html")
@app.route('/Info', methods=['GET', 'POST'])
def info():
filename = request.cookies.get('filename')
name = filename.split('.')
n_row, n_col, col, types, line0, line1, line2, line3, line4, line5 = disp("static/"+name[0]+".csv")
return render_template("filedata.html", filename = filename, n_row = n_row, n_col = n_col, col = col, types = types, lists = "../static/"+name[0]+".csv?"+str(datetime.now()), convertable=["json", "xml", "nc"])
@app.route('/stat', methods=['GET', 'POST'])
def stats():
if request.method == 'GET':
filename = request.args.get('filename').split('/')[-1]
name = filename.split('.')
ext = name[-1]
name = name[0]
if ext == "json":
con.jsontocsv("static/"+filename, "static/"+name+".csv")
elif ext == "nc":
con.netCDFtocsv("static/"+filename, "static/"+name+".csv")
elif ext == "xml":
con.xmltocsv("static/"+filename, "static/"+name+".csv")
feature = request.args.get('feature')
func = request.args.get('func')
ans = stat("static/"+name+".csv", feature, func)
print(ans,type(ans))
return str(ans)
return render_template("upload.html")
@app.route('/con', methods = ['GET', 'POST'])
def conv():
if request.method == 'GET':
filename = request.args.get('filename')
name = filename.split('.')
ext = name[-1]
name = name[0]
to = request.args.get('to')
if ext == "csv":
if to == "json":
con.csvtojson("static/"+filename, "static/"+name+"."+to)
elif to == "xml":
con.csvtoxml("static/"+filename, "static/"+name+"."+to)
elif to == "nc":
con.csvtonetCDF("static/"+filename, "static/"+name+"."+to)
elif ext == "json":
if to == "csv":
con.jsontocsv("static/"+filename, "static/"+name+"."+to)
elif to == "xml":
con.jsontoxml("static/"+filename, "static/"+name+"."+to)
elif to == "nc":
con.jsontonetCDF("static/"+filename, "static/"+name+"."+to)
elif ext == "xml":
if to == "json":
con.xmltojson("static/"+filename, "static/"+name+"."+to)
elif to == "csv":
con.xmltocsv("static/"+filename, "static/"+name+"."+to)
elif to == "nc":
con.xmltonetCDF("static/"+filename, "static/"+name+"."+to)
elif ext == "nc":
if to == "json":
con.netCDFtojson("static/"+filename, "static/"+name+"."+to)
elif to == "csv":
con.netCDFtocsv("static/"+filename, "static/"+name+"."+to)
elif to == "xml":
con.netCDFtoxml("static/"+filename, "static/"+name+"."+to)
return "../static/"+name+"."+to
return render_template("upload.html")
@app.route('/analyse', methods = ['GET', 'POST'])
def analyse():
filename = request.cookies.get('filename')
name = filename.split('.')
name = name[0]
df = pd.read_csv("static/"+name+".csv")
col = []
for c in df.columns:
col.append(c)
if request.method == 'GET':
feature1 = request.args.get('feature1')
feature2 = request.args.get('feature2')
if feature1 == None:
return render_template("analysis.html", col = col)
feature_pie("static/"+name+".csv", feature1, feature2)
return str("../static/"+name+".png")
return render_template("analysis.html", col = col)
@app.route('/anAdd', methods = ['GET', 'POST'])
def anAdd():
filename = request.cookies.get('filename')
name = filename.split('.')
name = name[0]
df = pd.read_csv("static/"+name+".csv")
col = []
for c in df.columns:
col.append(c)
if request.method == 'GET':
kname = request.args.get('name')
print(kname)
com = request.args.get('formula')
new_feature("static/"+filename, com, kname)
feature1 = request.args.get('feature1')
feature_pie("static/"+name+".csv", feature1, kname)
return "../static/"+name+".png"
@app.route('/clean', methods = ['GET', 'POST'])
def clean():
filename = request.cookies.get('filename')
name = filename.split('.')
name = name[0]
df = pd.read_csv("static/"+name+".csv")
col = []
for c in df.columns:
col.append(c)
if request.method == 'POST':
feature1 = request.form['feature1']
feature2 = request.form['feature2']
feature_scatter("static/"+name+".csv", feature1, feature2)
return render_template("clean.html", col = col, img = "static/"+name+".png")
return render_template("clean.html", col = col)
@app.route('/clAdd', methods = ['GET', 'POST'])
def clAdd():
filename = request.cookies.get('filename')
name = filename.split('.')
name = name[0]
df = pd.read_csv("static/"+name+".csv")
col = []
for c in df.columns:
col.append(c)
if request.method == 'GET':
        kname = request.args.get('name')
        com = request.args.get('formula')
        feature1 = request.args.get('feature1')
        new_feature("static/"+name+".csv", com, kname)
        feature_scatter("static/"+name+".csv", feature1, kname)
return "../static/"+name+".png"
@app.route('/freq', methods = ['GET', 'POST'])
def fre():
filename = request.cookies.get('filename')
name = filename.split('.')
name = name[0]
df = pd.read_csv("static/"+name+".csv")
col = []
for c in df.columns:
col.append(c)
if request.method == 'GET':
feature = request.args.get('feature')
cond = request.args.get('cond')
freqq = freq('static/'+name+".csv", feature, cond)
return freqq
return render_template("clean.html", col = col)
@app.route('/drop', methods = ['GET', 'POST'])
def dro():
filename = request.cookies.get('filename')
name = filename.split('.')
name = name[0]
df = pd.read_csv("static/"+name+".csv")
col = []
for c in df.columns:
col.append(c)
if request.method == 'GET':
feature = request.args.get('feature')
cond = request.args.get('cond')
        drop("static/"+name+".csv", feature, cond)
        return ""
return render_template("clean.html", col = col)
@app.route('/ms', methods = ['GET', 'POST'])
def mShift():
filename = request.cookies.get('filename')
name = filename.split('.')
name = name[0]
df = pd.read_csv("static/"+name+".csv")
col = []
for c in df.columns:
col.append(c)
if request.method == 'GET':
feature1 = request.args.get('feature1')
feature2 = request.args.get('feature2')
if feature1 == None:
return render_template("meanShift.html", filename = filename, col = col)
ms('static/'+filename, feature1, feature2)
name = filename.split('.')
return "../static/ms_"+name[0]+".png"
return render_template("meanShift.html", filename = filename, col = col)
@app.route('/nn', methods = ['GET', 'POST'])
def neural():
    filename = request.cookies.get('filename')
    name = filename.split('.')
name = name[0]
df = | pd.read_csv("static/"+name+".csv") | pandas.read_csv |
import numpy as np
import pandas as pd
from numpy import inf, nan
from numpy.testing import assert_array_almost_equal, assert_array_equal
from pandas import DataFrame, Series, Timestamp
from pandas.testing import assert_frame_equal, assert_series_equal
from shapely.geometry.point import Point
from pymove import MoveDataFrame
from pymove.utils import integration
from pymove.utils.constants import (
ADDRESS,
CITY,
DATETIME,
DIST_EVENT,
DIST_HOME,
DIST_POI,
EVENT_ID,
EVENT_TYPE,
GEOMETRY,
HOME,
ID_POI,
LATITUDE,
LONGITUDE,
NAME_POI,
POI,
TRAJ_ID,
TYPE_POI,
VIOLATING,
)
list_random_banks = [
[39.984094, 116.319236, 1, 'bank'],
[39.984198, 116.319322, 2, 'randomvalue'],
[39.984224, 116.319402, 3, 'bancos_postos'],
[39.984211, 116.319389, 4, 'randomvalue'],
[39.984217, 116.319422, 5, 'bancos_PAE'],
[39.984710, 116.319865, 6, 'bancos_postos'],
[39.984674, 116.319810, 7, 'bancos_agencias'],
[39.984623, 116.319773, 8, 'bancos_filiais'],
[39.984606, 116.319732, 9, 'banks'],
[39.984555, 116.319728, 10, 'banks']
]
list_random_bus_station = [
[39.984094, 116.319236, 1, 'transit_station'],
[39.984198, 116.319322, 2, 'randomvalue'],
[39.984224, 116.319402, 3, 'transit_station'],
[39.984211, 116.319389, 4, 'pontos_de_onibus'],
[39.984217, 116.319422, 5, 'transit_station'],
[39.984710, 116.319865, 6, 'randomvalue'],
[39.984674, 116.319810, 7, 'bus_station'],
[39.984623, 116.319773, 8, 'bus_station'],
]
list_random_bar_restaurant = [
[39.984094, 116.319236, 1, 'restaurant'],
[39.984198, 116.319322, 2, 'restaurant'],
[39.984224, 116.319402, 3, 'randomvalue'],
[39.984211, 116.319389, 4, 'bar'],
[39.984217, 116.319422, 5, 'bar'],
[39.984710, 116.319865, 6, 'bar-restaurant'],
[39.984674, 116.319810, 7, 'random123'],
[39.984623, 116.319773, 8, '123'],
]
list_random_parks = [
[39.984094, 116.319236, 1, 'pracas_e_parques'],
[39.984198, 116.319322, 2, 'park'],
[39.984224, 116.319402, 3, 'parks'],
[39.984211, 116.319389, 4, 'random'],
[39.984217, 116.319422, 5, '123'],
[39.984710, 116.319865, 6, 'park'],
[39.984674, 116.319810, 7, 'parks'],
[39.984623, 116.319773, 8, 'pracas_e_parques'],
]
list_random_police = [
[39.984094, 116.319236, 1, 'distritos_policiais'],
[39.984198, 116.319322, 2, 'police'],
[39.984224, 116.319402, 3, 'police'],
[39.984211, 116.319389, 4, 'distritos_policiais'],
[39.984217, 116.319422, 5, 'random'],
[39.984710, 116.319865, 6, 'randomvalue'],
[39.984674, 116.319810, 7, '123'],
[39.984623, 116.319773, 8, 'bus_station'],
]
list_move = [
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984559000000004, 116.326696, Timestamp('2008-10-23 10:37:26'), 1],
[40.002899, 116.32151999999999, Timestamp('2008-10-23 10:50:16'), 1],
[40.016238, 116.30769099999999, Timestamp('2008-10-23 11:03:06'), 1],
[40.013814, 116.306525, Timestamp('2008-10-23 11:58:33'), 2],
[40.009735, 116.315069, Timestamp('2008-10-23 23:50:45'), 2],
[39.993527, 116.32648300000001, Timestamp('2008-10-24 00:02:14'), 2],
[39.978575, 116.326975, Timestamp('2008-10-24 00:22:01'), 3],
[39.981668, 116.310769, Timestamp('2008-10-24 01:57:57'), 3],
]
list_pois = [
[39.984094, 116.319236, 1, 'policia', 'distrito_pol_1'],
[39.991013, 116.326384, 2, 'policia', 'policia_federal'],
[40.01, 116.312615, 3, 'comercio', 'supermercado_aroldo'],
[40.013821, 116.306531, 4, 'show', 'forro_tropykalia'],
[40.008099, 116.31771100000002, 5, 'risca-faca',
'rinha_de_galo_world_cup'],
[39.985704, 116.326877, 6, 'evento', 'adocao_de_animais'],
[39.979393, 116.3119, 7, 'show', 'dia_do_municipio']
]
# Union tests
def test_union_poi_bank():
pois_df = DataFrame(
data=list_random_banks,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, 1, 'banks'],
[39.984198, 116.319322, 2, 'randomvalue'],
[39.984224, 116.319402, 3, 'banks'],
[39.984211, 116.319389, 4, 'randomvalue'],
[39.984217, 116.319422, 5, 'banks'],
[39.984710, 116.319865, 6, 'banks'],
[39.984674, 116.319810, 7, 'banks'],
[39.984623, 116.319773, 8, 'banks'],
[39.984606, 116.319732, 9, 'banks'],
[39.984555, 116.319728, 10, 'banks']
],
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
)
integration.union_poi_bank(pois_df, TYPE_POI, inplace=True)
assert_frame_equal(pois_df, expected)
def test_union_poi_bus_station():
pois_df = DataFrame(
data=list_random_bus_station,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, 1, 'bus_station'],
[39.984198, 116.319322, 2, 'randomvalue'],
[39.984224, 116.319402, 3, 'bus_station'],
[39.984211, 116.319389, 4, 'bus_station'],
[39.984217, 116.319422, 5, 'bus_station'],
[39.984710, 116.319865, 6, 'randomvalue'],
[39.984674, 116.319810, 7, 'bus_station'],
[39.984623, 116.319773, 8, 'bus_station'],
],
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
integration.union_poi_bus_station(pois_df, TYPE_POI, inplace=True)
assert_frame_equal(pois_df, expected)
def test_union_poi_bar_restaurant():
pois_df = DataFrame(
data=list_random_bar_restaurant,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, 1, 'bar-restaurant'],
[39.984198, 116.319322, 2, 'bar-restaurant'],
[39.984224, 116.319402, 3, 'randomvalue'],
[39.984211, 116.319389, 4, 'bar-restaurant'],
[39.984217, 116.319422, 5, 'bar-restaurant'],
[39.984710, 116.319865, 6, 'bar-restaurant'],
[39.984674, 116.319810, 7, 'random123'],
[39.984623, 116.319773, 8, '123'],
],
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
integration.union_poi_bar_restaurant(pois_df, TYPE_POI, inplace=True)
assert_frame_equal(pois_df, expected)
def test_union_poi_parks():
pois_df = DataFrame(
data=list_random_parks,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, 1, 'parks'],
[39.984198, 116.319322, 2, 'parks'],
[39.984224, 116.319402, 3, 'parks'],
[39.984211, 116.319389, 4, 'random'],
[39.984217, 116.319422, 5, '123'],
[39.984710, 116.319865, 6, 'parks'],
[39.984674, 116.319810, 7, 'parks'],
[39.984623, 116.319773, 8, 'parks'],
],
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
integration.union_poi_parks(pois_df, TYPE_POI, inplace=True)
assert_frame_equal(pois_df, expected)
def test_union_poi_police():
pois_df = DataFrame(
data=list_random_police,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, 1, 'police'],
[39.984198, 116.319322, 2, 'police'],
[39.984224, 116.319402, 3, 'police'],
[39.984211, 116.319389, 4, 'police'],
[39.984217, 116.319422, 5, 'random'],
[39.984710, 116.319865, 6, 'randomvalue'],
[39.984674, 116.319810, 7, '123'],
[39.984623, 116.319773, 8, 'bus_station'],
],
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
integration.union_poi_police(pois_df, TYPE_POI, inplace=True)
assert_frame_equal(pois_df, expected)
def test_join_colletive_areas():
move_df = MoveDataFrame(
data=list_move,
)
move_df['geometry'] = move_df.apply(lambda x: Point(x['lon'], x['lat']), axis=1)
expected = move_df.copy()
indexes_ac = np.linspace(0, move_df.shape[0], 5, dtype=int)
area_c = move_df[move_df.index.isin(indexes_ac)].copy()
integration.join_collective_areas(move_df, area_c, inplace=True)
expected[VIOLATING] = [True, False, True, False, True, False, True, False, False]
assert_frame_equal(move_df, expected)
def test__reset_and_creates_id_and_lat_lon():
move_df = MoveDataFrame(list_move)
pois = DataFrame(
data=list_pois,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI, NAME_POI],
index=[0, 1, 2, 3, 4, 5, 6]
)
dists, ids, tags, lats, lons = (
integration._reset_and_creates_id_and_lat_lon(
move_df, pois, True, True
)
)
id_expected = np.full(9, '', dtype='object_')
tag_expected = np.full(9, '', dtype='object_')
dist_expected = np.full(
9, np.Infinity, dtype=np.float64
)
lat_expected = np.full(7, np.Infinity, dtype=np.float64)
lon_expected = np.full(7, np.Infinity, dtype=np.float64)
assert_array_almost_equal(dists, dist_expected)
assert_array_equal(ids, id_expected)
assert_array_equal(tags, tag_expected)
assert_array_almost_equal(lats, lat_expected)
assert_array_almost_equal(lons, lon_expected)
dists, ids, tags, lats, lons = (
integration._reset_and_creates_id_and_lat_lon(
move_df, pois, True, False
)
)
assert_array_almost_equal(dists, dist_expected)
assert_array_equal(ids, id_expected)
assert_array_equal(tags, tag_expected)
assert_array_almost_equal(lats, lat_expected)
assert_array_almost_equal(lons, lon_expected)
dists, ids, tags, lats, lons = (
integration._reset_and_creates_id_and_lat_lon(
move_df, pois, False, True
)
)
lat_expected = np.full(9, np.Infinity, dtype=np.float64)
lon_expected = np.full(9, np.Infinity, dtype=np.float64)
assert_array_almost_equal(dists, dist_expected)
assert_array_equal(ids, id_expected)
assert_array_equal(tags, tag_expected)
assert_array_almost_equal(lats, lat_expected)
assert_array_almost_equal(lons, lon_expected)
dists, ids, tags, lats, lons = (
integration._reset_and_creates_id_and_lat_lon(
move_df, pois, False, False
)
)
assert_array_almost_equal(dists, dist_expected)
assert_array_equal(ids, id_expected)
assert_array_equal(tags, tag_expected)
assert_array_almost_equal(lats, lat_expected)
assert_array_almost_equal(lons, lon_expected)
def test__reset_set_window__and_creates_event_id_type():
list_events = [
[39.984094, 116.319236, 1,
Timestamp('2008-10-24 01:57:57'), 'show do tropykalia'],
[39.991013, 116.326384, 2,
Timestamp('2008-10-24 00:22:01'), 'evento da prefeitura'],
[40.01, 116.312615, 3,
Timestamp('2008-10-25 00:21:01'), 'show do seu joao'],
[40.013821, 116.306531, 4,
Timestamp('2008-10-26 00:22:01'), 'missa']
]
move_df = MoveDataFrame(list_move)
pois = DataFrame(
data=list_events,
columns=[LATITUDE, LONGITUDE, EVENT_ID, DATETIME, EVENT_TYPE],
index=[0, 1, 2, 3]
)
list_win_start = [
'2008-10-22T17:23:05.000000000', '2008-10-22T22:07:26.000000000',
'2008-10-22T22:20:16.000000000', '2008-10-22T22:33:06.000000000',
'2008-10-22T23:28:33.000000000', '2008-10-23T11:20:45.000000000',
'2008-10-23T11:32:14.000000000', '2008-10-23T11:52:01.000000000',
'2008-10-23T13:27:57.000000000'
]
win_start_expected = Series(pd.to_datetime(list_win_start), name=DATETIME)
list_win_end = [
'2008-10-23T18:23:05.000000000', '2008-10-23T23:07:26.000000000',
'2008-10-23T23:20:16.000000000', '2008-10-23T23:33:06.000000000',
'2008-10-24T00:28:33.000000000', '2008-10-24T12:20:45.000000000',
'2008-10-24T12:32:14.000000000', '2008-10-24T12:52:01.000000000',
'2008-10-24T14:27:57.000000000'
]
win_end_expected = Series(pd.to_datetime(list_win_end), name=DATETIME)
dist_expected = np.full(
9, np.Infinity, dtype=np.float64
)
type_expected = np.full(9, '', dtype='object_')
id_expected = np.full(9, '', dtype='object_')
window_starts, window_ends, current_distances, event_id, event_type = (
integration._reset_set_window__and_creates_event_id_type(
move_df, pois, 45000, DATETIME
)
)
assert_series_equal(window_starts, win_start_expected)
| assert_series_equal(window_ends, win_end_expected) | pandas.testing.assert_series_equal |
import datetime
from typing import List
import pandas as pd
import pytest
from ruamel.yaml import YAML
import great_expectations.exceptions as ge_exceptions
from great_expectations.core.batch import (
Batch,
BatchDefinition,
BatchSpec,
RuntimeBatchRequest,
)
from great_expectations.core.batch_spec import (
PathBatchSpec,
RuntimeDataBatchSpec,
RuntimeQueryBatchSpec,
S3BatchSpec,
)
from great_expectations.core.id_dict import IDDict
from great_expectations.data_context.types.resource_identifiers import BatchIdentifier
from great_expectations.data_context.util import instantiate_class_from_config
from great_expectations.datasource import Datasource
from great_expectations.datasource.data_connector import RuntimeDataConnector
yaml = YAML()
@pytest.fixture
def basic_datasource_with_assets(tmp_path_factory):
basic_datasource: Datasource = instantiate_class_from_config(
config=yaml.load(
"""
class_name: Datasource
data_connectors:
runtime:
class_name: RuntimeDataConnector
batch_identifiers:
- hour
- minute
assets:
asset_a:
batch_identifiers:
- day
- month
asset_b:
batch_identifiers:
- day
- month
- year
execution_engine:
class_name: PandasExecutionEngine
""",
),
runtime_environment={
"name": "my_datasource",
},
config_defaults={
"module_name": "great_expectations.datasource",
},
)
return basic_datasource
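# Descriptive note: the fixture above declares "hour"/"minute" as
# data-connector-level batch identifiers and "day"/"month" (plus "year" for
# asset_b) as asset-level ones; the tests below exercise exactly that split.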
def test_self_check(basic_datasource):
test_runtime_data_connector: RuntimeDataConnector = (
basic_datasource.data_connectors["test_runtime_data_connector"]
)
assert test_runtime_data_connector.self_check() == {
"class_name": "RuntimeDataConnector",
"data_asset_count": 0,
"data_assets": {},
"example_data_asset_names": [],
"example_unmatched_data_references": [],
"note": "RuntimeDataConnector will not have data_asset_names until they are "
"passed in through RuntimeBatchRequest",
"unmatched_data_reference_count": 0,
}
def test_self_check_named_assets(basic_datasource_with_assets):
test_runtime_data_connector: RuntimeDataConnector = (
basic_datasource_with_assets.data_connectors["runtime"]
)
assert test_runtime_data_connector.self_check() == {
"class_name": "RuntimeDataConnector",
"data_asset_count": 2,
"example_data_asset_names": ["asset_a", "asset_b"],
"data_assets": {
"asset_a": {"batch_definition_count": 0, "example_data_references": []},
"asset_b": {"batch_definition_count": 0, "example_data_references": []},
},
"unmatched_data_reference_count": 0,
"example_unmatched_data_references": [],
}
def test_new_self_check_after_adding_named_asset_a(
basic_datasource_with_assets, test_df_pandas
):
runtime_data_connector: RuntimeDataConnector = (
basic_datasource_with_assets.data_connectors["runtime"]
)
res: List[
BatchDefinition
] = runtime_data_connector.get_batch_definition_list_from_batch_request(
batch_request=RuntimeBatchRequest(
datasource_name=basic_datasource_with_assets.name,
data_connector_name="runtime",
data_asset_name="asset_a",
batch_identifiers={"month": 4, "day": 1},
runtime_parameters={"batch_data": test_df_pandas},
)
)
assert runtime_data_connector.self_check() == {
"class_name": "RuntimeDataConnector",
"data_asset_count": 2,
"example_data_asset_names": ["asset_a", "asset_b"],
"data_assets": {
"asset_a": {
"batch_definition_count": 1,
"example_data_references": ["4-1"],
},
"asset_b": {"batch_definition_count": 0, "example_data_references": []},
},
"unmatched_data_reference_count": 0,
"example_unmatched_data_references": [],
}
def test_new_self_check_after_adding_new_asset_c(
basic_datasource_with_assets, test_df_pandas
):
runtime_data_connector: RuntimeDataConnector = (
basic_datasource_with_assets.data_connectors["runtime"]
)
res: List[
BatchDefinition
] = runtime_data_connector.get_batch_definition_list_from_batch_request(
batch_request=RuntimeBatchRequest(
datasource_name=basic_datasource_with_assets.name,
data_connector_name="runtime",
data_asset_name="asset_c",
batch_identifiers={"hour": 12, "minute": 15},
runtime_parameters={"batch_data": test_df_pandas},
)
)
assert runtime_data_connector.self_check() == {
"class_name": "RuntimeDataConnector",
"data_asset_count": 3,
"example_data_asset_names": ["asset_a", "asset_b", "asset_c"],
"data_assets": {
"asset_a": {"batch_definition_count": 0, "example_data_references": []},
"asset_b": {"batch_definition_count": 0, "example_data_references": []},
"asset_c": {
"batch_definition_count": 1,
"example_data_references": ["12-15"],
},
},
"unmatched_data_reference_count": 0,
"example_unmatched_data_references": [],
}
def test_add_batch_identifiers_correct(basic_datasource_with_assets):
test_runtime_data_connector: RuntimeDataConnector = (
basic_datasource_with_assets.data_connectors["runtime"]
)
assert test_runtime_data_connector._batch_identifiers == {
"runtime": ["hour", "minute"],
"asset_a": ["day", "month"],
"asset_b": ["day", "month", "year"],
}
def test_batch_identifiers_missing_completely():
# missing from base DataConnector
with pytest.raises(ge_exceptions.DataConnectorError):
instantiate_class_from_config(
config=yaml.load(
"""
class_name: Datasource
data_connectors:
runtime:
class_name: RuntimeDataConnector
execution_engine:
class_name: PandasExecutionEngine
""",
),
runtime_environment={
"name": "my_datasource",
},
config_defaults={
"module_name": "great_expectations.datasource",
},
)
def test_batch_identifiers_missing_from_named_asset():
with pytest.raises(ge_exceptions.DataConnectorError):
basic_datasource: Datasource = instantiate_class_from_config(
config=yaml.load(
"""
class_name: Datasource
data_connectors:
runtime:
class_name: RuntimeDataConnector
batch_identifiers:
- hour
- minute
assets:
asset_a:
execution_engine:
class_name: PandasExecutionEngine
""",
),
runtime_environment={
"name": "my_datasource",
},
config_defaults={
"module_name": "great_expectations.datasource",
},
)
def test_error_checking_unknown_datasource(basic_datasource):
test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
test_runtime_data_connector: RuntimeDataConnector = (
basic_datasource.data_connectors["test_runtime_data_connector"]
)
# Test for an unknown datasource
with pytest.raises(ValueError):
# noinspection PyUnusedLocal
batch_definition_list: List[
BatchDefinition
] = test_runtime_data_connector.get_batch_definition_list_from_batch_request(
batch_request=RuntimeBatchRequest(
datasource_name="non_existent_datasource",
data_connector_name="test_runtime_data_connector",
data_asset_name="my_data_asset",
runtime_parameters={"batch_data": test_df},
batch_identifiers={"airflow_run_id": "first"},
)
)
def test_error_checking_unknown_data_connector(basic_datasource):
test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
test_runtime_data_connector: RuntimeDataConnector = (
basic_datasource.data_connectors["test_runtime_data_connector"]
)
# Test for an unknown data_connector
with pytest.raises(ValueError):
# noinspection PyUnusedLocal
batch_definition_list: List[
BatchDefinition
] = test_runtime_data_connector.get_batch_definition_list_from_batch_request(
batch_request=RuntimeBatchRequest(
datasource_name=basic_datasource.name,
data_connector_name="non_existent_data_connector",
data_asset_name="my_data_asset",
runtime_parameters={"batch_data": test_df},
batch_identifiers={"airflow_run_id": "first"},
)
)
def test_error_checking_missing_runtime_parameters(basic_datasource):
test_runtime_data_connector: RuntimeDataConnector = (
basic_datasource.data_connectors["test_runtime_data_connector"]
)
# test for missing runtime_parameters arg
with pytest.raises(TypeError):
# noinspection PyUnusedLocal, PyArgumentList
batch_definition_list: List[
BatchDefinition
] = test_runtime_data_connector.get_batch_definition_list_from_batch_request(
batch_request=RuntimeBatchRequest(
datasource_name=basic_datasource.name,
data_connector_name="test_runtime_data_connector",
data_asset_name="my_data_asset",
batch_identifiers={"pipeline_stage_name": "munge"},
)
)
def test_asset_is_name_batch_identifier_correctly_used(
basic_datasource_with_assets, test_df_pandas
):
"""
    Using asset_a, which is named in the RuntimeDataConnector configuration, with batch_identifiers that are defined for that asset.
"""
runtime_data_connector: RuntimeDataConnector = (
basic_datasource_with_assets.data_connectors["runtime"]
)
res: List[
BatchDefinition
] = runtime_data_connector.get_batch_definition_list_from_batch_request(
batch_request=RuntimeBatchRequest(
datasource_name=basic_datasource_with_assets.name,
data_connector_name="runtime",
data_asset_name="asset_a",
batch_identifiers={"month": 4, "day": 1},
runtime_parameters={"batch_data": test_df_pandas},
)
)
assert len(res) == 1
assert res[0] == BatchDefinition(
datasource_name="my_datasource",
data_connector_name="runtime",
data_asset_name="asset_a",
batch_identifiers=IDDict({"month": 4, "day": 1}),
)
def test_asset_is_named_but_batch_identifier_in_other_asset(
basic_datasource_with_assets, test_df_pandas
):
runtime_data_connector: RuntimeDataConnector = (
basic_datasource_with_assets.data_connectors["runtime"]
)
with pytest.raises(ge_exceptions.DataConnectorError):
runtime_data_connector.get_batch_definition_list_from_batch_request(
batch_request=RuntimeBatchRequest(
datasource_name=basic_datasource_with_assets.name,
data_connector_name="runtime",
data_asset_name="asset_a",
batch_identifiers={
"year": 2022,
"month": 4,
"day": 1,
}, # year is only defined for asset_b
runtime_parameters={"batch_data": test_df_pandas},
)
)
def test_asset_is_named_but_batch_identifier_not_defined_anywhere(
basic_datasource_with_assets, test_df_pandas
):
runtime_data_connector: RuntimeDataConnector = (
basic_datasource_with_assets.data_connectors["runtime"]
)
with pytest.raises(ge_exceptions.DataConnectorError):
runtime_data_connector.get_batch_definition_list_from_batch_request(
batch_request=RuntimeBatchRequest(
datasource_name=basic_datasource_with_assets.name,
data_connector_name="runtime",
data_asset_name="asset_a",
batch_identifiers={"blorg": 2022}, # blorg is not defined anywhere
runtime_parameters={"batch_data": test_df_pandas},
)
)
def test_named_asset_is_trying_to_use_batch_identifier_defined_in_data_connector(
basic_datasource_with_assets, test_df_pandas
):
runtime_data_connector: RuntimeDataConnector = (
basic_datasource_with_assets.data_connectors["runtime"]
)
with pytest.raises(ge_exceptions.DataConnectorError):
runtime_data_connector.get_batch_definition_list_from_batch_request(
batch_request=RuntimeBatchRequest(
datasource_name=basic_datasource_with_assets.name,
data_connector_name="runtime",
data_asset_name="asset_a",
batch_identifiers={
"month": 4,
"day": 1,
"hour": 12,
}, # hour is a data-connector level batch identifier
runtime_parameters={"batch_data": test_df_pandas},
)
)
def test_runtime_batch_request_trying_to_use_batch_identifier_defined_at_asset_level(
basic_datasource_with_assets, test_df_pandas
):
runtime_data_connector: RuntimeDataConnector = (
basic_datasource_with_assets.data_connectors["runtime"]
)
with pytest.raises(ge_exceptions.DataConnectorError):
runtime_data_connector.get_batch_definition_list_from_batch_request(
batch_request=RuntimeBatchRequest(
datasource_name=basic_datasource_with_assets.name,
data_connector_name="runtime",
data_asset_name="new_asset",
batch_identifiers={
"year": 2022,
"hour": 12,
"minute": 30,
}, # year is a asset_a level batch identifier
runtime_parameters={"batch_data": test_df_pandas},
)
)
def test_error_checking_too_many_runtime_parameters(basic_datasource):
test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
test_runtime_data_connector: RuntimeDataConnector = (
basic_datasource.data_connectors["test_runtime_data_connector"]
)
# test for too many runtime_parameters keys
with pytest.raises(ge_exceptions.InvalidBatchRequestError):
# noinspection PyUnusedLocal
batch_definition_list: List[
BatchDefinition
] = test_runtime_data_connector.get_batch_definition_list_from_batch_request(
batch_request=RuntimeBatchRequest(
datasource_name=basic_datasource.name,
data_connector_name="test_runtime_data_connector",
data_asset_name="my_data_asset",
runtime_parameters={"batch_data": test_df, "path": "my_path"},
batch_identifiers={"pipeline_stage_name": "munge"},
)
)
def test_batch_identifiers_and_batch_identifiers_success_all_keys_present(
basic_datasource,
):
test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
batch_identifiers: dict
batch_identifiers = {
"pipeline_stage_name": "core_processing",
"airflow_run_id": 1234567890,
"custom_key_0": "custom_value_0",
}
test_runtime_data_connector: RuntimeDataConnector = (
basic_datasource.data_connectors["test_runtime_data_connector"]
)
# Verify that all keys in batch_identifiers are acceptable as batch_identifiers (using batch count).
batch_request: dict = {
"datasource_name": basic_datasource.name,
"data_connector_name": test_runtime_data_connector.name,
"data_asset_name": "IN_MEMORY_DATA_ASSET",
"runtime_parameters": {"batch_data": test_df},
"batch_identifiers": batch_identifiers,
}
batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
batch_definition_list: List[
BatchDefinition
] = test_runtime_data_connector.get_batch_definition_list_from_batch_request(
batch_request=batch_request
)
assert len(batch_definition_list) == 1
def test_batch_identifiers_and_batch_identifiers_error_illegal_keys(
basic_datasource,
):
test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
batch_identifiers: dict
batch_identifiers = {
"pipeline_stage_name": "core_processing",
"airflow_run_id": 1234567890,
"custom_key_0": "custom_value_0",
"custom_key_1": "custom_value_1",
}
test_runtime_data_connector: RuntimeDataConnector = (
basic_datasource.data_connectors["test_runtime_data_connector"]
)
# Ensure that keys in batch_identifiers["batch_identifiers"] that are not among batch_identifiers declared in
# configuration are not accepted. In this test, all legal keys plus a single illegal key are present.
batch_request: dict = {
"datasource_name": basic_datasource.name,
"data_connector_name": test_runtime_data_connector.name,
"data_asset_name": "my_data_asset_name",
"runtime_parameters": {"batch_data": test_df},
"batch_identifiers": batch_identifiers,
}
batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
with pytest.raises(ge_exceptions.DataConnectorError):
# noinspection PyUnusedLocal
batch_definition_list: List[
BatchDefinition
] = test_runtime_data_connector.get_batch_definition_list_from_batch_request(
batch_request=batch_request
)
batch_identifiers = {"batch_identifiers": {"unknown_key": "some_value"}}
test_runtime_data_connector: RuntimeDataConnector = (
basic_datasource.data_connectors["test_runtime_data_connector"]
)
# Ensure that keys in batch_identifiers["batch_identifiers"] that are not among batch_identifiers declared in configuration
# are not accepted. In this test, a single illegal key is present.
batch_request: dict = {
"datasource_name": basic_datasource.name,
"data_connector_name": test_runtime_data_connector.name,
"data_asset_name": "IN_MEMORY_DATA_ASSET",
"runtime_parameters": {"batch_data": test_df},
"batch_identifiers": batch_identifiers,
}
batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
with pytest.raises(ge_exceptions.DataConnectorError):
# noinspection PyUnusedLocal
batch_definition_list: List[
BatchDefinition
] = test_runtime_data_connector.get_batch_definition_list_from_batch_request(
batch_request=batch_request
)
def test_get_available_data_asset_names(basic_datasource):
test_runtime_data_connector: RuntimeDataConnector = (
basic_datasource.data_connectors["test_runtime_data_connector"]
)
expected_available_data_asset_names: List[str] = []
available_data_asset_names: List[
str
] = test_runtime_data_connector.get_available_data_asset_names()
assert available_data_asset_names == expected_available_data_asset_names
def test_get_available_data_asset_names_named_assets(basic_datasource_with_assets):
runtime_data_connector: RuntimeDataConnector = (
basic_datasource_with_assets.data_connectors["runtime"]
)
assert runtime_data_connector.get_available_data_asset_names() == [
"asset_a",
"asset_b",
]
def test_get_available_data_asset_names_updating_after_batch_request(
basic_datasource_with_assets,
):
test_runtime_data_connector: RuntimeDataConnector = (
basic_datasource_with_assets.data_connectors["runtime"]
)
test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
# the configured named assets are returned even before the data_connector has been used
assert test_runtime_data_connector.get_available_data_asset_names() == [
"asset_a",
"asset_b",
]
batch_identifiers = {
"hour": 12,
"minute": 15,
}
batch_request: dict = {
"datasource_name": basic_datasource_with_assets.name,
"data_connector_name": test_runtime_data_connector.name,
"data_asset_name": "my_new_data_asset_1",
"runtime_parameters": {
"batch_data": test_df,
},
"batch_identifiers": batch_identifiers,
}
batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
# run with my_new_data_asset_1
test_runtime_data_connector.get_batch_definition_list_from_batch_request(
batch_request=batch_request
)
# asset names updated to include my_new_data_asset_1
assert test_runtime_data_connector.get_available_data_asset_names() == [
"asset_a",
"asset_b",
"my_new_data_asset_1",
]
batch_identifiers = {
"hour": 12,
"minute": 30,
}
batch_request: dict = {
"datasource_name": basic_datasource_with_assets.name,
"data_connector_name": test_runtime_data_connector.name,
"data_asset_name": "my_new_data_asset_2",
"runtime_parameters": {
"batch_data": test_df,
},
"batch_identifiers": batch_identifiers,
}
batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
# run with my_new_data_asset_2
test_runtime_data_connector.get_batch_definition_list_from_batch_request(
batch_request=batch_request
)
# asset names updated to include my_new_data_asset_1 and my_new_data_asset_2
assert test_runtime_data_connector.get_available_data_asset_names() == [
"asset_a",
"asset_b",
"my_new_data_asset_1",
"my_new_data_asset_2",
]
def test_data_references_cache_updating_after_batch_request(
basic_datasource,
):
test_runtime_data_connector: RuntimeDataConnector = (
basic_datasource.data_connectors["test_runtime_data_connector"]
)
test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
# empty if data_connector has not been used
assert test_runtime_data_connector.get_available_data_asset_names() == []
batch_identifiers = {
"airflow_run_id": 1234567890,
}
batch_request: dict = {
"datasource_name": basic_datasource.name,
"data_connector_name": test_runtime_data_connector.name,
"data_asset_name": "my_data_asset_1",
"runtime_parameters": {
"batch_data": test_df,
},
"batch_identifiers": batch_identifiers,
}
batch_request: RuntimeBatchRequest = RuntimeBatchRequest(**batch_request)
# run with my_data_asset_1
test_runtime_data_connector.get_batch_definition_list_from_batch_request(
batch_request=batch_request
)
assert test_runtime_data_connector._data_references_cache == {
"my_data_asset_1": {
"1234567890": [
BatchDefinition(
datasource_name="my_datasource",
data_connector_name="test_runtime_data_connector",
data_asset_name="my_data_asset_1",
batch_identifiers=IDDict({"airflow_run_id": 1234567890}),
)
],
}
}
# update with
test_df_new: pd.DataFrame = | pd.DataFrame(data={"col1": [5, 6], "col2": [7, 8]}) | pandas.DataFrame |
# Database Lib
"""
Oracle
PostgreSQL
SQLite
SQLServer
Hive
Spark
"""
import os, datetime, pandas, time, re
from collections import namedtuple, OrderedDict
import jmespath
import sqlalchemy
from multiprocessing import Queue, Process
from xutil.helpers import (
log,
elog,
slog,
get_exception_message,
struct,
now,
get_databases,
get_dir_path,
get_profile,
get_variables,
file_exists,
str_rmv_indent,
ptable,
make_rec,
get_error_str,
)
from xutil.diskio import read_yaml, write_csvs
conns = {}
_fwklike = lambda k, v: "lower({}) like lower('{}')".format(k, v)
_fwkeq = lambda k, v: "{} = '{}'".format(k, v)
_fw = lambda sep, _fwkop, **kws: sep.join([_fwkop(k, v) for k, v in kws.items()]) # Format WHERE
fwa = lambda _fwkop=_fwkeq, **kws: _fw(' and ', _fwkop, **kws) # Format WHERE AND
fwo = lambda _fwkop=_fwkeq, **kws: _fw(' or ', _fwkop, **kws) # Format WHERE OR
rows_to_dicts = lambda rows: [row._asdict() for row in rows]
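# Illustrative examples (a minimal sketch, not from the original module; assumes the
# insertion-ordered keyword arguments of Python 3.7+):
#   fwa(name='John', age='5')   -> "name = 'John' and age = '5'"
#   fwo(_fwklike, name='jo%')   -> "lower(name) like lower('jo%')"
#   rows_to_dicts(rows)         -> list of OrderedDicts, one per namedtuple row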
class DBConn(object):
"""Base class for database connections"""
_fix_f_name = lambda self, f: f
_to_text = lambda self, t: t
def __init__(self, conn_dict, profile=None, echo=False):
"Inititate connection"
self._cred = struct(conn_dict)
self._cred.kwargs = conn_dict.get('kwargs', {})
self.name = self._cred.get('name', None)
self.username = self._cred.get('username', None)
self.type = self._cred.type
self.engine = None
self._cursor_description = None
self.profile = profile
self.batch_size = 10000
self.fetch_size = 20000
self.echo = echo
self.connect()
self.last_connect = now()
# Base Template
template_base_path = '{}/database/templates/base.yaml'.format(
get_dir_path())
self.template_dict = read_yaml(template_base_path)
# Specific Type Template
template_path = '{}/database/templates/{}.yaml'.format(
get_dir_path(), self.type)
temp_dict = read_yaml(template_path)
for key1 in temp_dict:
# Level 1
if isinstance(temp_dict[key1], dict):
if key1 not in self.template_dict:
self.template_dict[key1] = temp_dict[key1]
# Level 2
for key2 in temp_dict[key1]:
# Always Overwrite
self.template_dict[key1][key2] = temp_dict[key1][key2]
else:
# Level 1 Non-Dict Overwrite
self.template_dict[key1] = temp_dict[key1]
self.variables = self._template('variables')
if os.getenv('PROFILE_YAML'):
other_vars = get_variables()
for key in other_vars:
self.variables[key] = other_vars[key]
self.tmp_folder = self.variables['tmp_folder']
self.set_variables()
if echo:
log("Connected to {} as {}".format(self._cred.name, self._cred.user))
def connect(self):
"""Connect to Database"""
self.engine = self.get_engine()
self.connection = self.engine.connect()
def close(self):
"""Close database connection"""
self.connection.close()
def reconnect(self, min_tresh=0):
"""Re-Connect to Database if minute threshold reached"""
if (now() - self.last_connect).total_seconds() > min_tresh * 60:
log('Reconnecting to {}...'.format(self.name))
self.connect()
self.last_connect = now()
def set_variables(self):
"""Set custom variables"""
raise Exception("Method 'set_variables' is not implemented!")
def get_dialect(self, echo=False):
"""SQLAlchemy dialect"""
raise Exception("Method 'get_dialect' is not implemented!")
def get_engine(self, echo=False):
import sqlalchemy
if not self.engine:
self.create_engine(echo=self.echo)
self.engine_inspect = sqlalchemy.inspect(self.engine)
return self.engine
def check_pk(self, table, fields):
"Check Primary key to ensure there are not duplicates"
if 'where' in fields.lower():
fields, where_clause = fields.lower().split('where')
where_clause = 'where ' + where_clause
else:
where_clause = ''
sql = '''
select
'{table}' as table,
case when count(1) = count({fields}) then 'PASS' else 'FAIL' end as pk_result
from {table}
{where_clause}
'''.format(
table=table,
fields=fields,
where_clause=where_clause,
)
data = self.query(sql, echo=False)
headers = self._fields
print(ptable(headers, data))
if data[0].pk_result == 'FAIL':
raise (Exception('PK Test failed for table "{}" with fields "{}"'.format(
table, fields)))
def _do_execute(self, sql):
try:
self._cursor_description = None
self._fields = None
self.result = self.connection.execute(sql)
self._cursor_description = self.result._cursor_description()
self._fields = self._get_cursor_fields()
except Exception as E:
if 'not open' in get_error_str(E):
pass # error when Oracle doesn't have a cursor open
else:
log(Exception('Error for SQL:\n' + sql))
raise E
def execute_multi(self,
sql,
dtype='namedtuple',
limit=None,
echo=True,
query_name='Record',
log=log):
"""
Execute multiple SQL statements separated by ';'. Returns a generator.
Example:
for fields, rows in conn.execute(sql):
print(fields)
print(len(rows))
"""
self.reconnect(min_tresh=10)
data = None
fields = None
rows = []
message_mapping = {
'drop ': 'Dropping {}.',
'truncate ': 'Truncating {}.',
'select ': 'Selecting {}.',
'create ': 'Creating {}.',
'insert ': 'Inserting {}.',
'alter ': 'Altering {}.',
'update ': 'Updating {}.',
'delete ': 'Deleting {}.',
'exec ': 'Calling Procedure {}.',
'grant ': 'Granting {}.',
}
sqls = sql.split(';')
for sql in sqls:
if not sql.strip(): continue
sql_ = sql.strip().lower()
for word, message in message_mapping.items():
if sql_.startswith(word):
if echo:
log(
message.format(' '.join(
sql_.splitlines()[0].split()[1:3]).upper()))
break
# Call procedure with callproc
if sql_.startswith('exec '):
procedure = sql_[5:].split('(')[0]
args = sql_[5:].split('(')[1][:-1].replace("'", '').split(',')
args = [a.strip() for a in args]
# callproc needs a DBAPI cursor (mirrors the pattern used in execute() below)
connection = self.engine.raw_connection()
try:
cursor = connection.cursor()
cursor.callproc(procedure, args)
cursor.close()
connection.commit()
finally:
connection.close()
continue
try:
self._fields = []
rows = self.query(
sql,
rec_name=query_name,
dtype=dtype,
limit=limit,
echo=echo,
log=log)
fields = self._fields
if '-- pk_test:' in sql.lower() and sql_.startswith('create'):
sql_lines = sql_.splitlines()
regexp = r'create\s+table\s+(\S*)[\sa-zA-Z\d]+ as'
table = re.findall(regexp, sql_lines[0])[0]
line = [
l for l in sql_lines if l.strip().lower().startswith('-- pk_test:')
][0]
fields = line.split(':')[-1]
self.check_pk(table, fields)
except Exception as E:
message = get_exception_message().lower()
if sql_.startswith(
'drop ') and self.error_msg['table_not_exist'] in message:
log("WARNING: Table already dropped.")
else:
raise E
if not fields: fields = []
yield fields, rows
def execute(self,
sql,
dtype='tuple',
limit=None,
echo=True,
query_name='Record',
log=log):
"""Execute SQL, return last result"""
self.reconnect(min_tresh=10)
data = None
fields = None
rows = []
message_mapping = {
'drop ': 'Dropping {}.',
'truncate ': 'Truncating {}.',
'select ': 'Selecting {}.',
'create ': 'Creating {}.',
'insert ': 'Inserting {}.',
'alter ': 'Altering {}.',
'update ': 'Updating {}.',
'delete ': 'Deleting {}.',
'exec ': 'Calling Procedure {}.',
'grant ': 'Granting {}.',
}
sql_ = sql.strip().lower()
for word, message in message_mapping.items():
if sql_.startswith(word):
if echo:
log(
message.format(' '.join(
sql_.splitlines()[0].split()[1:3]).upper()))
break
# Call procedure with callproc
if sql_.startswith('exec '):
procedure = sql_[5:].split('(')[0]
args = sql_[5:].split('(')[1][:-1].replace("'", '').split(',')
args = [a.strip() for a in args]
connection = self.engine.raw_connection()
try:
cursor = connection.cursor()
cursor.callproc(procedure, args)
self._fields = self._get_cursor_fields(cursor_desc=cursor.description)
rows = list(cursor.fetchall())
cursor.close()
connection.commit()
return fields, rows
finally:
connection.close()
try:
self._fields = []
rows = self.query(
sql,
rec_name=query_name,
dtype=dtype,
limit=limit,
echo=echo,
log=log)
fields = self._fields
if '-- pk_test:' in sql.lower() and sql_.startswith('create'):
sql_lines = sql_.splitlines()
regexp = r'create\s+table\s+(\S*)[\sa-zA-Z\d]+ as'
table = re.findall(regexp, sql_lines[0])[0]
line = [
l for l in sql_lines if l.strip().lower().startswith('-- pk_test:')
][0]
fields = line.split(':')[-1]
self.check_pk(table, fields)
except Exception as E:
message = get_exception_message().lower()
if sql_.startswith(
'drop ') and self.error_msg['table_not_exist'] in message:
log("WARNING: Table already dropped.")
else:
raise E
if not fields: fields = []
return fields, rows
def insert(self, table, data, echo=False):
"""Insert records of namedtuple or dicts"""
raise Exception('insert not implemented')
def drop_table(self, table, log=log):
"Drop table"
try:
sql = self._template('core.drop_table').format(table)
self._do_execute(sql)
except Exception as E:
message = get_exception_message().lower()
if self._template('error_filter.table_not_exist') in message:
if self.echo:
log('Table "{}" already dropped.'.format(table))
else:
raise E
def create_table(self, table, field_types, drop=False, log=log):
"Create table"
if drop:
self.drop_table(table, log=log)
new_ftypes = OrderedDict()
for f in field_types:
ftype, max_len, dec_len = field_types[f]
if dec_len:
suff = '({},{})'.format(max_len, dec_len)
elif max_len:
suff = '({})'.format(max_len)
else:
suff = ''
new_ftypes[f] = self._template('general_type_map')[ftype].replace(
'()', suff)
field_types_str = ', \n'.join([
self._fix_f_name(field) + ' ' + new_ftypes[field] for field in new_ftypes
])
sql = self._template('core.create_table').format(
table=table,
col_types=field_types_str,
)
# log('Creating table: \n' + sql))
try:
self._do_execute(sql)
except Exception as e:
raise e
log('Created table "{}"'.format(table))
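# Minimal usage sketch (illustrative only -- the type keys must exist in the connection's
# 'general_type_map' template; the table and field names below are hypothetical):
#   field_types = OrderedDict([
#       ('id', ('integer', None, None)),     # no length suffix
#       ('name', ('string', 50, None)),      # -> e.g. VARCHAR(50)
#       ('amount', ('double', 12, 2)),       # -> e.g. NUMBER(12,2) on Oracle
#   ])
#   conn.create_table('my_schema.my_table', field_types, drop=True)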
def _get_cursor_fields(self, as_dict=False, native_type=True, cursor_desc=None):
"Get fields of active Select cursor"
fields = OrderedDict()
cursor_desc = cursor_desc if cursor_desc else self._cursor_description
if cursor_desc == None:
return []
for f in cursor_desc:
f_name = f[0].lower()
if as_dict:
if native_type:
f_type = f[1]
else:
f_type = self.reverse_data_map[f[1]]
# assign float/double as needed
if 'cx_Oracle.NUMBER' in str(f[1]):
if f[4] and f[4] > 11: f_type = 'long'
if f[5] and f[5] > 0: f_type = 'double'
fields[f_name] = f_type
else:
fields[f_name] = None
if as_dict:
return fields
else:
return list(fields.keys())
def stream(self,
sql,
rec_name='Record',
dtype='namedtuple',
yield_chuncks=False,
chunk_size=None,
limit=None,
echo=True):
"Stream Select from SQL, yield records as they come in"
self.reconnect(min_tresh=10)
if echo: log("Streaming SQL for '{}'.".format(rec_name))
fetch_size = limit if limit else self.fetch_size
fetch_size = chunk_size if chunk_size else fetch_size
try:
self._do_execute(sql)
except Exception as e:
raise e
if dtype == 'tuple':
make_rec = lambda row: row
make_batch = lambda rows: rows
elif dtype == 'dataframe':
yield_chuncks=True
make_batch = lambda rows: | pandas.DataFrame(rows, columns=self._fields) | pandas.DataFrame |
import pandas as pd
import inspect
import functools
# ============================================ DataFrame ============================================ #
# Decorates a generator function that yields rows (v,...)
def pd_dfrows(columns=None):
def dec(fn):
def wrapper(*args,**kwargs):
return pd.DataFrame([*fn(*args,**kwargs)],columns=columns)
return functools.update_wrapper(wrapper,fn)
return dec
# Decorates a generator function that yields k,(v,...) pairs
def pd_dataframe(index=None,columns=None):
def dec(fn):
def wrapper(*args,**kwargs):
i,d = (list(x) for x in zip(*fn(*args,**kwargs)))
return pd.DataFrame(d,pd.Index(i,name=index),columns=columns)
return functools.update_wrapper(wrapper,fn)
return dec
# Decorates a generator function that yields (k,...),(v,...) pairs
def pd_multiframe(index=None,columns=None):
def dec(fn):
def wrapper(*args,**kwargs):
i,d = (list(x) for x in zip(*fn(*args,**kwargs)))
return pd.DataFrame(d,index=pd.MultiIndex.from_tuples(i,names=index),columns=columns)
return functools.update_wrapper(wrapper,fn)
return dec
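# Usage sketch for the DataFrame decorators above (illustrative, not part of the original module):
#   @pd_dataframe(index='name', columns=['age', 'city'])
#   def people():
#       yield 'alice', (34, 'Paris')
#       yield 'bob', (29, 'Berlin')
#   df = people()   # DataFrame indexed by 'name' with columns 'age' and 'city'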
# ============================================ Series ============================================ #
# Decorates a generator function that yields k,v pairs
def pd_series(index=None,name=None):
def dec(fn):
def wrapper(*args,**kwargs):
i,d = (list(x) for x in zip(*fn(*args,**kwargs)))
return pd.Series(d,index=pd.Index(i,name=index),name=name)
return functools.update_wrapper(wrapper,fn)
return dec
# Decorates a generator function that yields (k,...),v pairs
def pd_multiseries(index=None,name=None):
def dec(fn):
def wrapper(*args,**kwargs):
i,d = [[*x] for x in zip(*fn(*args,**kwargs))]
return pd.Series(d,index=pd.MultiIndex.from_tuples(i,names=index),name=name)
return functools.update_wrapper(wrapper,fn)
return dec
# ============================================ Index ============================================ #
# Decorates a generator function that yields (k,...)
def pd_multi_index(names=None):
def dec(fn):
def wrapper(*args,**kwargs):
return pd.MultiIndex.from_tuples([*fn(*args,**kwargs)],names=names)
return functools.update_wrapper(wrapper,fn)
return dec
# Decorates a generator function that yields k
def pd_index(name=None):
def dec(fn):
def wrapper(*args,**kwargs):
return pd.Index([*fn(*args,**kwargs)],name=name)
return functools.update_wrapper(wrapper,fn)
return dec
# ============================================ Joins ============================================ #
# decorates either a generator function that yields dataframes, or an iterable containing dataframes.
def pd_concat(axis=0,**catargs):
def dec(fn):
def wrapper(*args,**kwargs):
return pd.concat([*fn(*args,**kwargs)],axis=axis,**catargs)
return functools.update_wrapper(wrapper,fn)
return dec
# ============================================ Transforms ============================================ #
# decorates a function that reindexes dataframes
def pd_reindex(name=None):
def dec(fn):
def wrapper(df):
inx = pd.Index([*map(fn,df.index)],name=(name if name!=None else df.index.names if df.index.ndim>1 else df.index.name))
return pd.DataFrame(df.values,index=inx,columns=df.columns)
return wrapper
return dec
# decorates a function that transforms both the index values and column values of an inputted dataframe
def pd_transform(inx=None,col=None):
def dec(fn):
def wrapper(df,*args,**kwargs):
i,d = [[*x] for x in zip(*fn(df,*args,**kwargs))]
index = pd.Index(i,name=(inx if inx!=None else df.index.names if df.index.ndim>1 else df.index.name))
return pd.DataFrame(d,index,columns=(col if col!=None else df.columns))
return wrapper
return dec
# ============================================ GroupBy ============================================ #
def pd_groupby_agg(by,columns=None):
def dec(fn):
if inspect.isgeneratorfunction(fn):
def wrapper(df,*args,**kwargs):
i,d = [[*x] for x in zip(*(a for b in (((g,r) for r in fn(data,*args,**kwargs)) for g,data in df.groupby(by)) for a in b))]
inx = pd.Index(i,name=by) if type(by) == str else | pd.MultiIndex.from_tuples(i,names=by) | pandas.MultiIndex.from_tuples |
"""
Contains code to parse plate info files and generate sample sheets
"""
import pathlib
import re
from collections import OrderedDict
import pandas as pd
import cemba_data
# Load defaults
PACKAGE_DIR = pathlib.Path(cemba_data.__path__[0])
# the Illumina sample sheet header used by Ecker Lab
with open(PACKAGE_DIR / 'files/sample_sheet_header.txt') as _f:
SAMPLESHEET_DEFAULT_HEADER = _f.read()
SECTIONS = ['[CriticalInfo]', '[LibraryInfo]', '[PlateInfo]']
LIMITED_CHOICES = {
'n_random_index': [8, 384, '8', '384'],
'input_plate_size': [384, '384'],
'primer_quarter': ['Set1_Q1', 'Set1_Q2', 'Set1_Q3', 'Set1_Q4',
'SetB_Q1', 'SetB_Q2', 'SetB_Q3', 'SetB_Q4']}
CRITICAL_INFO_KEYS = ['n_random_index', 'input_plate_size',
'pool_id', 'tube_label', 'email']
# key (n_random_index, input_plate_size)
BARCODE_TABLE = {
('8', '384'): PACKAGE_DIR / 'files/V1_i7_i5_index.tsv', # V1 can use both Set1 and SetB i5 i7 primer
('384', '384'): PACKAGE_DIR / 'files/V2_i7_i5_index.tsv' # V2 only use SetB primer
}
def _clean_str_for_path(str_in):
# replace special char with _
str_out = re.sub('[^a-zA-Z0-9]', '_', str_in.strip())
return str_out
def _get_kv_pair(line):
try:
k, v = line.split('=')
if k == 'email':
return k, v
else:
return _clean_str_for_path(k), _clean_str_for_path(v)
except ValueError:
raise ValueError(f'Each key=value line must contain a "=" to separate key and value. Got {line}')
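# Behavior sketch for the two helpers above (values are illustrative only):
#   _clean_str_for_path(' Plate 1#A ')    -> 'Plate_1_A'
#   _get_kv_pair('pool_id = Pool 9')      -> ('pool_id', 'Pool_9')
#   _get_kv_pair('email=someone@lab.org') -> ('email', 'someone@lab.org')   # email value kept verbatim
#   _get_kv_pair('no equal sign here')    # raises ValueError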
def _read_plate_info(plate_info_path):
"""Parse the plate info file"""
cur_section = ''
cur_section_id = -1
critical_info = {}
library_info = OrderedDict()
plate_header = True
plate_info = []
with open(plate_info_path) as f:
for line in f:
line = line.strip('\n')
if line == '' or line.startswith('#'):
continue
# determine section
if line.startswith('['):
cur_section_id += 1
if line == SECTIONS[cur_section_id]:
cur_section = line
else:
raise ValueError(
f'Section name and order must be [CriticalInfo] [LibraryInfo] [PlateInfo], '
f'got {line} at No.{cur_section_id + 1} section.')
elif cur_section == '[CriticalInfo]':
k, v = _get_kv_pair(line)
if k not in CRITICAL_INFO_KEYS:
raise ValueError(f'Unknown key {k} in [CriticalInfo]')
else:
critical_info[k] = v
elif cur_section == '[LibraryInfo]':
k, v = _get_kv_pair(line)
if (k in critical_info.keys()) or (k in library_info.keys()):
raise ValueError(f'Found duplicated key {k}')
else:
library_info[k] = v
elif cur_section == '[PlateInfo]':
ll = line.split('\t')
if plate_header:
plate_header = False
plate_info.append(ll)
else:
raise ValueError(f'Got a malformed line: {line}')
for k in CRITICAL_INFO_KEYS:
if k not in critical_info:
raise ValueError(f'[CriticalInfo] missing key-value pair "{k}"')
header = plate_info[0]
plate_info = pd.DataFrame(plate_info[1:], columns=plate_info[0])
for k, v in library_info.items():
if k in plate_info.columns:
raise ValueError(f'Found duplicated key {k} between [PlateInfo] and [LibraryInfo]')
plate_info[k] = v
if critical_info['n_random_index'] == '8':
n_plate_info_fix_col = 2
if plate_info['plate_id'].duplicated().sum() != 0:
raise ValueError(f'Found duplicated plate_id in [PlateInfo] section.')
elif critical_info['n_random_index'] == '384':
n_plate_info_fix_col = 3
if plate_info.set_index(['plate_id', 'multiplex_group']).index.duplicated().sum() != 0:
raise ValueError(f'Found duplicated plate_id, multiplex_group combination in [PlateInfo] section.')
else:
raise ValueError(f'[CriticalInfo] n_random_index got unknown value '
f'{critical_info["n_random_index"]}')
col_order = header[:n_plate_info_fix_col] + list(library_info.keys()) + header[n_plate_info_fix_col:]
plate_info = plate_info[col_order].copy()
plate_info['sample_id_prefix'] = plate_info.apply(
lambda i: '-'.join(i[n_plate_info_fix_col:].astype(str).tolist()), axis=1)
# after getting sample_id_prefix, add critical info into plate_info too
for k, v in critical_info.items():
if k in plate_info.columns:
raise ValueError(f'Found duplicated key {k}')
plate_info[k] = v
# deduplicate primer name
# because we start to use the same primer for different multiplex groups
plate_info = plate_info.loc[~plate_info['primer_name'].duplicated(), :].copy()
return critical_info, plate_info
def reverse_comp(sequence):
rc_dict = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C',
'a': 't', 't': 'a', 'c': 'g', 'g': 'c',
'n': 'n', 'N': 'N'}
_seq = ''.join([rc_dict[s] for s in sequence[::-1]])
return _seq
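# Sanity-check examples (illustrative only):
#   reverse_comp('AACG') -> 'CGTT'
#   reverse_comp('ACGT') -> 'ACGT'   # palindromic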
def _plate_384_random_index_8(plate_info, barcode_table, i5_reverse_comp=False):
"""UID pattern of V1 {sample_id_prefix}-{plate1}-{plate2}-{plate_pos}"""
records = []
# check plate_info primer compatibility
for primer_quarter, n_plate in plate_info['primer_quarter'].value_counts().iteritems():
if n_plate < 2:
print(f'{primer_quarter} only have 1 plate in the table, please make sure this is correct.')
elif n_plate == 2:
pass
else:
raise ValueError(f'{primer_quarter} have {n_plate} plates in the table, that is impossible.')
for primer_quarter, plate_pair in plate_info.groupby('primer_quarter'):
if primer_quarter not in LIMITED_CHOICES['primer_quarter']:
raise ValueError(f'Unknown primer_quarter value {primer_quarter}')
if plate_pair.shape[0] == 1:
# sometimes, a quarter only index 1 plate
plate1, *plate2 = plate_pair['plate_id']
plate2 = 'GHOST_PLATE'
else:
plate1, plate2 = plate_pair['plate_id']
# check plate pair info consistence
for col_name, col in plate_pair.iteritems():
if col.unique().size != 1:
if col_name != 'plate_id':
print(f'{col_name} contains different information between {plate1} and {plate2}, '
f'Will put {plate1} prefix into sample_id. This should not happen normally.')
# replace all special chars in plate names with '_'
# I use '-' to separate sample parts
plate1 = _clean_str_for_path(plate1)
plate2 = _clean_str_for_path(plate2)
for col in 'ABCDEFGH':
for row in range(1, 13):
plate_pos = f'{col}{row}'
cur_row = barcode_table.loc[(primer_quarter, plate_pos)]
i5_barcode = cur_row['i5_index_sequence']
i7_barcode = cur_row['i7_index_sequence']
sample_id_prefix = plate_pair['sample_id_prefix'].iloc[0]
sample_id = f'{sample_id_prefix}-{plate1}-{plate2}-{plate_pos}'
# THIS IS BASED ON FORMAT BCL2FASTQ NEEDS
records.append({'Sample_ID': sample_id,
'index': i7_barcode, # the index must be i7
'index2': reverse_comp(i5_barcode) if i5_reverse_comp else i5_barcode,
# the index2 must be i5
'Sample_Project': plate_pair['tube_label'].iloc[0],
'Description': plate_pair['email'].iloc[0]})
miseq_sample_sheet, nova_sample_sheet = _make_final_samplesheet(records)
return miseq_sample_sheet, nova_sample_sheet
def _plate_384_random_index_384(plate_info, barcode_table, i5_reverse_comp=False):
"""
UID pattern of V2 {sample_id_prefix}-{plate}-{multiplex_group}-{barcode_name}
If i5_reverse_comp, use the reverse complement i5 sequence, this is needed for NovaSeq v1.5 S4 kit.
"""
records = []
# this is now possible because we may use the same PCR index for the same plate
# # check plate_info primer compatibility
# for primer_name, n_primer in plate_info['primer_name'].value_counts().iteritems():
# if n_primer > 1:
# raise ValueError(f'{primer_name} have {n_primer} multiplex_group in the table, that is impossible.')
for _, row in plate_info.iterrows():
plate = row['plate_id']
# replace special chars in plate names with '_'
plate = _clean_str_for_path(plate)
barcode_name = row['primer_name']
cur_row = barcode_table.loc[barcode_name]
i5_barcode = cur_row['i5_index_sequence']
i7_barcode = cur_row['i7_index_sequence']
sample_id_prefix = row['sample_id_prefix']
multiplex_group = row['multiplex_group']
sample_id = f'{sample_id_prefix}-{plate}-{multiplex_group}-{barcode_name}'
# THIS IS BASED ON FORMAT BCL2FASTQ NEEDS
records.append({'Sample_ID': sample_id,
'index': i7_barcode, # the index must be i7
'index2': reverse_comp(i5_barcode) if i5_reverse_comp else i5_barcode, # the index2 must be i5
'Sample_Project': row['tube_label'],
'Description': row['email']})
miseq_sample_sheet, nova_sample_sheet = _make_final_samplesheet(records)
return miseq_sample_sheet, nova_sample_sheet
def _make_final_samplesheet(records):
# THIS IS BASED ON FORMAT BCL2FASTQ NEEDS
sample_sheet = pd.DataFrame(records)
sample_sheet['Sample_Name'] = ''
sample_sheet['Sample_Well'] = ''
sample_sheet['I7_Index_ID'] = ''
sample_sheet['I5_Index_ID'] = ''
sample_sheet['Sample_Plate'] = 'Plate'
miseq_sample_sheet = sample_sheet[['Sample_ID', 'Sample_Name', 'Sample_Plate',
'Sample_Well', 'I7_Index_ID', 'index',
'I5_Index_ID', 'index2', 'Sample_Project',
'Description']].copy()
lanes = []
for i in range(1, 5):
lane_df = miseq_sample_sheet.copy()
lane_df['Lane'] = i
lanes.append(lane_df)
nova_sample_sheet = pd.concat(lanes)
nova_sample_sheet = nova_sample_sheet[['Lane', 'Sample_ID', 'Sample_Name', 'Sample_Plate',
'Sample_Well', 'I7_Index_ID', 'index',
'I5_Index_ID', 'index2', 'Sample_Project',
'Description']].copy()
return miseq_sample_sheet, nova_sample_sheet
def make_sample_sheet(plate_info_path: str, output_prefix: str, header_path=None):
"""
make two sample sheets for bcl2fastq based on the plate info file: one for miseq, one for novaseq
Parameters
----------
plate_info_path
    Path to the plate info file ([CriticalInfo] / [LibraryInfo] / [PlateInfo] sections).
output_prefix
    Prefix for the generated miseq and novaseq sample sheet files.
header_path
    Optional path to an alternative Illumina sample sheet header.
Returns
-------
"""
# read plate info
critical_info, plate_info = _read_plate_info(plate_info_path)
# check valid choice
for k in ['n_random_index', 'input_plate_size']:
if critical_info[k] not in LIMITED_CHOICES[k]:
raise ValueError(f'Invalid value in critical_info section for {k}: {critical_info[k]}')
n_random_index = critical_info['n_random_index']
input_plate_size = critical_info['input_plate_size']
barcode_table_path = BARCODE_TABLE[n_random_index, input_plate_size]
for i5_reverse_comp in [True, False]:
if (n_random_index, input_plate_size) == ('8', '384'):
barcode_table = | pd.read_csv(barcode_table_path, sep='\t') | pandas.read_csv |
import os
import gzip
import shutil
from typing import Tuple
import wget
import spacy
import numpy as np
import pandas as pd
import nlpaug.augmenter.word as naw
from sklearn.model_selection import train_test_split as splitting
from ..DataAugmenter import AbstractDataAugmenter
class DataAugmenterNLP(AbstractDataAugmenter):
__class_local_path = os.path.dirname(os.path.realpath(__file__))
__synsets_folder = os.path.join(__class_local_path, "synsets_data")
__aug_wdnt = None
__aug_ppdb = None
def __init__(self, n_jobs=1):
self.n_jobs = n_jobs
if not os.path.exists(self.__synsets_folder):
os.mkdir(self.__synsets_folder)
def _prepare_data_to_aug(self, data, freq=0.2) -> Tuple[pd.Series, pd.Series]:
"""Get part of data. Not augment all of it excep case freq=1.0"""
data = (
pd.Series(data)
if type(data) is not pd.Series and type(data) is not pd.DataFrame
else data
)
if 0 < freq < 1:
not_to_aug, to_aug = splitting(data, test_size=freq)  # sklearn's train_test_split, imported above as 'splitting'
return not_to_aug, to_aug
elif freq == 1:
return data.sample(0), data
elif freq == 0:
return data, data.sample(0)
else:
raise ValueError("freq value not in [0, 1] span")
def augment_dataframe(
self,
df: pd.DataFrame,
freq=0.2,
return_only_aug=False,
aug_type="deleting",
reps=1,
min_words=1,
window_size=3,
columns=None,
) -> pd.DataFrame:
"""Augment dataframe data. Pandas dataframe"""
not_to_aug, to_aug = self._prepare_data_to_aug(df, freq=freq)
columns = columns if columns else df.columns
for col in columns:
to_aug[col] = self.augment_column(
to_aug[col],
freq=1.0,
return_only_aug=return_only_aug,
aug_type=aug_type,
reps=reps,
min_words=min_words,
window_size=window_size,
)
return to_aug if return_only_aug else | pd.concat([not_to_aug, to_aug]) | pandas.concat |
import pdb
import numpy as np
import pandas as pd
from math import ceil
def score_at_percentage(alpha, df, targets):
segment = ceil(alpha * df.shape[0])
segmented_df = df[0:segment]
targets_seen = 0
for i, row in segmented_df.iterrows():
if row.node in targets:
targets_seen += 1
return targets_seen
def valid_targets(df, targets):
validated = []
for target in targets:
if df[df.node == target].shape[0] > 0: # if target is in our dataframe
validated.append(target)
return validated
def evaluate_recommendations(df, on, targets):
"""
Evaluates recommendation results based on how highly they rated the target values
Input
-----
df (pandas dataframe): a dataframe containing the features and node names
on (string): the column name that will be sorted on (how we rank our nodes/targets)
targets (array-like): a list of known recommendations (from the "see also" section)
return
------
Returns a datafram report containing the score, the max score, and percentage points at which targets
were found in the top recommendations.
"""
# target = valid_targets(df, targets)
sorted_df = df.sort_values(on, ascending=False).reset_index().drop("index", axis=1)
total_nodes = sorted_df.shape[0]
max_target_index = 0
for target in targets:
try:
target_val = sorted_df[sorted_df.node == target].index[0]
if max_target_index < target_val:
max_target_index = target_val
except:
continue
# Represents the percentage of rows we must go down before we have captured all targets
# a higher number indicates that the targets are closer to the top of our recommendations
score = 1 - (max_target_index / (total_nodes - len(targets)))
max_score_possible = 1 - (len(targets) / total_nodes)
report = pd.DataFrame([
{"Metric Score": "score", on: score},
{"Metric Score": "max score possible", on: max_score_possible},
{"Metric Score": "difference", on: max_score_possible - score},
{"Metric Score": "total targets", on: len(targets)},
{"Metric Score": "% targets in top 1%", on: (score_at_percentage(0.01, sorted_df, targets) / len(targets))},
{"Metric Score": "% targets in top 5%", on: (score_at_percentage(0.05, sorted_df, targets) / len(targets))},
{"Metric Score": "% targets in top 10%", on: (score_at_percentage(0.10, sorted_df, targets) / len(targets))},
{"Metric Score": "% targets in top 20%", on: (score_at_percentage(0.20, sorted_df, targets) / len(targets))},
]).set_index("Metric Score")
return report.T
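# Worked example (numbers are illustrative only): with total_nodes=100, two targets and the
# worst-ranked target at sorted index 9, score = 1 - 9/(100 - 2) ~= 0.908, the max score
# possible is 1 - 2/100 = 0.98, so the reported "difference" row would be ~= 0.072.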
def evaluate_metrics(df, on, targets):
validated_targets = valid_targets(df, targets)
dfs = []
for metric in on:
dfs.append(evaluate_recommendations(df, metric, validated_targets))
return | pd.concat(dfs) | pandas.concat |
import pdb # NOQA F401
import copy
import os
import sqlite3
import pandas as pd
__alchemy_installed = True
try:
from sqlalchemy import create_engine, inspect
# from sqlalchemy.engine.reflection import Inspector
except:
__alchemy_installed = False
def db_exists(db='xxx.sqlite'):
return os.path.isfile(db)
def create_db_engine(db='xxx.sqlite', check_exists=True):
# if we don't do this, the code will create an empty db with the given name
if check_exists:
assert db_exists(db), "The database '%s' does NOT exist!" % db
engine = create_engine('sqlite:///%s' % db)
return engine
def table_exists(table_name, db='xxx.sqlite'):
if __alchemy_installed:
engine = create_db_engine(db)
return engine_has_table(engine, table_name)
else:
raise NotImplementedError("Can't do this yet without sqlalchemy")
def read_sql_query(query, db='xxx.sqlite', verbose=False, force_no_alchemy=False, return_array=False, return_single_value=False, **kwargs):
"""
return_array will return a list if the query is for just one column (avoids a bunch of extra unpacking at the point of calling the func)
"""
if verbose:
print('Trying to get %s' % db)
if (__alchemy_installed and not force_no_alchemy) and not (return_array or return_single_value):
engine = create_db_engine(db)
try:
ds = pd.read_sql_query(query, engine, **kwargs)
except Exception as e:
if 'no such column' in str(e):
# pdb.set_trace()
msg = "Query '%s' failed because of missing column:" % query
for tbl in engine.table_names():
if tbl in query.split(' '):
# print(tbl)
raise Exception(msg + '%s' % (schema(tbl, db)))
raise Exception(str(e))
# pdb.set_trace()
else:
# this is actually faster than sqlalchemy, probably lots of overhead + sanity checks?
return _convert_query_result_to_df(query, db, return_array=return_array, return_single_value=return_single_value)
return ds
def item_exists(item, col, table, db, cursor=None):
query = "select * from %s where %s in ('%s')" % (table, col, item)
res = query_response(query, db, cursor=cursor)
return res is not None
def sql_like_query(col_name='', table='', query='', db_name=''):
"""
# query would be like 'select * from data where name = '''
"""
query = "select * from %s where %s LIKE '%%%s%%'" % (
table, col_name, query)
return read_sql_query(query, db_name)
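# Example of the generated SQL (table/column names are illustrative only):
#   sql_like_query(col_name='name', table='users', query='smi', db_name='my.sqlite')
#   executes: select * from users where name LIKE '%smi%'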
def query_response(query, db, cursor=None):
close_cursor = True
if cursor is None:
conn = sqlite3.connect(db)
crs = conn.cursor()
else:
crs = cursor
close_cursor = False
crs.execute(query)
res = crs.fetchone()
if close_cursor:
crs.close()
return res
def _convert_query_result_to_df(query, db, return_array=False, return_single_value=False, verbose=False):
conn = sqlite3.connect(db)
crs = conn.cursor()
if verbose:
print(query)
crs.execute(query)
res = crs.fetchall()
if not len(res):
raise Exception("Nothing returned for '%s'" % query)
cols = [x[0] for x in crs.description]
if (len(cols) == 1) and (return_array or return_single_value):
crs.close()
if return_single_value:
if len(res[0]) > 1:
raise Exception("There are more than 1 value!")
return res[0][0]
else:
return [x[0] for x in res]
else:
df_in = {}
for i in range(len(cols)):
df_in[cols[i]] = [x[i] for x in res]
crs.close()
return pd.DataFrame(df_in, columns=cols)
def engine_has_table(engine, table_name):
insp = inspect(engine)
tables = insp.get_table_names()
return table_name in tables
def read_sql_table(table_name, db='xxx.sqlite', force_no_alchemy=False, check_exists=True):
"""
wrapper around pandas function
force_no_alchemy is there for debug reasons
"""
if __alchemy_installed and not force_no_alchemy:
engine = create_db_engine(db, check_exists=check_exists)
if not engine_has_table(engine, table_name):
print("Table '%s' not found in %s!" % (table_name, db))
return pd.DataFrame()
else:
ds = pd.read_sql_table(table_name, engine)
return ds
else:
query = 'select * from %s' % table_name
return _convert_query_result_to_df(query, db)
def schema(table_name='', db_name=''):
df = read_sql_query("select * from %s limit 5" % table_name, db=db_name)
cols = df.columns
rv = []
i = 0
def fixer(x):
return str(type(x)).replace("<class '", '').replace("'>", '')
for c in cols:
cts = list(set(list(map(fixer, df[c]))))
if len(cts) == 1:
rv.append('[%d]:%s:%s' % (i, c, cts[0]))
else:
rv.append('[%d]:%s:%s' % (i, c, ','.join(cts)))
i += 1
return rv
def sqlite_schema(table_name='', db_name=''):
_db = sqlite3.connect(db_name)
cur = _db.cursor()
res = cur.execute("PRAGMA table_info('%s')" % table_name).fetchall()
out = []
for el in res:
out.append([el[0], el[1], el[2]])
return | pd.DataFrame(out, columns=['col_nr', 'col_name', 'col_type']) | pandas.DataFrame |
from optparse import OptionParser
import datetime as dt
import pandas as pd
import numpy as np
import blpapi # See our installation guide to learn how to install this library properly
class BBG(object):
"""
This class is a wrapper around the Bloomberg API. To work, it requires an active bloomberg terminal and
a python 3.6 environment.
"""
def __init__(self):
self.options = BBG._parse_cmd_line()
def fetch_series(self, securities, fields, startdate, enddate, period="DAILY", calendar="ACTUAL", fx=None,
fperiod=None, verbose=False):
"""
Fetches time series for given tickers and fields, from startdate to enddate.
Output is a DataFrame with tickers on the columns. If a single field is passed, the index are the dates.
If a list of fields is passed, a multi-index DataFrame is returned, where the index is ['FIELD', date].
Requests can easily get really big, this method allows for up to 30k data points.
:param securities: str or list of str
:param fields: str or list of str
:param startdate: str, datetime or timestamp
:param enddate: str, datetime or timestamp
:param period: 'DAILY', 'WEEKLY', 'MONTHLY', 'QUARTERLY', 'SEMI ANNUAL' OR 'YEARLY'. Periodicity of the series
:param calendar: 'ACTUAL', 'CALENDAR' or 'FISCAL'
:return: DataFrame or Multi-index DataFrame (if more than one field is passed)
"""
startdate = BBG._assert_date_type(startdate)
enddate = BBG._assert_date_type(enddate)
bbg_start_date = BBG._datetime_to_bbg_string(startdate)
bbg_end_date = BBG._datetime_to_bbg_string(enddate)
if startdate > enddate:
ValueError("Start date is later than end date")
session_options = blpapi.SessionOptions()
session_options.setServerHost(self.options.host)
session_options.setServerPort(self.options.port)
session = blpapi.Session(session_options)
if not session.start():
raise ConnectionError("Failed to start session")
try:
if not session.openService("//blp/refdata"):
raise ConnectionError("Failed to open //blp/refdat")
# Obtain the previously opened service
refdata_service = session.getService("//blp/refdata")
# Create and fill the request for historical data
request = refdata_service.createRequest("HistoricalDataRequest")
# grab securities
if type(securities) is list:
for sec in securities:
request.getElement("securities").appendValue(sec)
else:
request.getElement("securities").appendValue(securities)
# grab fields
if type(fields) is list:
for f in fields:
request.getElement("fields").appendValue(f)
else:
request.getElement("fields").appendValue(fields)
request.set("periodicityAdjustment", calendar)
request.set("periodicitySelection", period)
request.set("startDate", bbg_start_date)
request.set("endDate", bbg_end_date)
request.set("maxDataPoints", 30000)
if not (fx is None):
request.set("currency", fx)
if not (fperiod is None):
overrides_bdh = request.getElement("overrides")
override1_bdh = overrides_bdh.appendElement()
override1_bdh.setElement("fieldId", "BEST_FPERIOD_OVERRIDE")
override1_bdh.setElement("value", fperiod)
if verbose:
print("Sending Request:", request.getElement("date").getValue())
# send request
session.sendRequest(request)
# process received response
results = {}
while True:
ev = session.nextEvent()
for msg in ev:
if verbose:
print(msg)
if msg.messageType().__str__() == "HistoricalDataResponse":
sec_data = msg.getElement("securityData")
sec_name = sec_data.getElement("security").getValue()
field_data = sec_data.getElement("fieldData")
if type(fields) is list:
results[sec_name] = {}
for day in range(field_data.numValues()):
fld = field_data.getValue(day)
for fld_i in fields:
if fld.hasElement(fld_i):
results[sec_name]\
.setdefault(fld_i, []).append([fld.getElement("date").getValue(),
fld.getElement(fld_i).getValue()])
else:
results[sec_name] = []
for day_i in range(field_data.numValues()):
fld = field_data.getValue(day_i)
results[sec_name].append([
fld.getElement("date").getValue(),
fld.getElement(fields).getValue()])
if ev.eventType() == blpapi.Event.RESPONSE: # Response completely received, break out of the loop
break
finally:
session.stop()
if not type(securities) is list:
results = results[securities]
# parse the results as a DataFrame
df = pd.DataFrame()
if not (type(securities) is list) and not (type(fields) is list):
# single ticker and single field
# returns a dataframe with a single column
results = np.array(results)
df[securities] = pd.Series(index=results[:, 0], data=results[:, 1])
elif (type(securities) is list) and not (type(fields) is list):
# multiple tickers and single field
# returns a single dataframe for the field with the ticker on the columns
for tick in results.keys():
aux = np.array(results[tick])
if len(aux) == 0:
df[tick] = np.nan
else:
df = pd.concat([df, pd.Series(index=aux[:, 0], data=aux[:, 1], name=tick)], axis=1, join='outer', sort=True)
elif not (type(securities) is list) and (type(fields) is list):
# single ticker and multiple fields
# returns a single dataframe for the ticker with the fields on the columns
for fld in results.keys():
aux = np.array(results[fld])
df[fld] = pd.Series(index=aux[:, 0], data=aux[:, 1])
else:
# multiple tickers and multiple fields
# returns a multi-index dataframe with [field, ticker] as index
for tick in results.keys():
for fld in results[tick].keys():
aux = np.array(results[tick][fld])
df_aux = pd.DataFrame(data={'FIELD': fld,
'TRADE_DATE': aux[:, 0],
'TICKER': tick,
'VALUE': aux[:, 1]})
df = df.append(df_aux)
df['VALUE'] = df['VALUE'].astype(float, errors='ignore')
df['TRADE_DATE'] = df['TRADE_DATE'].astype(pd.Timestamp)
df = pd.pivot_table(data=df, index=['FIELD', 'TRADE_DATE'], columns='TICKER', values='VALUE')
return df
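# Hypothetical usage sketch (tickers, field and dates are illustrative, and an active
# Bloomberg terminal session is assumed):
#   bbg = BBG()
#   px = bbg.fetch_series(securities=['AAPL US Equity', 'MSFT US Equity'], fields='PX_LAST',
#                         startdate='2020-01-01', enddate='2020-12-31', period='DAILY')
#   # px is a DataFrame indexed by date with one column per ticker; passing a list of
#   # fields (with a list of tickers) returns the ['FIELD', 'TRADE_DATE'] multi-index layout.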
@staticmethod
def fetch_contract_parameter(securities, field):
"""
Grabs a characteristic of a contract, like maturity dates, first notice dates, strikes, contract sizes, etc.
Returns a DataFrame with the tickers on the index and the field on the columns.
OBS: For now, it only allows for a single field. An extension that allows for multiple fields is a good idea.
:param securities: str or list of str
:param field: str
:return: DataFrame
"""
session = blpapi.Session()
session.start()
if not session.openService("//blp/refdata"):
raise ConnectionError("Failed to open //blp/refdat")
service = session.getService("//blp/refdata")
request = service.createRequest("ReferenceDataRequest")
if type(securities) is list:
for each in securities:
request.append("securities", str(each))
else:
request.append("securities", securities)
request.append("fields", field)
session.sendRequest(request)
name, val = [], []
end_reached = False
while not end_reached:
ev = session.nextEvent()
if ev.eventType() == blpapi.Event.RESPONSE or ev.eventType() == blpapi.Event.PARTIAL_RESPONSE:
for msg in ev:
for i in range(msg.getElement("securityData").numValues()):
sec = str(msg.getElement("securityData").getValue(i).getElement("security").getValue()) # here we get the security
name.append(sec)
value = msg.getElement("securityData").getValue(i).getElement("fieldData").getElement(field).getValue()
val.append(value) # here we get the field we have selected
if ev.eventType() == blpapi.Event.RESPONSE:
end_reached = True
session.stop()
df = pd.DataFrame(val, columns=[field], index=name)
return df
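# Hypothetical usage sketch (ticker and field below are illustrative only):
#   bbg.fetch_contract_parameter(['CLZ21 Comdty', 'CLF22 Comdty'], 'FUT_NOTICE_FIRST')
#   # -> DataFrame indexed by the two tickers with a single 'FUT_NOTICE_FIRST' column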
@staticmethod
def fetch_futures_list(generic_ticker):
"""
Given a generic ticker for a future contract, it returns all of the historical contracts that composed the
generic.
:param generic_ticker: str
:return: list
"""
session = blpapi.Session()
if not session.start():
raise ConnectionError("Failed to start session.")
if not session.openService("//blp/refdata"):
raise ConnectionError("Failed to open //blp/refdat")
service = session.getService("//blp/refdata")
request = service.createRequest("ReferenceDataRequest")
request.append("securities", generic_ticker)
request.append("fields", "FUT_CHAIN")
overrides = request.getElement("overrides")
override1 = overrides.appendElement()
override1.setElement("fieldId", "INCLUDE_EXPIRED_CONTRACTS")
override1.setElement("value", "Y")
override2 = overrides.appendElement()
override2.setElement("fieldId", "CHAIN_DATE")
override2.setElement("value", pd.to_datetime('today').date().strftime('%Y%m%d'))
session.sendRequest(request)
# process received events
end_reached = True
contract_list = []
while end_reached:
ev = session.nextEvent()
if ev.eventType() == blpapi.Event.RESPONSE or ev.eventType() == blpapi.Event.PARTIAL_RESPONSE:
for msg in ev:
elements = msg.getElement("securityData").getValue().getElement("fieldData").getElement("FUT_CHAIN")
num_values = elements.numValues()
for cont in range(num_values):
contract_list.append(elements.getValue(cont).getElement("Security Description").getValue())
if ev.eventType() == blpapi.Event.RESPONSE:
end_reached = False
session.stop()
return contract_list
@staticmethod
def fetch_index_weights(index_name, ref_date):
"""
Given an index (e.g. S&P500, IBOV) and a date, it returns a DataFrame of its components as the index and
their respective weights as the value for the given date.
:param index_name: str
:param ref_date: str, datetime or timestamp
:return: DataFrame
"""
ref_date = BBG._assert_date_type(ref_date)
session = blpapi.Session()
if not session.start():
raise ConnectionError("Failed to start session.")
if not session.openService("//blp/refdata"):
raise ConnectionError("Failed to open //blp/refdat")
service = session.getService("//blp/refdata")
request = service.createRequest("ReferenceDataRequest")
request.append("securities", index_name)
request.append("fields", "INDX_MWEIGHT_HIST")
overrides = request.getElement("overrides")
override1 = overrides.appendElement()
override1.setElement("fieldId", "END_DATE_OVERRIDE")
override1.setElement("value", ref_date.strftime('%Y%m%d'))
session.sendRequest(request) # there is no need to save the response as a variable in this case
end_reached = False
df = pd.DataFrame()
while not end_reached:
ev = session.nextEvent()
if ev.eventType() == blpapi.Event.RESPONSE:
for msg in ev:
security_data = msg.getElement('securityData')
security_data_list = [security_data.getValueAsElement(i) for i in range(security_data.numValues())]
for sec in security_data_list:
field_data = sec.getElement('fieldData')
field_data_list = [field_data.getElement(i) for i in range(field_data.numElements())]
for fld in field_data_list:
for v in [fld.getValueAsElement(i) for i in range(fld.numValues())]:
s = | pd.Series() | pandas.Series |
""" This script stores the shared settings for other .py files in the same repository."""
import pandas as pd
from utils.concentration import rainfall_events
# read the discrete storm events
obspath = '../data/obs/'
modpath = '../data/mod/'
outpath = '../output/'
events_name = 'obs_storm_event_common.csv'
obs_events = rainfall_events(outpath + events_name)
# Read daily loads and flow
day_load_flow = pd.read_csv(obspath+'low_interp_flow.csv', index_col='Date')
day_load_flow.index = pd.to_datetime(day_load_flow.index)
# Read hourly loads and flow
fn_conct = 'cq-NO3'
conct_name = fn_conct.split('-')[1]
hour_load_flow = pd.read_csv(f'{obspath}high_{conct_name}_flow.csv', index_col = 'Time')
hour_load_flow.index = pd.to_datetime(hour_load_flow.index)
# Read modeling loads and flow
mod_fl_fn = 'DIN_flow.csv'
mod_load_flow = pd.read_csv(modpath + mod_fl_fn, index_col='Date')
mod_load_flow.index = | pd.to_datetime(mod_load_flow.index, dayfirst=False) | pandas.to_datetime |
import numpy as np
import pytest
from pandas.compat import IS64
import pandas as pd
import pandas._testing as tm
@pytest.mark.parametrize("ufunc", [np.abs, np.sign])
# np.sign emits a warning with nans, <https://github.com/numpy/numpy/issues/15127>
@pytest.mark.filterwarnings("ignore:invalid value encountered in sign")
def test_ufuncs_single(ufunc):
a = pd.array([1, 2, -3, np.nan], dtype="Float64")
result = ufunc(a)
expected = pd.array(ufunc(a.astype(float)), dtype="Float64")
tm.assert_extension_array_equal(result, expected)
s = pd.Series(a)
result = ufunc(s)
expected = pd.Series(expected)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("ufunc", [np.log, np.exp, np.sin, np.cos, np.sqrt])
def test_ufuncs_single_float(ufunc):
a = pd.array([1.0, 0.2, 3.0, np.nan], dtype="Float64")
with np.errstate(invalid="ignore"):
result = ufunc(a)
expected = pd.array(ufunc(a.astype(float)), dtype="Float64")
tm.assert_extension_array_equal(result, expected)
s = pd.Series(a)
with np.errstate(invalid="ignore"):
result = ufunc(s)
expected = pd.Series(ufunc(s.astype(float)), dtype="Float64")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("ufunc", [np.add, np.subtract])
def test_ufuncs_binary_float(ufunc):
# two FloatingArrays
a = pd.array([1, 0.2, -3, np.nan], dtype="Float64")
result = ufunc(a, a)
expected = pd.array(ufunc(a.astype(float), a.astype(float)), dtype="Float64")
tm.assert_extension_array_equal(result, expected)
# FloatingArray with numpy array
arr = np.array([1, 2, 3, 4])
result = ufunc(a, arr)
expected = pd.array(ufunc(a.astype(float), arr), dtype="Float64")
tm.assert_extension_array_equal(result, expected)
result = ufunc(arr, a)
expected = pd.array(ufunc(arr, a.astype(float)), dtype="Float64")
tm.assert_extension_array_equal(result, expected)
# FloatingArray with scalar
result = ufunc(a, 1)
expected = pd.array(ufunc(a.astype(float), 1), dtype="Float64")
tm.assert_extension_array_equal(result, expected)
result = ufunc(1, a)
expected = pd.array(ufunc(1, a.astype(float)), dtype="Float64")
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize("values", [[0, 1], [0, None]])
def test_ufunc_reduce_raises(values):
arr = pd.array(values, dtype="Float64")
res = np.add.reduce(arr)
expected = arr.sum(skipna=False)
tm.assert_almost_equal(res, expected)
@pytest.mark.skipif(not IS64, reason="GH 36579: fail on 32-bit system")
@pytest.mark.parametrize(
"pandasmethname, kwargs",
[
("var", {"ddof": 0}),
("var", {"ddof": 1}),
("kurtosis", {}),
("skew", {}),
("sem", {}),
],
)
def test_stat_method(pandasmethname, kwargs):
s = pd.Series(data=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, np.nan, np.nan], dtype="Float64")
pandasmeth = getattr(s, pandasmethname)
result = pandasmeth(**kwargs)
s2 = pd.Series(data=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6], dtype="float64")
pandasmeth = getattr(s2, pandasmethname)
expected = pandasmeth(**kwargs)
assert expected == result
def test_value_counts_na():
arr = pd.array([0.1, 0.2, 0.1, pd.NA], dtype="Float64")
result = arr.value_counts(dropna=False)
idx = pd.Index([0.1, 0.2, pd.NA], dtype=arr.dtype)
assert idx.dtype == arr.dtype
expected = pd.Series([2, 1, 1], index=idx, dtype="Int64")
| tm.assert_series_equal(result, expected) | pandas._testing.assert_series_equal |
# util.py
from __future__ import print_function
from collections import Mapping, OrderedDict
import datetime
import itertools
import random
import warnings
import pandas as pd
np = pd.np
from scipy import integrate
from matplotlib import pyplot as plt
import seaborn
from scipy.optimize import minimize
from scipy.signal import correlate
from titlecase import titlecase
from pug.nlp.util import listify, fuzzy_get, make_timestamp
def dropna(x):
"""Delete all NaNs and and infinities in a sequence of real values
Returns:
list: Array of all values in x that are between -inf and +inf, exclusive
"""
return [x_i for x_i in listify(x) if float('-inf') < x_i < float('inf')]
def rms(x):
""""Root Mean Square"
Arguments:
x (seq of float): A sequence of numerical values
Returns:
The square root of the average of the squares of the values
math.sqrt(sum(x_i**2 for x_i in x) / len(x))
or
return (np.array(x) ** 2).mean() ** 0.5
>>> rms([0, 2, 4, 4])
3.0
"""
try:
return (np.array(x) ** 2).mean() ** 0.5
except:
x = np.array(dropna(x))
invN = 1.0 / len(x)
return (sum(invN * (x_i ** 2) for x_i in x)) ** .5
def rmse(target, prediction, relative=False, percent=False):
"""Root Mean Square Error
This seems like a simple formula that you'd never need to create a function for.
But my mistakes on coding challenges have convinced me that I do need it,
as a reminder of important tweaks, if nothing else.
>>> rmse([0, 1, 4, 3], [2, 1, 0, -1])
3.0
>>> rmse([0, 1, 4, 3], [2, 1, 0, -1], relative=True) # doctest: +ELLIPSIS
1.2247...
>>> rmse([0, 1, 4, 3], [2, 1, 0, -1], percent=True) # doctest: +ELLIPSIS
122.47...
"""
relative = relative or percent
prediction = pd.np.array(prediction)
target = np.array(target)
err = prediction - target
if relative:
denom = target
# Avoid ZeroDivisionError: divide by prediction rather than target where target==0
denom[denom == 0] = prediction[denom == 0]
# If the prediction and target are both 0, then the error is 0 and should be included in the RMSE
# Otherwise, the np.isinf() below would remove all these zero-error predictions from the array.
denom[(denom == 0) & (target == 0)] = 1
err = (err / denom)
err = err[(~ np.isnan(err)) & (~ np.isinf(err))]
return 100 * rms(err) if percent else rms(err)
def blended_rolling_apply(series, window=2, fun=pd.np.mean):
new_series = pd.Series(np.fromiter((fun(series[:i + 1]) for i in range(window - 1)),
type(series.values[0])), index=series.index[:window - 1]).append(
pd.rolling_apply(series.copy(), window, fun)[window - 1:])
assert len(series) == len(new_series), (
"blended_rolling_apply should always return a series of the same length!\n"
" len(series) = {0} != {1} = len(new_series".format(len(series), len(new_series)))
assert not any(np.isnan(val) or val is None for val in new_series)
return new_series
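# Illustrative behavior (not a doctest from the original source):
#   blended_rolling_apply(pd.Series([1., 2., 3., 4., 5.]), window=3, fun=pd.np.mean)
#   -> [1.0, 1.5, 2.0, 3.0, 4.0]; the first window-1 points are means of the partial
#   windows [1] and [1, 2], the rest are ordinary 3-point rolling means.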
def rolling_latch(series, period=31, decay=1.0):
# FIXME: implement a recursive exponential decay filter rather than the nonrecursive derating done here
return blended_rolling_apply(series, period, lambda val: decay * pd.np.max(val))
def clean_dataframe(df):
"""Fill NaNs with the previous value, the next value or if all are NaN then 1.0"""
df = df.fillna(method='ffill')
df = df.fillna(0.0)
return df
def clean_dataframes(dfs):
"""Fill NaNs with the previous value, the next value or if all are NaN then 1.0
TODO:
Linear interpolation and extrapolation
Arguments:
dfs (list of dataframes): list of dataframes that contain NaNs to be removed
Returns:
list of dataframes: list of dataframes with NaNs replaced by interpolated values
"""
if isinstance(dfs, (list)):
# clean each frame and return the cleaned copies
return [clean_dataframe(df) for df in dfs]
else:
return [clean_dataframe(dfs)]
def get_symbols_from_list(list_name):
"""Retrieve a named (symbol list name) list of strings (symbols)
If you've installed the QSTK Quantitative analysis toolkit
`get_symbols_from_list('sp5002012')` will produce a list of the symbols that
were members of the S&P 500 in 2012.
Otherwise an import error exception will be raised.
If the symbol list cannot be found you'll get an empty list returned
Example:
>> len(get_symbols_from_list('sp5002012')) in (0, 501)
True
"""
try:
# quant software toolkit has a method for retrieving lists of symbols like S&P500 for 2012 with 'sp5002012'
import QSTK.qstkutil.DataAccess as da
dataobj = da.DataAccess('Yahoo')
except ImportError:
raise
except:
return []
try:
return dataobj.get_symbols_from_list(list_name)
except:
raise
def make_symbols(symbols, *args):
"""Return a list of uppercase strings like "GOOG", "$SPX, "XOM"...
Arguments:
symbols (str or list of str): list of market ticker symbols to normalize
If `symbols` is a str a get_symbols_from_list() call is used to retrieve the list of symbols
Returns:
list of str: list of canonical ticker symbol strings (typically after .upper().strip())
See Also:
pug.dj.db.normalize_names
Examples:
>>> make_symbols("Goog")
['GOOG']
>>> make_symbols(" $SPX ", " aaPL ")
['$SPX', 'AAPL']
>>> make_symbols(["$SPX", ["GOOG", "AAPL"]])
['GOOG', 'AAPL', '$SPX']
>>> make_symbols(" $Spy, Goog, aAPL ")
['$SPY', 'GOOG', 'AAPL']
"""
if (hasattr(symbols, '__iter__') and not any(symbols)) \
or (isinstance(symbols, (list, tuple, Mapping)) and not symbols):
return []
if isinstance(symbols, basestring):
# # FIXME: find a direct API for listing all possible symbols
# try:
# return list(set(dataobj.get_symbols_from_list(symbols)))
# except:
return [s.upper().strip() for s in (symbols.split(',') + list(str(a) for a in args))]
else:
ans = []
for sym in (list(symbols) + list(args)):
tmp = make_symbols(sym)
ans = ans + tmp
return list(set(ans))
def make_time_series(x, t=pd.Timestamp(datetime.datetime(1970, 1, 1)), freq=None):
"""Convert a 2-D array of time/value pairs (or pair of time/value vectors) into a pd.Series time-series
>>> make_time_series(range(3), freq='15min') # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
1970-01-01 00:00:00 NaN
1970-01-01 00:15:00 NaN
1970-01-01 00:30:00 NaN
dtype: float64
"""
if isinstance(x, pd.DataFrame):
x = pd.Series(x[x.columns[0]])
elif not isinstance(x, pd.Series) and (not isinstance(t, (pd.Series, pd.Index, list, tuple)) or not len(t)):
#warnings.warn("Coercing a non-Series")
if len(x) == 2:
t, x = listify(x[0]), listify(x[1])
elif len(x) >= 2:
try:
t, x = zip(*x)
except (ValueError, IndexError, TypeError):
pass
x = pd.Series(x)
else:
if isinstance(t, (datetime.datetime, pd.Timestamp)):
t = pd.Timestamp(t)
else:
x = pd.Series(listify(x), index=listify(t))
if not isinstance(x, pd.Series):
raise TypeError("`pug.invest.util.make_time_series(x, t)` expects x to be a type that"
" can be coerced to a Series object, but it's type is: {0}"
.format(type(x)))
# By this point x must be a Series, only question is whether its index needs to be converted to a DatetimeIndex
if x.index[0] != 0 and isinstance(x.index[0], (datetime.date, datetime.datetime, pd.Timestamp,
basestring, float, np.int64, int)):
t = x.index
elif isinstance(t, (datetime.date, datetime.datetime, pd.Timestamp, basestring, float, np.int64, int)):
if not freq:
freq = '15min'
warnings.warn('Assumed time series freq to be {0} though no freq argument was provided!'
.format(freq), RuntimeWarning)
t = pd.date_range(t, periods=len(x), freq=freq)
x = pd.Series(x, index=t)
if isinstance(x, pd.Series):
x.index = pd.DatetimeIndex(x.index.values)
return x
def pandas_mesh(df):
"""Create numpy 2-D "meshgrid" from 3+ columns in a Pandas DataFrame
Arguments:
df (DataFrame): Must have 3 or 4 columns of numerical data
Returns:
OrderedDict: column labels from the data frame are the keys, values are 2-D matrices
All matrices have shape NxM, where N = len(set(df.iloc[:,0])) and M = len(set(df.iloc[:,1]))
>>> pandas_mesh(pd.DataFrame(np.arange(18).reshape(3,6),
... columns=list('ABCDEF'))).values() # doctest: +NORMALIZE_WHITESPACE
[array([[ 0, 6, 12],
[ 0, 6, 12],
[ 0, 6, 12]]),
array([[ 1, 1, 1],
[ 7, 7, 7],
[13, 13, 13]]),
array([[ 2., nan, nan],
[ nan, 8., nan],
[ nan, nan, 14.]]),
array([[ 3., nan, nan],
[ nan, 9., nan],
[ nan, nan, 15.]]),
array([[ 4., nan, nan],
[ nan, 10., nan],
[ nan, nan, 16.]]),
array([[ 5., nan, nan],
[ nan, 11., nan],
[ nan, nan, 17.]])]
"""
xyz = [df[c].values for c in df.columns]
index = pd.MultiIndex.from_tuples(zip(xyz[0], xyz[1]), names=['x', 'y'])
# print(index)
series = [pd.Series(values, index=index) for values in xyz[2:]]
# print(series)
X, Y = np.meshgrid(sorted(list(set(xyz[0]))), sorted(list(set(xyz[1]))))
N, M = X.shape
Zs = []
# print(Zs)
for k, s in enumerate(series):
Z = np.empty(X.shape)
Z[:] = np.nan
for i, j in itertools.product(range(N), range(M)):
Z[i, j] = s.get((X[i, j], Y[i, j]), np.NAN)
Zs += [Z]
return OrderedDict((df.columns[i], m) for i, m in enumerate([X, Y] + Zs))
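# Illustrative usage sketch (added; not part of the original module). pandas_mesh pivots
# the first two columns into x/y coordinate matrices and every remaining column onto that
# grid -- the shape expected by matplotlib surface/contour plots.
def _example_pandas_mesh():
    df = pd.DataFrame({'x': [0, 0, 1, 1], 'y': [0, 1, 0, 1], 'z': [1., 2., 3., 4.]})
    mesh = pandas_mesh(df)
    # mesh['x'], mesh['y'] are 2x2 coordinate matrices; mesh['z'] holds the z value for
    # each (x, y) pair, with NaN wherever a combination is missing from the frame.
    return mesh['x'], mesh['y'], mesh['z']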
def integrated_change(ts, integrator=integrate.trapz, clip_floor=None, clip_ceil=float('inf')):
"""Total value * time above the starting value within a TimeSeries"""
integrator = get_integrator(integrator)
if clip_floor is None:
clip_floor = ts[0]
if clip_ceil < clip_floor:
polarity = -1
offset, clip_floor, clip_ceil, = clip_ceil, clip_ceil, clip_floor
else:
polarity, offset = 1, clip_floor
clipped_values = np.clip(ts.values - offset, clip_floor, clip_ceil)
    # `integrator` was already resolved to a scipy.integrate function by get_integrator() above
# datetime units converted to seconds (since 1/1/1970)
return integrator(clipped_values, ts.index.astype(np.int64) / 10 ** 9)
def insert_crossings(ts, thresh):
"""Insert/append threshold crossing points (time and value) into a timeseries (pd.Series)
Arguments:
ts (pandas.Series): Time series of values to be interpolated at `thresh` crossings
thresh (float or np.float64):
"""
# import time
# tic0 = time.clock(); tic = tic0
# int64 for fast processing, pandas.DatetimeIndex is 5-10x slower, 0.3 ms
index = ts.index
index_type = type(index)
ts.index = ts.index.astype(np.int64)
# toc = time.clock();
# print((toc-tic)*1000); tic = time.clock()
# value immediately before an upward thresh crossing, 6 ms
preup = ts[(ts < thresh) & (ts.shift(-1) > thresh)]
# toc = time.clock();
# print((toc-tic)*1000); tic = time.clock()
# values immediately after an upward thresh crossing, 4 ms\
postup = ts[(ts.shift(1) < thresh) & (ts > thresh)]
# toc = time.clock();
# print((toc-tic)*1000); tic = time.clock()
# value immediately after a downward thresh crossing, 1.8 ms
postdown = ts[(ts < thresh) & (ts.shift(1) > thresh)]
# toc = time.clock();
# print((toc-tic)*1000); tic = time.clock()
    # value immediately before a downward thresh crossing, 1.9 ms
predown = ts[(ts.shift(-1) < thresh) & (ts > thresh)]
# toc = time.clock();
# print((toc-tic)*1000); tic = time.clock()
# upward slope (always positive) between preup and postup in units of
# "value" per nanosecond (timestamps convert to floats as nanoseconds), 0.04 ms
slopeup = (postup.values - preup.values) / (postup.index.values - preup.index.values).astype(np.float64)
# toc = time.clock();
# print((toc-tic)*1000); tic = time.clock()
# upward crossing point index/time, 0.04 ms
tup = preup.index.values + ((thresh - preup.values) / slopeup).astype(np.int64)
# toc = time.clock();
# print((toc-tic)*1000); tic = time.clock()
# downward slope (always negative) between predown and postdown in units of
# "value" per nanosecond (timestamps convert to floats as nanoseconds), 0.03 ms
slopedown = (postdown.values - predown.values) / \
(postdown.index.values - predown.index.values).astype(np.float64)
# toc = time.clock();
# print((toc-tic)*1000); tic = time.clock()
    # downward crossing point index/time, 0.02 ms
tdown = predown.index.values + ((thresh - predown.values) / slopedown).astype(np.int64)
# toc = time.clock();
# print((toc-tic)*1000); tic = time.clock()
# insert crossing points into time-series (if it had a regular sample period before, it won't now!), 2.0 ms
ts.index = index # pd.DatetimeIndex(ts.index)
# toc = time.clock();
# print((toc-tic)*1000); tic = time.clock()
# insert crossing points into time-series (if it had a regular sample period before, it won't now!), 2.0 ms
ts = ts.append(pd.Series(thresh * np.ones(len(tup)), index=index_type(tup.astype(np.int64))))
# toc = time.clock();
# print((toc-tic)*1000); tic = time.clock()
# insert crossing points into time-series (if it had a regular sample period before, it won't now!), 1.9 ms
ts = ts.append(pd.Series(thresh * np.ones(len(tdown)), index=index_type(tdown.astype(np.int64))))
# toc = time.clock();
# print((toc-tic)*1000); tic = time.clock()
# if you don't `sort_index()`, numerical integrators in `scipy.integrate` will give the wrong answer, 0.1 ms
ts = ts.sort_index()
# toc = time.clock();
# if you don't `sort_index()`, numerical integrators in `scipy.integrate` will give the wrong answer
# print((toc-tic)*1000); tic = time.clock()
# print((toc-tic0)*1000);
return ts
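# Illustrative usage sketch (added; not part of the original module). insert_crossings
# linearly interpolates the exact times where the series crosses `thresh` and appends
# (time, thresh) samples there, so clipping at the threshold and integrating is accurate.
def _example_insert_crossings():
    ts = pd.Series([1.0, 3.0, 1.0],
                   index=pd.date_range('2014-12-09', periods=3, freq='15min'))
    ts2 = insert_crossings(ts, thresh=2.0)
    # ts2 gains two extra samples with value 2.0: one on the upward crossing between the
    # first two points and one on the downward crossing between the last two.
    return ts2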
def get_integrator(integrator):
"""Return the scipy.integrator indicated by an index, name, or integrator_function
>> get_integrator(0)
"""
integrator_types = set(['trapz', 'cumtrapz', 'simps', 'romb'])
integrator_funcs = [integrate.trapz, integrate.cumtrapz, integrate.simps, integrate.romb]
if isinstance(integrator, int) and 0 <= integrator < len(integrator_types):
integrator = integrator_types[integrator]
if isinstance(integrator, basestring) and integrator in integrator_types:
return getattr(integrate, integrator)
elif integrator in integrator_funcs:
return integrator
else:
print('Unsupported integration rule: {0}'.format(integrator))
print('Expecting one of these sample-based integration rules: %s' % (str(list(integrator_types))))
raise AttributeError
return integrator
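# Illustrative usage sketch (added; not part of the original module). get_integrator
# accepts an integer index, a rule name, or the scipy function itself and always returns
# a scipy.integrate sample-based integrator.
def _example_get_integrator():
    assert get_integrator('trapz') is integrate.trapz
    assert get_integrator(integrate.simps) is integrate.simps
    # An integer selects from ['trapz', 'cumtrapz', 'simps', 'romb'] in that order.
    assert get_integrator(0) is integrate.trapz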
def clipped_area(ts, thresh=0, integrator=integrate.trapz):
"""Total value * time above the starting value within a TimeSeries
Arguments:
ts (pandas.Series): Time series to be integrated.
thresh (float): Value to clip the tops off at (crossings will be interpolated)
References:
http://nbviewer.ipython.org/gist/kermit666/5720498
>>> t = ['2014-12-09T00:00', '2014-12-09T00:15', '2014-12-09T00:30', '2014-12-09T00:45',
... '2014-12-09T01:00', '2014-12-09T01:15', '2014-12-09T01:30', '2014-12-09T01:45']
>>> import pandas as pd
>>> ts = pd.Series([217, 234, 235, 231, 219, 219, 231, 232], index=pd.to_datetime(t))
>>> clipped_area(ts, thresh=230) # doctest: +ELLIPSIS
8598.52941...
>>> clipped_area(ts, thresh=234) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
562.5
>>> clipped_area(pd.Series(ts.values, index=ts.index.values.astype(pd.np.int64)),
... thresh=234) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
562.5
"""
integrator = get_integrator(integrator or 0)
ts = insert_crossings(ts, thresh) - thresh
ts = ts[ts >= 0]
# timestamp is in nanoseconds (since 1/1/1970) but this converts it to seconds (SI units)
return integrator(ts, ts.index.astype(np.int64)) / 1.0e9
def clipping_params(ts, capacity=100, rate_limit=float('inf'), method=None, max_attempts=100):
    """Start, end, and threshold that clips the value of a time series the most, given a limited "capacity" and "rate"
Assumes that signal can be linearly interpolated between points (trapezoidal integration)
Arguments:
ts (TimeSeries): Time series to attempt to clip to as low a max value as possible
capacity (float): Total "funds" or "energy" available for clipping (integrated area under time series)
method (str): scipy optimization algorithm name, one of:
'L-BFGS-B': Byrd, 1995, "A Limited Memory Algorithm for Bound Constrained Optimization"
'TNC': Truncated Newton in C, or Newton Conjugate-Gradient, each variable may be constrained with upper and lower bounds
'COBYLA': Constrained Optimization by Linear Approximation. Fortran implementation.
'SLSQP': Kraft, 1988, Sequential Least Squares Programming or Quadratic Programming, infinite bounds converted to large floats
TODO:
Bisection search for the optimal threshold.
Returns:
2-tuple: Timestamp of the start and end of the period of the maximum clipped integrated increase
>>> t = ['2014-12-09T00:00', '2014-12-09T00:15', '2014-12-09T00:30', '2014-12-09T00:45', '2014-12-09T01:00', '2014-12-09T01:15', '2014-12-09T01:30', '2014-12-09T01:45']
>>> import pandas as pd
>>> ts = pd.Series([217, 234, 235, 231, 219, 219, 231, 232], index=pd.to_datetime(t)) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
>>> clipping_params(ts, capacity=60000)['threshold'] # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
218.13...
>>> clipping_params(ts, capacity=30000)['threshold'] # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
224.15358...
"""
VALID_METHODS = ['L-BFGS-B', 'TNC', 'SLSQP', 'COBYLA']
# print('in clipping params for ts.index={0} and method={1}'.format(ts.index[0], method))
ts.index = ts.index.astype(np.int64)
costs = []
def cost_fun(x, *args):
thresh = x[0]
ts, capacity, bounds = args
integral = clipped_area(ts, thresh=thresh)
terms = np.array([(10. * (integral - capacity) / capacity) ** 2,
2. / 0.1**((bounds[0] - thresh) * capacity / bounds[0]),
2. / 0.1**((thresh - bounds[1]) * capacity / bounds[1]),
1.2 ** (integral / capacity)])
return sum(terms)
bounds = (ts.min(), ts.max())
done, attempts = 0, 0
thresh0 = bounds[0] + 0.5 * (bounds[1] - bounds[0])
    if not method or method not in VALID_METHODS:
while attempts < max_attempts and not done:
for optimizer_method in VALID_METHODS:
optimum = minimize(fun=cost_fun, x0=[thresh0], bounds=[bounds], args=(ts, capacity, bounds), method=optimizer_method)
if optimum.success:
done = True
break
if done:
break
attempts += 1
thresh0 = bounds[0] + random.random() * (bounds[1] - bounds[0])
else:
optimum = minimize(fun=cost_fun, x0=[thresh0], bounds=[bounds], args=(ts, capacity, bounds), method=method)
thresh = optimum.x[0]
integral = clipped_area(ts, thresh=thresh)
params = dict(optimum)
params.update({'costs': costs, 'threshold': thresh, 'initial_guess': thresh0, 'attempts': attempts,
'integral': integral, 'method': method})
return params
# if integral - capacity > capacity:
# return {'t0': None, 't1': None, 'threshold': 0.96*thresh + 0.06*bounds[0][1], 'integral': integral}
def discrete_clipping_params(ts, capacity=100, rate_limit=float('inf')):
    """Start, end, and threshold that clips the value of a time series the most, given a limited "capacity" and "rate"
Assumes that the integrated maximum includes the peak (instantaneous maximum).
Assumes that the threshold can only set to one of the values of the Series.
Arguments:
ts (TimeSeries): Time series to attempt to clip to as low a max value as possible
capacity (float): Total "funds" or "energy" available for clipping (integrated area under time series)
TODO:
Bisection search for the optimal threshold.
Returns:
2-tuple: Timestamp of the start and end of the period of the maximum clipped integrated increase
>> t = ['2014-12-09T00:00', '2014-12-09T00:15', '2014-12-09T00:30', '2014-12-09T00:45',
.. '2014-12-09T01:00', '2014-12-09T01:15', '2014-12-09T01:30', '2014-12-09T01:45']
>> ts = pd.Series([217, 234, 235, 231, 219, 219, 231, 232], index=pd.to_datetime(t))
>> (discrete_clipping_params(ts, capacity=60000) ==
.. {'integral': 54555.882352942499, 't0': pd.Timestamp('2014-12-09 00:15:00'),
.. 't1': pd.Timestamp('2014-12-09 01:45:00'),
.. 'threshold': 219})
True
>> (discrete_clipping_params(ts, capacity=30000) ==
.. {'integral': 5638.2352941179997, 't0': pd.Timestamp('2014-12-09 00:15:00'),
.. 't1': pd.Timestamp('2014-12-09 01:45:00'),
.. 'threshold': 231})
True
"""
raise NotImplementedError("Doesn't work. Returns incorrect, overly conservative threshold values.")
#index_type = ts.index.dtype
#ts2 = ts.copy()
ts.index = ts.index.astype(np.int64)
ts_sorted = ts.order(ascending=False)
# default is to clip right at the peak (no clipping at all)
i, t0, t1, integral, thresh = 1, ts_sorted.index[0], ts_sorted.index[0], 0, ts_sorted.iloc[0]
params = {'t0': t0, 't1': t1, 'integral': 0, 'threshold': thresh}
while i < len(ts_sorted) and integral <= capacity and (ts_sorted.iloc[0] - ts_sorted.iloc[i]) < rate_limit:
params = {'t0': pd.Timestamp(t0), 't1': pd.Timestamp(t1), 'threshold': thresh, 'integral': integral}
i += 1
times = ts_sorted.index[:i]
# print(times)
t0 = times.min()
t1 = times.max()
# print(ts_sorted.index[:3])
thresh = min(ts_sorted.iloc[:i])
integral = clipped_area(ts, thresh=thresh)
if integral <= capacity:
return {'t0': pd.Timestamp(t0), 't1': pd.Timestamp(t1), 'threshold': thresh, 'integral': integral}
return params
def square_off(series, time_delta=None, transition_seconds=1):
"""Insert samples in regularly sampled data to produce stairsteps from ramps when plotted.
    New samples are inserted 1 second (1e9 ns) before each existing sample, to facilitate plotting and sorting
>>> square_off(pd.Series(range(3), index=pd.date_range('2014-01-01', periods=3, freq='15m')),
... time_delta=5.5) # doctest: +NORMALIZE_WHITESPACE
2014-01-31 00:00:00 0
2014-01-31 00:00:05.500000 0
2015-04-30 00:00:00 1
2015-04-30 00:00:05.500000 1
2016-07-31 00:00:00 2
2016-07-31 00:00:05.500000 2
dtype: int64
>>> square_off(pd.Series(range(2), index=pd.date_range('2014-01-01', periods=2, freq='15min')),
... transition_seconds=2.5) # doctest: +NORMALIZE_WHITESPACE
2014-01-01 00:00:00 0
2014-01-01 00:14:57.500000 0
2014-01-01 00:15:00 1
2014-01-01 00:29:57.500000 1
dtype: int64
"""
if time_delta:
# int, float means delta is in seconds (not years!)
if isinstance(time_delta, (int, float)):
time_delta = datetime.timedelta(0, time_delta)
new_times = series.index + time_delta
else:
diff = np.diff(series.index)
time_delta = np.append(diff, [diff[-1]])
new_times = series.index + time_delta
new_times = pd.DatetimeIndex(new_times) - datetime.timedelta(0, transition_seconds)
return pd.concat([series, pd.Series(series.values, index=new_times)]).sort_index()
def clipping_threshold(ts, capacity=100, rate_limit=10):
"""Start and end index (datetime) that clips the price/value of a time series the most
Assumes that the integrated maximum includes the peak (instantaneous maximum).
Arguments:
ts (TimeSeries): Time series of prices or power readings to be "clipped" as much as possible.
capacity (float): Total "funds" or "energy" available for clipping (in $ or Joules)
The maximum allowed integrated area under time series and above the clipping threshold.
rate_limit: Maximum rate at which funds or energy can be expended (in $/s or Watts)
            The clipping threshold is limited to no less than the peak power (price rate) minus this rate_limit
Returns:
dict: Timestamp of the start and end of the period of the maximum clipped integrated increase
>>> t = ['2014-12-09T00:00', '2014-12-09T00:15', '2014-12-09T00:30', '2014-12-09T00:45',
... '2014-12-09T01:00', '2014-12-09T01:15', '2014-12-09T01:30', '2014-12-09T01:45']
>>> import pandas as pd
>>> ts = pd.Series([217, 234, 235, 231, 219, 219, 231, 232], index=pd.to_datetime(t))
>>> clipping_threshold(ts, capacity=60000) # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
218.13...
>>> clipping_threshold(ts, capacity=30000) # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
224.15...
"""
params = clipping_params(ts, capacity=capacity, rate_limit=rate_limit)
if params:
return params['threshold']
return None
def join_time_series(serieses, ignore_year=False, T_s=None, aggregator='mean'):
"""Combine a dict of pd.Series objects into a single pd.DataFrame with optional downsampling
FIXME:
For ignore_year and multi-year data, the index (in seconds) is computed assuming
366 days per year (leap year). So 3 out of 4 years will have a 1-day (86400 s) gap
Arguments:
        serieses (dict of Series): dictionary of named timestamp-indexed Series objects
ignore_year (bool): ignore the calendar year, but not the season (day of year)
If True, the DataFrame index will be seconds since the beginning of the
year in each Series index, i.e. midnight Jan 1, 2014 will have index=0
as will Jan 1, 2010 if two Series start on those two dates.
T_s (float): sample period in seconds (for downsampling)
aggregator (str or func): e.g. 'mean', 'sum', np.std
"""
if ignore_year:
df = pd.DataFrame()
for name, ts in serieses.iteritems():
# FIXME: deal with leap years
sod = np.array(map(lambda x: (x.hour * 3600 + x.minute * 60 + x.second),
ts.index.time))
# Coerce soy to an integer so that merge/join operations identify same values
# (floats don't equal!?)
soy = (ts.index.dayofyear + 366 * (ts.index.year - ts.index.year[0])) * 3600 * 24 + sod
ts2 = pd.Series(ts.values, index=soy)
ts2 = ts2.dropna()
ts2 = ts2.sort_index()
df2 = pd.DataFrame({name: ts2.values}, index=soy)
df = df.join(df2, how='outer')
if T_s and aggregator:
df = df.groupby(lambda x: int(x /
float(T_s))).aggregate(dict((name, aggregator) for name in df.columns))
else:
df = pd.DataFrame(serieses)
if T_s and aggregator:
x0 = df.index[0]
df = df.groupby(lambda x: int((x - x0).total_seconds() /
float(T_s))).aggregate(dict((name, aggregator) for name in df.columns))
    # FIXME: convert seconds since beginning of first year back into Timestamp instances
return df
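# Illustrative usage sketch (added; not part of the original module; assumes the legacy
# pandas/Python 2 environment this module targets). With ignore_year=True two series
# recorded in different calendar years are aligned on "seconds since the start of their
# own first year", which makes season-over-season comparison possible.
def _example_join_time_series():
    a = pd.Series([1.0, 2.0], index=pd.date_range('2010-01-01', periods=2, freq='D'))
    b = pd.Series([3.0, 4.0], index=pd.date_range('2014-01-01', periods=2, freq='D'))
    df = join_time_series({'y2010': a, 'y2014': b}, ignore_year=True)
    # Both columns share the same integer index: 86400 * day-of-year + second-of-day.
    return df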
def simulate(t=1000, poly=(0.,), sinusoids=None, sigma=0, rw=0, irw=0, rrw=0):
"""Simulate a random signal with seasonal (sinusoids), linear and quadratic trend, RW, IRW, and RRW
Arguments:
t (int or list of float): number of samples or time vector, default = 1000
poly (list of float): polynomial coefficients (in decreasing "order") passed to `numpy.polyval`
i.e. poly[0]*x**(N-1) + ... + poly[N-1]
sinusoids (list of list): [[period], [amplitude, period], or [ampl., period, phase]]
>>> len(simulate(poly=(0,),rrw=1))
1000
>>> simulate(t=range(3), poly=(1,2)) # doctest: +NORMALIZE_WHITESPACE
0 2
1 3
2 4
dtype: float64
>>> all(simulate(t=50, sinusoids=((1,2,3),)) == simulate(t=range(50), sinusoids=((1,2,3),)))
True
>>> any(simulate(t=100))
False
>>> abs(simulate(sinusoids=42.42).values[1] + simulate(sinusoids=42.42).values[-1]) < 1e-10
True
>>> simulate(t=17,sinusoids=[42, 16]).min()
-42.0
>>> all((simulate(t=range(10), sinusoids=(1, 9, 4.5))+simulate(t=10, sinusoids=(1,9))).abs() < 1e-10)
True
"""
if t and isinstance(t, int):
t = np.arange(t, dtype=np.float64)
else:
t = np.array(t, dtype=np.float64)
N = len(t)
poly = poly or (0.,)
poly = listify(poly)
y = np.polyval(poly, t)
sinusoids = listify(sinusoids or [])
if any(isinstance(ATP, (int, float)) for ATP in sinusoids):
sinusoids = [sinusoids]
for ATP in sinusoids:
# default period is 1 more than the length of the simulated series (no values of the cycle are repeated)
T = (t[-1] - t[0]) * N / (N - 1.)
# default amplitude is 1 and phase is 0
A, P = 1., 0
try:
A, T, P = ATP
except (TypeError, ValueError):
try:
A, T = ATP
except (TypeError, ValueError):
# default period is 1 more than the length of the simulated series
# (no values of the cycle are repeated)
A = ATP[0]
# print(A, T, P)
# print(t[1] - t[0])
y += A * np.sin(2 * np.pi * (t - P) / T)
if sigma:
y += np.random.normal(0.0, float(sigma), N)
if rw:
y += np.random.normal(0.0, float(rw), N).cumsum()
if irw:
y += np.random.normal(0.0, float(irw), N).cumsum().cumsum()
if rrw:
y += np.random.normal(0.0, float(rrw), N).cumsum().cumsum().cumsum()
return pd.Series(y, index=t)
def normalize_symbols(symbols, *args, **kwargs):
"""Coerce into a list of uppercase strings like "GOOG", "$SPX, "XOM"
Flattens nested lists in `symbols` and converts all list elements to strings
Arguments:
symbols (str or list of str): list of market ticker symbols to normalize
If `symbols` is a str a get_symbols_from_list() call is used to retrieve the list of symbols
        postprocess (func): function to apply to strings after they've been stripped
default = str.upper
FIXME:
- list(set(list(symbols))) and `args` separately so symbols may be duplicated in symbols and args
- `postprocess` should be a method to facilitate monkey-patching
Returns:
        list of str: list of canonical ticker symbol strings (typically after .upper().strip())
Examples:
>> normalize_symbols("Goog,AAPL")
['GOOG', 'AAPL']
>> normalize_symbols(" $SPX ", " aaPL ")
['$SPX', 'AAPL']
>> normalize_symbols(" $SPX ", " aaPL ", postprocess=str)
['$SPX', 'aaPL']
>> normalize_symbols(["$SPX", ["GOOG", "AAPL"]])
['GOOG', 'AAPL', '$SPX']
>> normalize_symbols("$spy", ["GOOGL", "Apple"], postprocess=str)
['$spy', 'GOOGL', 'Apple']
"""
postprocess = kwargs.get('postprocess', None) or str.upper
if ( (hasattr(symbols, '__iter__') and not any(symbols))
or (isinstance(symbols, (list, tuple, Mapping)) and (not symbols or not any(symbols)))):
return []
args = normalize_symbols(args, postprocess=postprocess)
if isinstance(symbols, basestring):
try:
return list(set(get_symbols_from_list(symbols))) + args
except:
return [postprocess(s.strip()) for s in symbols.split(',')] + args
else:
ans = []
for sym in list(symbols):
ans += normalize_symbols(sym, postprocess=postprocess)
return list(set(ans))
def series_bollinger(series, window=20, sigma=1., plot=False):
mean = pd.rolling_mean(series, window=window)
std = pd.rolling_std(series, window=window)
df = pd.DataFrame({'value': series, 'mean': mean, 'upper': mean + sigma * std, 'lower': mean - sigma * std})
    bollinger_values = (series - mean) / std
if plot:
df.plot()
pd.DataFrame({'bollinger': bollinger_values}).plot()
plt.show()
return bollinger_values
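# Illustrative usage sketch (added; not part of the original module; assumes the legacy
# pandas pd.rolling_* API used above). The Bollinger value is the z-score of the price
# relative to its rolling window, (price - mean) / std, so values beyond +/-1 mark
# excursions outside the classic one-sigma Bollinger bands.
def _example_series_bollinger():
    prices = pd.Series(np.linspace(100.0, 110.0, 60))
    bol = series_bollinger(prices, window=20, sigma=1., plot=False)
    # The first window - 1 values are NaN; afterwards |bol| > 1 flags band breakouts.
    return bol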
def frame_bollinger(df, window=20, sigma=1., plot=False):
bol = pd.DataFrame()
for col in df.columns:
bol[col] = series_bollinger(df[col], plot=False)
return bol
def double_sinc(T_0=120, T_N=240, T_s=0.01, A=[1, .9], sigma=0.01, T_cyc=10, N_cyc=[3, 2], verbosity=0):
# T0, TN, A, sigma = np.array(T0), np.array(TN), np.array(A), np.array(sigma)
N = int(T_N / T_s)
t = np.arange(0, T_N, T_s)
# t_mid = 0.5 * (t[-1] + t[0])
e = sigma * np.random.randn(N)
x = A[0] * np.sinc(((t - T_0) * N_cyc[0] * 2 / T_cyc) % T_cyc) * np.sinc((t - T_0) * N_cyc[1] * 2 / t[-1])
y = x + e
df = pd.DataFrame({'x': x, 'y': y}, index=t)
if verbosity > 0:
df.plot()
plt.show(block=False)
return df
def sinc_signals(T0=[60, 120], TN=[240, 160], A=[1, .9], sigma=[.03, .02], T_cyc=10, Ts=0.01):
T0, TN, A, sigma = np.array(T0), np.array(TN), np.array(A), np.array(sigma)
N1 = int(TN[0] / Ts)
N2 = int(TN[1] / Ts)
i1 = np.arange(0, N1)
i2 = np.arange(0, N2)
t1 = T0[0] + i1 * Ts
t2 = t1[i2 + int((T0[1] - T0[0]) / Ts)]
e1 = sigma[0] * np.random.randn(N1)
e2 = sigma[1] * np.random.randn(N2)
signal = A[0] * np.sinc((t1[i1] * 5. / T_cyc) % T_cyc) * np.sinc((t1[i1]) * 4 / t1[-1])
x1 = signal + e1
x2 = signal[i2 + int((T0[1] - T0[0]) / Ts)] + e2
    df = pd.DataFrame({'signal 1': pd.Series(x1, index=t1), 'signal 2': pd.Series(x2, index=t2)})
import numpy as np
import pandas as pd
import pandas.testing as pdt
import pyarrow as pa
import pytest
from pandas.arrays import SparseArray
from kartothek.core.cube.constants import (
KTK_CUBE_DF_SERIALIZER,
KTK_CUBE_METADATA_DIMENSION_COLUMNS,
KTK_CUBE_METADATA_KEY_IS_SEED,
KTK_CUBE_METADATA_PARTITION_COLUMNS,
KTK_CUBE_METADATA_SUPPRESS_INDEX_ON,
)
from kartothek.core.cube.cube import Cube
from kartothek.core.dataset import DatasetMetadata
from kartothek.core.index import ExplicitSecondaryIndex, PartitionIndex
from kartothek.io.testing.utils import assert_num_row_groups
from kartothek.io_components.cube.write import MultiTableCommitAborted
from kartothek.io_components.metapartition import SINGLE_TABLE
from kartothek.serialization._parquet import ParquetSerializer
__all__ = (
"test_accept_projected_duplicates",
"test_distinct_branches",
"test_do_not_modify_df",
"test_empty_df",
"test_fail_all_empty",
"test_fail_duplicates_global",
"test_fail_duplicates_local",
"test_fail_no_store_factory",
"test_fail_nondistinc_payload",
"test_fail_not_a_df",
"test_fail_partial_build",
"test_fail_partial_overwrite",
"test_fail_partition_on_1",
"test_fail_partition_on_3",
"test_fail_partition_on_4",
"test_fail_partition_on_nondistinc_payload",
"test_fail_sparse",
"test_fail_wrong_dataset_ids",
"test_fail_wrong_types",
"test_fails_duplicate_columns",
"test_fails_metadata_nested_wrong_type",
"test_fails_metadata_unknown_id",
"test_fails_metadata_wrong_type",
"test_fails_missing_dimension_columns",
"test_fails_missing_partition_columns",
"test_fails_missing_seed",
"test_fails_no_dimension_columns",
"test_fails_null_dimension",
"test_fails_null_index",
"test_fails_null_partition",
"test_fails_projected_duplicates",
"test_indices",
"test_metadata",
"test_nones",
"test_overwrite",
"test_overwrite_rollback_ktk_cube",
"test_parquet",
"test_partition_on_enrich_extra",
"test_partition_on_enrich_none",
"test_partition_on_index_column",
"test_projected_data",
"test_regression_pseudo_duplicates",
"test_rowgroups_are_applied_when_df_serializer_is_passed_to_build_cube",
"test_simple_seed_only",
"test_simple_two_datasets",
"test_single_rowgroup_when_df_serializer_is_not_passed_to_build_cube",
"test_split",
)
def test_simple_seed_only(driver, function_store):
"""
    Simple integration test w/ a seed dataset only. This is the simplest way to create a cube.
"""
df = pd.DataFrame({"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v": [10, 11, 12, 13]})
cube = Cube(dimension_columns=["x"], partition_columns=["p"], uuid_prefix="cube")
result = driver(data=df, cube=cube, store=function_store)
assert set(result.keys()) == {cube.seed_dataset}
ds = list(result.values())[0]
ds = ds.load_all_indices(function_store())
assert ds.uuid == cube.ktk_dataset_uuid(cube.seed_dataset)
assert len(ds.partitions) == 2
assert set(ds.indices.keys()) == {"p", "x"}
assert isinstance(ds.indices["p"], PartitionIndex)
assert isinstance(ds.indices["x"], ExplicitSecondaryIndex)
assert ds.table_name == SINGLE_TABLE
def test_simple_two_datasets(driver, function_store):
"""
    Simple integration test w/ 2 datasets.
"""
df_source = pd.DataFrame(
{"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v1": [10, 11, 12, 13]}
)
df_enrich = pd.DataFrame(
{"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v2": [20, 21, 22, 23]}
)
cube = Cube(
dimension_columns=["x"],
partition_columns=["p"],
uuid_prefix="cube",
seed_dataset="source",
)
result = driver(
data={"source": df_source, "enrich": df_enrich}, cube=cube, store=function_store
)
assert set(result.keys()) == {cube.seed_dataset, "enrich"}
ds_source = result[cube.seed_dataset].load_all_indices(function_store())
ds_enrich = result["enrich"].load_all_indices(function_store())
assert ds_source.uuid == cube.ktk_dataset_uuid(cube.seed_dataset)
assert ds_enrich.uuid == cube.ktk_dataset_uuid("enrich")
assert len(ds_source.partitions) == 2
assert len(ds_enrich.partitions) == 2
assert set(ds_source.indices.keys()) == {"p", "x"}
assert isinstance(ds_source.indices["p"], PartitionIndex)
assert isinstance(ds_source.indices["x"], ExplicitSecondaryIndex)
assert set(ds_enrich.indices.keys()) == {"p"}
assert isinstance(ds_enrich.indices["p"], PartitionIndex)
assert ds_source.table_name == SINGLE_TABLE
assert ds_enrich.table_name == SINGLE_TABLE
def test_indices(driver, function_store):
"""
Test that index structures are created correctly.
"""
df_source = pd.DataFrame(
{
"x": [0, 1, 2, 3],
"p": [0, 0, 1, 1],
"v1": [10, 11, 12, 13],
"i1": [100, 101, 102, 103],
}
)
df_enrich = pd.DataFrame(
{
"x": [0, 1, 4, 5],
"p": [0, 0, 2, 2],
"v2": [20, 21, 22, 23],
"i2": [200, 201, 202, 203],
}
)
cube = Cube(
dimension_columns=["x"],
partition_columns=["p"],
uuid_prefix="cube",
seed_dataset="source",
index_columns=["i1", "i2"],
)
result = driver(
data={"source": df_source, "enrich": df_enrich}, cube=cube, store=function_store
)
assert set(result.keys()) == {cube.seed_dataset, "enrich"}
ds_source = result[cube.seed_dataset].load_all_indices(function_store())
ds_enrich = result["enrich"].load_all_indices(function_store())
assert set(ds_source.indices.keys()) == {"p", "x", "i1"}
assert isinstance(ds_source.indices["p"], PartitionIndex)
assert isinstance(ds_source.indices["x"], ExplicitSecondaryIndex)
assert isinstance(ds_source.indices["i1"], ExplicitSecondaryIndex)
assert set(ds_enrich.indices.keys()) == {"p", "i2"}
assert isinstance(ds_enrich.indices["p"], PartitionIndex)
assert isinstance(ds_enrich.indices["i2"], ExplicitSecondaryIndex)
def test_dimension_index_suppression(driver, function_store):
"""
Test that suppress_index_on works as expected
"""
df_source = pd.DataFrame(
{
"x": [0, 0, 1, 1],
"y": [10, 11, 12, 13],
"p": [0, 0, 1, 1],
"v1": [10, 11, 12, 13],
"i1": [100, 101, 102, 103],
}
)
cube = Cube(
dimension_columns=["x", "y"],
partition_columns=["p"],
uuid_prefix="cube",
seed_dataset="source",
index_columns=["i1", "i2"],
suppress_index_on=["x"],
)
result = driver(data={"source": df_source}, cube=cube, store=function_store)
ds_source = result[cube.seed_dataset].load_all_indices(function_store())
assert set(ds_source.indices.keys()) == {"p", "i1", "y"}
assert isinstance(ds_source.indices["p"], PartitionIndex)
assert isinstance(ds_source.indices["i1"], ExplicitSecondaryIndex)
assert isinstance(ds_source.indices["y"], ExplicitSecondaryIndex)
def test_do_not_modify_df(driver, function_store):
"""
Functions should not modify their inputs.
"""
    df = pd.DataFrame({"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v": [10, 11, 12, 13]})
import numpy as np
import pandas as pd
import pytest
from rulelist.datastructure.attribute.nominal_attribute import activation_nominal, NominalAttribute
class TestNominalAttribute(object):
def test_normal(self):
dictdata = {"column1" : np.array(["below50" if i < 50 else "above49" for i in range(100)]),
"column2" : np.ones(100)}
test_dataframe = pd.DataFrame(data=dictdata)
input_name = "column1"
input_max_operators = 1
input_minsupp = 0
expected_number_items = 2
expected_cardinality_operator = {1: 2}
output_attribute = NominalAttribute(input_name, test_dataframe[input_name], input_max_operators,input_minsupp)
actual_number_items= len(output_attribute.items)
actual_cardinality_operator = output_attribute.cardinality_operator
pd.testing.assert_series_equal(output_attribute.values, test_dataframe[input_name])
assert expected_number_items == actual_number_items
assert expected_cardinality_operator == actual_cardinality_operator
def test_onlyonevalue(self):
dictdata = {"column1" : np.array(["below100" for i in range(100)]),
"column2" : np.ones(100)}
test_dataframe = pd.DataFrame(data=dictdata)
input_name = "column1"
input_max_operators = 1
input_minsupp = 0
expected_number_items = 1
expected_cardinality_operator = {1: 1}
expected_n_cutpoints = 3
output_attribute = NominalAttribute(input_name, test_dataframe[input_name], input_max_operators,input_minsupp)
actual_number_items= len(output_attribute.items)
actual_cardinality_operator = output_attribute.cardinality_operator
pd.testing.assert_series_equal(output_attribute.values, test_dataframe[input_name])
assert expected_number_items == actual_number_items
assert expected_cardinality_operator == actual_cardinality_operator
class TestActivationNominal(object):
def test_left_interval(self):
dictdata = {"column1" : np.array(["below50" if i < 50 else "above49" for i in range(100)]),
"column2" : np.ones(100)}
test_dataframe = pd.DataFrame(data=dictdata)
input_attribute_name = "column1"
input_category = "below50"
expected_vector = pd.Series(name= "column1", data = [True if i < 50 else False for i in range(100)])
actual_vector = activation_nominal(test_dataframe,input_attribute_name,input_category)
pd.testing.assert_series_equal(actual_vector, expected_vector, check_exact=True)
def test_right_interval(self):
dictdata = {"column1": np.array(["below50" if i < 50 else "above49" for i in range(100)]),
"column2": np.ones(100)}
test_dataframe = pd.DataFrame(data=dictdata)
input_attribute_name = "column1"
input_category = "above49"
expected_vector = pd.Series(name="column1", data=[True if i > 49 else False for i in range(100)])
actual_vector = activation_nominal(test_dataframe, input_attribute_name, input_category)
        pd.testing.assert_series_equal(actual_vector, expected_vector, check_exact=True)
import ibeis
import six
import vtool
import utool
import numpy as np
import numpy.linalg as npl # NOQA
import pandas as pd
from vtool import clustering2 as clustertool
from vtool import nearest_neighbors as nntool
from plottool import draw_func2 as df2
np.set_printoptions(precision=2)
pd.set_option('display.max_rows', 10)
pd.set_option('display.max_columns', 10)
pd.set_option('display.notebook_repr_html', True)
ibeis.ensure_pz_mtest()
#taids = ibs.get_valid_aids()
#tvecs_list = ibs.get_annot_vecs(taids)
#tkpts_list = ibs.get_annot_kpts(taids)
#tvec_list = np.vstack(tvecs_list)
#print(idx2_vec)
#labels, words = vtool.clustering.cached_akmeans(tvec_list, 1000, 30, cache_dir='.')
#tvecdf_list = [pd.DataFrame(vecs) for vecs in tvecs_list]
#tvecs_df = pd.DataFrame(tvecdf_list, index=taids)
#kpts_col = pd.DataFrame(tkpts_list, index=taids, columns=['kpts'])
#vecs_col = pd.DataFrame(tvecs_list, index=taids, columns=['vecs'])
#tvecs_dflist = [pd.DataFrame(vecs, index=np.arange(len(vecs))) for vecs in tvecs_list]
#pd.concat(tvecs_dflist)
## Bui
#taids = ibs.get_valid_aids()
#tvecs_list = ibs.get_annot_vecs(taids)
#tkpts_list = ibs.get_annot_kpts(taids)
#orig_idx2_vec, orig_idx2_ax, orig_idx2_fx = vtool.nearest_neighbors.invertible_stack(tvecs_list, taids)
#annots_df = pd.concat([vecs_col, kpts_col], axis=1)
#annots_df
#idx2_vec = np.vstack(annots_df['vecs'].values)
##idx2_ax =
#idx2_vec, idx2_ax, idx2_fx = vtool.nearest_neighbors.invertible_stack(tvecs_list, taids)
#labels, words = vtool.clustering2.cached_akmeans(tvec_list, 1000, 30)
#words = centroids
def display_info(ibs, invindex, annots_df):
#################
#from ibeis.other import dbinfo
#print(ibs.get_infostr())
#dbinfo.get_dbinfo(ibs, verbose=True)
#################
#print('Inverted Index Stats: vectors per word')
#print(utool.get_stats_str(map(len, invindex.wx2_idxs.values())))
#################
qfx2_vec = annots_df['vecs'][1]
centroids = invindex.words
num_pca_dims = 3 # 3
whiten = False
kwd = dict(num_pca_dims=num_pca_dims,
whiten=whiten,)
#clustertool.rrr()
def makeplot_(fnum, prefix, data, labels='centroids', centroids=centroids):
return clustertool.plot_centroids(data, centroids, labels=labels,
fnum=fnum, prefix=prefix + '\n', **kwd)
#makeplot_(1, 'centroid vecs', centroids)
#makeplot_(2, 'database vecs', invindex.idx2_vec)
#makeplot_(3, 'query vecs', qfx2_vec)
#makeplot_(4, 'database vecs', invindex.idx2_vec)
#makeplot_(5, 'query vecs', qfx2_vec)
#################
def make_annot_df(ibs):
aid_list = ibs.get_valid_aids()
_kpts_col = pd.DataFrame(ibs.get_annot_kpts(aid_list),
index=aid_list, columns=['kpts'])
_vecs_col = pd.DataFrame(ibs.get_annot_vecs(aid_list),
index=aid_list, columns=['vecs'])
annots_df = pd.concat([_vecs_col, _kpts_col], axis=1)
return annots_df
def learn_visual_words(annots_df, train_aids, nCentroids):
vecs_list = annots_df['vecs'][train_aids].as_matrix()
train_vecs = np.vstack(vecs_list)
print('Training %d word vocabulary with %d annots and %d descriptors' %
(nCentroids, len(train_aids), len(train_vecs)))
words = clustertool.cached_akmeans(train_vecs, nCentroids, max_iters=100)
return words
def index_data_annots(annots_df, daids, words):
vecs_list = annots_df['vecs'][daids]
flann_params = {}
wordflann = vtool.nearest_neighbors.flann_cache(words, flann_params=flann_params)
ax2_aid = np.array(daids)
idx2_vec, idx2_ax, idx2_fx = nntool.invertible_stack(vecs_list, np.arange(len(ax2_aid)))
invindex = InvertedIndex(words, wordflann, idx2_vec, idx2_ax, idx2_fx, ax2_aid)
invindex.compute_internals()
return invindex
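# Illustrative pipeline sketch (added; not part of the original script). Typical order of
# operations for building the inverted index used below; `ibs` is an opened ibeis
# controller and the aid lists are whatever train/database split is chosen.
def _example_build_invindex(ibs, train_aids, daids, nCentroids=1000):
    annots_df = make_annot_df(ibs)
    words = learn_visual_words(annots_df, train_aids, nCentroids)
    invindex = index_data_annots(annots_df, daids, words)
    return annots_df, invindex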
@six.add_metaclass(utool.ReloadingMetaclass)
class InvertedIndex(object):
def __init__(invindex, words, wordflann, idx2_vec, idx2_ax, idx2_fx, ax2_aid):
invindex.wordflann = wordflann
invindex.words = words # visual word centroids
invindex.ax2_aid = ax2_aid # annot index -> annot id
invindex.idx2_vec = idx2_vec # stacked index -> descriptor vector
invindex.idx2_ax = idx2_ax # stacked index -> annot index
invindex.idx2_fx = idx2_fx # stacked index -> feature index
invindex.idx2_wx = None # stacked index -> word index
invindex.wx2_idxs = None # word index -> stacked indexes
invindex.wx2_drvecs = None # word index -> residual vectors
#invindex.compute_internals()
def compute_internals(invindex):
idx2_vec = invindex.idx2_vec
wx2_idxs, idx2_wx = invindex.assign_to_words(idx2_vec)
wx2_drvecs = invindex.compute_residuals(idx2_vec, wx2_idxs)
invindex.idx2_wx = idx2_wx
invindex.wx2_idxs = wx2_idxs
invindex.wx2_drvecs = wx2_drvecs
def assign_to_words(invindex, idx2_vec):
idx2_wx, _idx2_wdist = invindex.wordflann.nn_index(idx2_vec, 1)
if True:
assign_df = pd.DataFrame(idx2_wx, columns=['wordindex'])
grouping = assign_df.groupby('wordindex')
wx2_idxs = grouping.wordindex.indices
else:
# TODO: replace with pandas groupby
idx_list = list(range(len(idx2_wx)))
wx2_idxs = utool.group_items(idx_list, idx2_wx.tolist())
return wx2_idxs, idx2_wx
def compute_residuals(invindex, idx2_vec, wx2_idxs):
""" returns mapping from word index to a set of residual vectors """
words = invindex.words
wx2_rvecs = {}
for word_index in wx2_idxs.keys():
# for each word
idxs = wx2_idxs[word_index]
vecs = np.array(idx2_vec[idxs], dtype=np.float64)
word = np.array(words[word_index], dtype=np.float64)
# compute residuals of all vecs assigned to this word
residuals = np.array([word - vec for vec in vecs])
# normalize residuals
residuals_n = vtool.linalg.normalize_rows(residuals)
wx2_rvecs[word_index] = residuals_n
        return wx2_rvecs
#def smk_similarity(wx2_qrvecs, wx2_drvecs):
# similarity_matrix = (rvecs1.dot(rvecs2.T))
def query_inverted_index(annots_df, qaid, invindex):
qfx2_vec = annots_df['vecs'][qaid]
wx2_qfxs, qfx2_wx = invindex.assign_to_words(qfx2_vec)
wx2_qrvecs = invindex.compute_residuals(qfx2_vec, wx2_qfxs)
daid = invindex.ax2_aid[0]
def single_daid_similairty(invindex, daid):
""" daid = 4
FIXME: Inefficient code
"""
ax = np.where(invindex.ax2_aid == daid)[0]
wx2_dfxs = {}
wx2_drvecs = {}
for wx, idxs in invindex.wx2_idxs.items():
valid = (invindex.idx2_ax[idxs] == ax)
dfxs = invindex.idx2_fx[idxs][valid]
drvecs = invindex.wx2_drvecs[wx][valid]
wx2_dfxs[wx] = dfxs
wx2_drvecs[wx] = drvecs
# Similarity to a single database annotation
query_wxs = set(wx2_qrvecs.keys())
data_wxs = set(wx2_drvecs.keys())
total_score = 0
for wx in data_wxs.intersection(query_wxs):
qrvecs = wx2_qrvecs[wx]
drvecs = wx2_drvecs[wx]
residual_similarity = qrvecs.dot(drvecs.T)
scores = selectivity_function(residual_similarity)
total_score += scores.sum()
return total_score
def selectivity_function(residual_similarity, alpha=3, thresh=0):
""" sigma from SMK paper """
u = residual_similarity
scores = (np.sign(u) * np.abs(u)) ** alpha
scores[scores <= thresh] = 0
return scores
# Entire database
daid2_score = utool.ddict(lambda: 0)
query_wxs = set(wx2_qrvecs.keys())
data_wxs = set(invindex.wx2_drvecs.keys())
qfx2_axs = []
qfx2_fm = []
qfx2_fs = []
aid_fm = []
aid_fs = []
idx2_daid = pd.Series(invindex.ax2_aid[invindex.idx2_ax], name='daid')
idx2_dfx = pd.Series(invindex.idx2_fx, name='dfx')
idx2_wfx = pd.Series(invindex.idx2_wx, name='dwx')
idx_df = pd.concat((idx2_daid, idx2_dfx, idx2_wfx), axis=1, names=['idx'])
"""
Utils for time series generation
--------------------------------
"""
import math
from typing import Union
import numpy as np
import pandas as pd
import holidays
from ..timeseries import TimeSeries
from ..logging import raise_if_not, get_logger
logger = get_logger(__name__)
def constant_timeseries(value: float = 1,
length: int = 10,
freq: str = 'D',
start_ts: pd.Timestamp = pd.Timestamp('2000-01-01')) -> TimeSeries:
"""
Creates a constant univariate TimeSeries with the given value, length, start date and frequency.
Parameters
----------
value
The constant value that the TimeSeries object will assume at every index.
length
The length of the returned TimeSeries.
freq
The time difference between two adjacent entries in the returned TimeSeries. A DateOffset alias is expected;
see `docs <https://pandas.pydata.org/pandas-docs/stable/user_guide/TimeSeries.html#dateoffset-objects>`_.
start_ts
The time index of the first entry in the returned TimeSeries.
Returns
-------
TimeSeries
A constant TimeSeries with value 'value'.
"""
times = pd.date_range(periods=length, freq=freq, start=start_ts)
values = np.full(length, value)
return TimeSeries.from_times_and_values(times, values, freq=freq)
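# Illustrative usage sketch (added; not part of the original module): constant_timeseries
# is a thin wrapper around pd.date_range + np.full.
def _example_constant_timeseries():
    # A 4-point hourly TimeSeries of 2.5s starting at the default 2000-01-01 00:00:00.
    return constant_timeseries(value=2.5, length=4, freq='H')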
def linear_timeseries(start_value: float = 0,
end_value: float = 1,
length: int = 10,
freq: str = 'D',
                      start_ts: pd.Timestamp = pd.Timestamp('2000-01-01')) -> TimeSeries:
import pandas as pd
import numpy as np
from pathlib import Path
from datetime import datetime as dt
def mergeManagers(managers, gameLogs):
#Get visiting managers
visitingManagers = gameLogs[['row','Date','Visiting team manager ID']]
visitingManagers['yearID'] = pd.DatetimeIndex(pd.to_datetime(visitingManagers['Date'])).year-1
visitingManagers = pd.merge(visitingManagers, managers, left_on=['yearID','Visiting team manager ID'], right_on=['yearID','playerID'], how="left")
#Get home managers
homeManagers = gameLogs[['row','Date','Home team manager ID']]
homeManagers['yearID'] = pd.DatetimeIndex(pd.to_datetime(homeManagers['Date'])).year-1
homeManagers = pd.merge(homeManagers, managers, left_on=['yearID','Home team manager ID'], right_on=['yearID','playerID'], how="left")
#Merge managers
homes = homeManagers[['row','Games','Wins','Losses']]
visitings = visitingManagers[['row','Games','Wins','Losses']]
merged = pd.merge(homes, visitings, on='row', suffixes=(' home manager',' visiting manager'))
print("Merged Managers. Checksum: ", gameLogs.index.size==merged.index.size, gameLogs.index.size, merged.index.size)
return merged
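# Illustrative usage sketch (added; not part of the original script; the column values
# below are made up). mergeManagers looks up each manager's *previous season* record
# (yearID = game year - 1) for both sides and joins it back onto the game log by 'row'.
def _example_merge_managers():
    gameLogs = pd.DataFrame({'row': [0],
                             'Date': ['2001-05-01'],
                             'Visiting team manager ID': ['mgrA'],
                             'Home team manager ID': ['mgrB']})
    managers = pd.DataFrame({'yearID': [2000, 2000],
                             'playerID': ['mgrA', 'mgrB'],
                             'Games': [162, 162],
                             'Wins': [90, 70],
                             'Losses': [72, 92]})
    return mergeManagers(managers, gameLogs)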
def mergePitchings(pitchers, gameLogs):
#Get visiting pitchers
visitingPitchers = gameLogs[['row','Date','Visiting starting pitcher ID']]
visitingPitchers['yearID'] = pd.DatetimeIndex(pd.to_datetime(visitingPitchers['Date'])).year-1
visitingPitchers = pd.merge(visitingPitchers, pitchers, left_on=['yearID','Visiting starting pitcher ID'], right_on=['yearID','playerID'], how="left")
#Get home pitchers
homePitchers = gameLogs[['row','Date','Home starting pitcher ID']]
homePitchers['yearID'] = pd.DatetimeIndex(pd.to_datetime(homePitchers['Date'])).year-1
homePitchers = pd.merge(homePitchers, pitchers, left_on=['yearID','Home starting pitcher ID'], right_on=['yearID','playerID'], how="left")
#Merge pitchers
homes = homePitchers.drop(columns=['yearID','Home starting pitcher ID','playerID','Date'])
visitings = visitingPitchers.drop(columns=['yearID','Visiting starting pitcher ID','playerID','Date'])
merged = pd.merge(homes, visitings, on='row', suffixes=(' home pitcher',' visiting pitcher'))
print("Merged Pitchings. Checksum: ", gameLogs.index.size==merged.index.size, gameLogs.index.size, merged.index.size)
return merged
def mergePeople(people, gameLogs):
#Merge people
allPeople = []
for IDColumn in gameLogs.columns:
if IDColumn.find("starting")>-1:
merged = pd.merge(gameLogs[['row','Date',IDColumn]], people, how="left", left_on=[IDColumn], right_on=['playerID'])
merged['age'] = (pd.to_datetime(merged['Date']) - pd.to_datetime(merged['birthdate'])) / np.timedelta64(1, 'Y')
newColumns = {"age":IDColumn.replace(" ID"," "+" age")}
for column in people.drop(columns=['playerID','birthdate']).columns:
newColumns[column] = IDColumn.replace(" ID"," "+str(column))
merged = merged.rename(columns=newColumns)
allPeople.append(merged[['row']+list(newColumns.values())])
merged = gameLogs['row']
for merSal in allPeople:
merged = pd.merge(merged, merSal, how="left", on='row')
print("Merged People. Checksum: ", gameLogs.index.size==merged.index.size, gameLogs.index.size, merged.index.size)
return merged
def mergeTeams(teams, gameLogs):
#Get visiting teams
visitingTeams = gameLogs[['row','Date','Visiting team','Visiting league AL']]
visitingTeams['yearID'] = pd.DatetimeIndex(pd.to_datetime(visitingTeams['Date'])).year-1
visitingTeams = pd.merge(visitingTeams.drop(columns=['Date']), teams, left_on=['yearID','Visiting team'], right_on=['yearID','teamID'], how="left")
#Get home teams
homeTeams = gameLogs[['row','Date','Home team','Home league AL']]
    homeTeams['yearID'] = pd.DatetimeIndex(pd.to_datetime(homeTeams['Date'])).year-1
import sys
import pandas as pd
import numpy as np
def load_data(messages_filepath, categories_filepath):
'''
INPUT
file paths of the message and categories files in cvs format
OUTPUT
a dataframe contains both dataset
'''
messages = pd.read_csv(messages_filepath)
    categories = pd.read_csv(categories_filepath)
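# Illustrative sketch (added; not part of the original script -- the remainder of
# load_data is truncated above). The docstring says both CSVs end up in one dataframe;
# a typical way to do that is an inner merge on a shared 'id' column, which is an
# assumption here rather than something this excerpt shows.
def _example_combine(messages, categories):
    return pd.merge(messages, categories, on='id', how='inner')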
# aisles : aisle_id | aisle
# departments : department_id | department
# orders_products (merge prior + train): order_id | product_id | add_to_cart_order | reordered
# orders : order_id | user_id | eval_set | order_number | order_dow | order_hour_of_day | days_since_prior_order
# products : product_id | product_name | aisle_id | department_id
import pandas as pd
AISLES_CSV_FILEPATH = 'C:\\Users\\tadav\\OneDrive\\Bureau\\Data&GO\\PROJET 1 DATA ANALYSIS\\datasCSV\\aisles.csv'
DEPARTMENTS_CSV_FILEPATH = 'C:\\Users\\tadav\\OneDrive\\Bureau\\Data&GO\\PROJET 1 DATA ANALYSIS\\datasCSV\\departments.csv'
ORDER_PRODUCTS_PRIOR_CSV_FILEPATH = 'C:\\Users\\tadav\\OneDrive\\Bureau\\Data&GO\\PROJET 1 DATA ANALYSIS\\datasCSV\\order_products__prior.csv'
ORDER_PRODUCTS_TRAIN_CSV_FILEPATH = 'C:\\Users\\tadav\\OneDrive\\Bureau\\Data&GO\\PROJET 1 DATA ANALYSIS\\datasCSV\\order_products__train.csv'
ORDERS_CSV_FILEPATH = 'C:\\Users\\tadav\\OneDrive\\Bureau\\Data&GO\\PROJET 1 DATA ANALYSIS\\datasCSV\\orders.csv'
PRODUCTS_CSV_FILEPATH = 'C:\\Users\\tadav\\OneDrive\\Bureau\\Data&GO\\PROJET 1 DATA ANALYSIS\\datasCSV\\products.csv'
# import data from csv to pandas dataframe
def import_data_from(source_data_file):
return pd.read_csv(source_data_file)
# fonction pour ranked en desc
def add_rank_desc_column(dataframe, orderby_column):
dataframe = dataframe.sort_values(orderby_column, ascending=False).reset_index(drop=True)
dataframe['rank'] = dataframe[orderby_column].rank(ascending=False)
dataframe['rank'] = dataframe['rank'].astype(int)
return dataframe
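# Illustrative usage sketch (added; not part of the original script). add_rank_desc_column
# sorts the frame descending on one column and adds an integer 'rank' (1 = largest value).
def _example_add_rank_desc_column():
    df = pd.DataFrame({'product_id': [1, 2, 3], 'order_count': [10, 30, 20]})
    ranked = add_rank_desc_column(df, 'order_count')
    # First row is product 2 with rank 1, then product 3 (rank 2) and product 1 (rank 3).
    return ranked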
class DataParser:
def __init__(self):
        self.__departments_df = pd.DataFrame()
from mpl_toolkits.mplot3d import axes3d
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import csv
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from mpl_toolkits.mplot3d import Axes3D
import plotly.graph_objects as go
import plotly.express as px
import publico as func
pd.options.mode.chained_assignment = None # default='warn'
from dateutil import parser
def MediaFileRede(res_select, interval_time=5):
res_select.drop_duplicates(subset=None, keep="first", inplace=True)
    # create the output columns
res_select['Timer2'] = 0
res_select['Media2'] = 0.0
velo_total = 0.0
count=0
timer_atual = 0.0
timer_ant = 0.0
elapset_atual= 0.0
elapset_cumulativo = 0.0
count_timer=interval_time
for index, row in res_select.iterrows():
timer_atual = row['Tempo']
if (timer_ant!=0.0):
elapset_atual = float(row['Tempo']) - float(timer_ant)
# print(abs(elapset_atual))
elapset_cumulativo+=float(elapset_atual)
if ((elapset_cumulativo >= interval_time)):
# print('Chegou')
# break
media_velo = velo_total / count
res_select.at[index,"Media2"] = media_velo
res_select.at[index,"Timer2"] = count_timer
elapset_cumulativo=0.0
timer_ant = 0.0
velo_total=0.0
media_velo=0.0
count=0
count_timer+=interval_time
if (timer_atual != timer_ant):
timer_ant = timer_atual
velo_total = velo_total + row['Download']
count+=1
# remove zeros
res_select = res_select[(res_select['Timer2']!=0) & (res_select['Timer2']<=280) & (res_select['Media2']<300) ]
return res_select
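# Illustrative usage sketch (added; not part of the original script; the sample values are
# made up). MediaFileRede walks the samples in order, accumulates 'Download' until about
# `interval_time` seconds of 'Tempo' have elapsed, then writes the mean of that window
# into 'Media2' (and the window end into 'Timer2') on the row that closed the window.
def _example_media_file_rede():
    demo = pd.DataFrame({'Download': [10.0, 20.0, 30.0, 40.0, 50.0, 60.0],
                         'Source': 'DEMO',
                         'Tempo': [0.0, 2.0, 4.0, 6.0, 8.0, 10.0]})
    # Returns only the rows that closed a window; here a single row with Media2 == 25.0.
    return MediaFileRede(demo, interval_time=5)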
EXP="70"
print("Loading Dataframe...")
# BASELINE GERAL ***************************************************
df1 = pd.read_csv("../repositorio/" + EXP + "/REDE_GERAL.csv")
df1['Download'] = df1['Download'].astype(float)
df1['Upload'] = df1['Upload'].astype(float)
df1['Tempo'] = df1['Tempo'].astype(float)
df1['Source'] = "BASELINE"
# df1_filtro = df1.loc[(df1['Bytes'] > 0)]
df1_select = df1[['Download', 'Source', 'Tempo']]
df1_select = MediaFileRede(df1_select)
# *************************************************************************
# BASELINE 1TO 2 **********************************************************
df2 = pd.read_csv("../repositorio/" + EXP + "/REDE_BASELINE_1TO2.csv")
df2['Download'] = df2['Download'].astype(float)
df2['Upload'] = df2['Upload'].astype(float)
# df2['Duracao'] = df2['Duracao'].astype(float)
df2['Tempo'] = df2['Tempo'].astype(float)
# df2['Bytes'] = df2['Bytes'].astype(float)
df2['Source'] = "1TO2"
# df4_filtro = df4.loc[(df4['Bytes'] > 0)]
df2_select = df2[['Download', 'Source', 'Tempo']]
df2_select = MediaFileRede(df2_select)
#********************************************************************
print("Loading Dataframe...")
# BASELINE RANDOM **********************************************************
df3 = pd.read_csv("../repositorio/" + EXP + "/REDE_BASELINE_RANDOM.csv")
df3['Download'] = df3['Download'].astype(float)
df3['Upload'] = df3['Upload'].astype(float)
# df3['Duracao'] = df3['Duracao'].astype(float)
df3['Tempo'] = df3['Tempo'].astype(float)
# df3['Bytes'] = df3['Bytes'].astype(float)
df3['Source'] = "RAND"
# df4_filtro = df4.loc[(df4['Bytes'] > 0)]
df3_select = df3[['Download', 'Source', 'Tempo']]
df3_select = MediaFileRede(df3_select)
#********************************************************************
print("Loading Dataframe...")
# BASELINE THRESHOLD **********************************************************
df4 = pd.read_csv("../repositorio/" + EXP + "/REDE_BASELINE_THRESHOLD.csv")
df4['Download'] = df4['Download'].astype(float)
df4['Upload'] = df4['Upload'].astype(float)
# df4['Duracao'] = df4['Duracao'].astype(float)
df4['Tempo'] = df4['Tempo'].astype(float)
# df4['Bytes'] = df4['Bytes'].astype(float)
df4['Source'] = "LIM-5"
# df4_filtro = df4.loc[(df4['Bytes'] > 0)]
df4_select = df4[['Download', 'Source', 'Tempo']]
df4_select = MediaFileRede(df4_select)
#********************************************************************
print("Loading Dataframe...")
# DBSCAN **********************************************************
df5 = pd.read_csv("../repositorio/" + EXP + "/REDE_DBSCAN.csv")
df5['Download'] = df5['Download'].astype(float)
df5['Upload'] = df5['Upload'].astype(float)
df5['Tempo'] = df5['Tempo'].astype(float)
df5['Source'] = "DBSCAN"
# df1_filtro = df1.loc[(df1['Bytes'] > 0)]
df5_select =df5[['Download', 'Source', 'Tempo']]
df5_select = MediaFileRede(df5_select)
#********************************************************************
# # # DBSCAN FILTER **********************************************************
# # df6 = pd.read_csv("../repositorio/" + EXP + "/REDE_DBSCAN_FILTER.csv")
# # df6['Download'] = df6['Download'].astype(float)
# # df6['Upload'] = df6['Upload'].astype(float)
# # df6['Duracao'] = df6['Duracao'].astype(float)
# # df6['STime'] = df6['STime'].astype(float)
# # df6['Bytes'] = df6['Bytes'].astype(float)
# # df6['Source'] = "DBSCAN - FILTER"
# # df6_filtro = df6.loc[(df6['Bytes'] > 0)]
# # df6_select = df6_filtro[['Upload','Bytes','Source', 'STime','Duracao']]
# # df6_select = MediaFileRede(df6_select)
# # #********************************************************************
# XMEANS **********************************************************
df7 = pd.read_csv("../repositorio/" + EXP + "/REDE_XMEANS.csv")
df7['Download'] = df7['Download'].astype(float)
df7['Upload'] = df7['Upload'].astype(float)
# df7['Duracao'] = df7['Duracao'].astype(float)
df7['Tempo'] = df7['Tempo'].astype(float)
# df7['Bytes'] = df7['Bytes'].astype(float)
df7['Source'] = "XMEANS"
# df1_filtro = df1.loc[(df1['Bytes'] > 0)]
df7_select =df7[['Download', 'Source', 'Tempo']]
df7_select = MediaFileRede(df7_select)
#********************************************************************
print("Loading Chart...")
# res = pd.concat([df1_select,df5_select,df7_select], sort=False)
# res = pd.concat([df1_select,df2_select,df3_select,df4_select, df5_select,df7_select], sort=False)
res = pd.concat([df1_select, df2_select, df3_select, df4_select], sort=False)
#!/usr/bin/env python
import numpy as np
import pandas as pd
import click as ck
from sklearn.metrics import classification_report
from sklearn.metrics.pairwise import cosine_similarity
import sys
from collections import deque
import time
import logging
from sklearn.metrics import roc_curve, auc, matthews_corrcoef
from scipy.spatial import distance
from scipy import sparse
import math
from utils import FUNC_DICT, Ontology, NAMESPACES
from matplotlib import pyplot as plt
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
# def get_goplus_defs():
# plus_defs = {}
# with open('data/definitions_mf.txt') as f:
# for line in f:
# line = line.strip()
# go_id, definition = line.split(': ')
# go_id = go_id.replace('_', ':')
# definition = definition.replace('_', ':')
# plus_defs[go_id] = set(definition.split(' and '))
# return plus_defs
ont = 'mf'
@ck.command()
@ck.option(
'--train-data-file', '-trdf', default=f'data/{ont}/train_data.pkl',
help='Data file with training features')
@ck.option(
'--valid-data-file', '-vrdf', default=f'data/{ont}/valid_data.pkl',
help='Data file with training features')
@ck.option(
'--test-data-file', '-tsdf', default=f'data/{ont}/predictions_deepgoel.pkl',
help='Test data file')
@ck.option(
'--terms-file', '-tf', default=f'data/{ont}/terms.pkl',
help='Data file with sequences and complete set of annotations')
@ck.option(
'--diamond-scores-file', '-dsf', default=f'data/{ont}/test_diamond.res',
help='Diamond output')
@ck.option(
'--out_file', '-of', help='Output file')
def main(train_data_file, valid_data_file, test_data_file, terms_file,
diamond_scores_file, out_file):
go_rels = Ontology('data/go.obo', with_rels=True)
    terms_df = pd.read_pickle(terms_file)
#
# Convert API responses to Pandas DataFrames
#
import pandas as pd
def accounts(data):
"""accounts as dataframe"""
return pd.concat(
pd.json_normalize(v["securitiesAccount"]) for v in data.values()
).set_index("accountId")
def transactions(data):
"""transaction information as Dataframe"""
return pd.json_normalize(data)
def search(data):
"""search for symbol as a dataframe"""
ret = []
for symbol in data:
ret.append(data[symbol])
return pd.DataFrame(ret)
def instrument(data):
"""instrument info from cusip as dataframe"""
return pd.DataFrame(data)
def quote(data):
"""quote as dataframe"""
return pd.DataFrame(data).T.set_index("symbol")
def history(data):
"""get history as dataframe"""
df = pd.DataFrame(data["candles"])
df["datetime"] = pd.to_datetime(df["datetime"], unit="ms")
return df
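# Illustrative usage (sketch; the payload below is hypothetical, shaped like a
# brokerage price-history response with epoch-millisecond candle timestamps):
#   payload = {"candles": [{"open": 1.0, "high": 2.0, "low": 0.5, "close": 1.5,
#                           "volume": 100, "datetime": 1609459200000}]}
#   candles = history(payload)  # "datetime" becomes a pandas Timestamp column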
def options(data):
"""options chain as dataframe"""
ret = []
for date in data["callExpDateMap"]:
for strike in data["callExpDateMap"][date]:
ret.extend(data["callExpDateMap"][date][strike])
for date in data["putExpDateMap"]:
for strike in data["putExpDateMap"][date]:
ret.extend(data["putExpDateMap"][date][strike])
df = pd.DataFrame(ret)
for col in (
"tradeTimeInLong",
"quoteTimeInLong",
"expirationDate",
"lastTradingDay",
):
if col in df.columns:
df[col] = | pd.to_datetime(df[col], unit="ms") | pandas.to_datetime |
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
class Data:
'''Obtains hydro data and preprocesses it.'''
def data(self, test_len):
names = ['date', 'price', 'avg_p', 'bid', 'ask',
'o', 'h', 'l', 'c', 'avgp', 'vol', 'oms', 'num']
# get data
df = pd.read_csv('pcell.csv', sep=';', header=1).iloc[:,:1]
df[[1, 2]] = pd.read_csv('pcell.csv', sep=';', header=1).iloc[:,6:8]
df = pd.concat([df, pd.read_csv('pcell.csv', sep=';', header=1).iloc[:,:-1].drop(
columns=['Date'])], axis=1).iloc[::-1].reset_index().drop(columns='index')
df.columns = names
        # Forward-fill nulls column by column: hold the most recent non-null
        # value and reuse it whenever a null is encountered.
        for name in names:
            no_null = []
            tmp = np.nan  # fallback in case the column starts with nulls
            # check if null exist in column
            if any(df[name].isnull()):
                # traverse the boolean null mask of the column
                for i, j in enumerate(df[name].isnull()):
                    if not j:
                        # hold the value from the latest non-null entry
                        tmp = df[name].iloc[i]
                        no_null.append(tmp)
                    else:
                        no_null.append(tmp)
                # put back in dataframe
df[name] = | pd.Series(no_null) | pandas.Series |
from finrl_meta.data_processors.processor_alpaca import AlpacaProcessor as Alpaca
from finrl_meta.data_processors.processor_wrds import WrdsProcessor as Wrds
from finrl_meta.data_processors.processor_yahoofinance import YahooFinanceProcessor as YahooFinance
from finrl_meta.data_processors.processor_binance import BinanceProcessor as Binance
from finrl_meta.data_processors.processor_ricequant import RiceQuantProcessor as RiceQuant
from finrl_meta.data_processors.processor_joinquant import JoinquantProcessor
from finrl_meta.data_processors.processor_tusharepro import TushareProProcessor as Tusharepro
import pandas as pd
import numpy as np
import os
class DataProcessor():
def __init__(self, data_source, **kwargs):
self.data_source = data_source
self.dataframe = | pd.DataFrame() | pandas.DataFrame |
from autodesk.model import Model
from autodesk.sqlitedatastore import SqliteDataStore
from autodesk.states import UP, DOWN, ACTIVE, INACTIVE
from pandas import Timestamp, Timedelta
from pandas.testing import assert_frame_equal
from tests.stubdatastore import StubDataStore
import pandas as pd
import pytest
def make_spans(records):
return pd.DataFrame(records, columns=['start', 'end', 'state'])
@pytest.fixture()
def inmemory_model():
model = Model(SqliteDataStore(':memory:'))
yield model
model.close()
def test_get_desk_spans_empty():
t1 = Timestamp.min
t2 = Timestamp.max
model = Model(StubDataStore.empty())
result = model.get_desk_spans(t1, t2)
expected = make_spans([(t1, t2, DOWN)])
assert_frame_equal(result, expected)
def test_get_session_spans_empty():
t1 = Timestamp.min
t2 = Timestamp.max
model = Model(StubDataStore.empty())
result = model.get_session_spans(t1, t2)
expected = make_spans([(t1, t2, INACTIVE)])
assert_frame_equal(result, expected)
def test_get_desk_spans_one_up_span():
t1 = Timestamp(2018, 1, 1)
t2 = Timestamp(2018, 1, 2)
t3 = Timestamp(2018, 1, 3)
model = Model(StubDataStore(
session_events=[],
desk_events=[(t2, UP)]
))
result = model.get_desk_spans(t1, t3)
expected = make_spans([(t1, t2, DOWN), (t2, t3, UP)])
assert_frame_equal(result, expected)
def test_get_session_spans_one_active_span():
t1 = Timestamp(2018, 1, 1)
t2 = Timestamp(2018, 1, 2)
t3 = Timestamp(2018, 1, 3)
model = Model(StubDataStore(
session_events=[(t2, ACTIVE)],
desk_events=[]
))
result = model.get_session_spans(t1, t3)
expected = make_spans([(t1, t2, INACTIVE), (t2, t3, ACTIVE)])
assert_frame_equal(result, expected)
def test_get_session_state_empty():
model = Model(StubDataStore.empty())
assert model.get_session_state() == INACTIVE
def test_get_desk_state_empty():
model = Model(StubDataStore.empty())
assert model.get_desk_state() == DOWN
def test_get_active_time_empty():
model = Model(StubDataStore.empty())
assert model.get_active_time(Timestamp.min, Timestamp.max) == Timedelta(0)
def test_get_active_time_active_zero():
t = Timestamp(2018, 1, 1)
model = Model(StubDataStore(
session_events=[(t, ACTIVE)],
desk_events=[]
))
assert model.get_active_time(Timestamp.min, t) == Timedelta(0)
def test_get_active_time_active_for_10_minutes():
t1 = Timestamp(2018, 1, 1, 0, 0, 0)
t2 = Timestamp(2018, 1, 1, 0, 10, 0)
model = Model(StubDataStore(
session_events=[(t1, ACTIVE)],
desk_events=[]
))
assert model.get_active_time(Timestamp.min, t2) == Timedelta(minutes=10)
def test_get_active_time_just_after_desk_change():
t1 = Timestamp(2018, 1, 1, 0, 0, 0)
t2 = Timestamp(2018, 1, 1, 0, 10, 0)
model = Model(StubDataStore(
session_events=[(t1, ACTIVE)],
desk_events=[(t2, UP)]
))
assert model.get_active_time(Timestamp.min, t2) == Timedelta(0)
def test_get_active_time_active_20_minutes_with_changed_desk_state():
t1 = Timestamp(2018, 1, 1, 0, 0, 0)
t2 = Timestamp(2018, 1, 1, 0, 10, 0)
t3 = Timestamp(2018, 1, 1, 0, 20, 0)
model = Model(StubDataStore(
session_events=[(t1, ACTIVE)],
desk_events=[(t2, UP)]
))
assert model.get_active_time(Timestamp.min, t3) == Timedelta(minutes=10)
def test_compute_hourly_count_active_30_minutes():
t1 = Timestamp(2017, 4, 12, 10, 0, 0)
t2 = Timestamp(2017, 4, 12, 10, 30, 0)
model = Model(StubDataStore(
session_events=[(t1, ACTIVE), (t2, INACTIVE)],
desk_events=[]
))
result = model.compute_hourly_count(t1, t2)
specific_hour = result[
(result.weekday == 'Wednesday') & (result.hour == 10)
]
assert specific_hour.counts.iloc[0] == 1
def test_compute_hourly_count_active_0_minutes():
t1 = Timestamp(2017, 4, 12, 10, 0, 0)
t2 = Timestamp(2017, 4, 12, 10, 30, 0)
model = Model(StubDataStore(
session_events=[(t1, INACTIVE)],
desk_events=[]
))
result = model.compute_hourly_count(t1, t2)
assert result.counts.sum() == 0
def test_set_session_state_active(inmemory_model):
t1 = Timestamp(2018, 1, 1)
t2 = Timestamp(2018, 1, 2)
inmemory_model.set_session(t1, ACTIVE)
expected = make_spans([(t1, t2, ACTIVE)])
assert inmemory_model.get_session_state() == ACTIVE
assert_frame_equal(inmemory_model.get_session_spans(t1, t2), expected)
def test_set_session_state_inactive(inmemory_model):
t1 = | Timestamp(2018, 1, 1) | pandas.Timestamp |
import numpy as np
import pandas as pd
import sys
import time
def make_trip(N=50):
    """
    Simulate random selection between two coffee cards.

    Each card starts with N drinks. A card is chosen at random for every
    drink until the chosen card is found empty. Returns the number of
    drinks remaining on the other card at that moment.
    """
Na=N
Nb=N
    while True:
if np.random.rand()<0.5:
if Na==0:
return(Nb)
else:
Na=Na-1
else:
if Nb==0:
return(Na)
else:
Nb=Nb-1
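# Added for illustration only (not part of the original script): the simulation
# above is the classic Banach matchbox problem, so the sample mean of
# make_trip(N) should converge to the closed-form expectation below.
def expected_remaining(N=50):
    """Analytic E[drinks left on the other card] when one card of N runs out."""
    from math import comb  # requires Python >= 3.8
    return (2 * N + 1) * comb(2 * N, N) / 4 ** N - 1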
if __name__ == "__main__":
t1=time.time()
www=[]
num_trials=1000
N=50
if len(sys.argv)>1:
num_trials=int(sys.argv[1])
if len(sys.argv)>2:
N=int(sys.argv[2])
drinks=np.zeros(N+1,'i')
for i in range(num_trials):
k=make_trip(N)
drinks[k]=drinks[k]+1
counts= | pd.DataFrame({'n':drinks}) | pandas.DataFrame |
import pandas as pd
import xml.etree.ElementTree as ET
import lxml.etree as etree
most_serious_problem = | pd.read_csv(
"../data/processed_data/special_eb/data/3_final/most_serious_problem/special_eb_most_serious_problem_final.csv") | pandas.read_csv |
import os
import re
import string
import random
import numpy as np
import pandas as pd
from pybedtools import BedTool
from Bio import SeqIO
import warnings
import logging.config
warnings.filterwarnings("ignore")
## Intialize logger
logging.config.fileConfig('logging.ini', disable_existing_loggers=False)
logger = logging.getLogger(__name__)
def intersect_peak_regdna(peak_bed, regdna_bed, gene_df):
"""Intersect peaks and regulatory DNA to obtain signals within the
regulatory regions. The relative start and end of each peak is based
    off the gene start (if + strand) or gene stop (if - strand). A negative value
    represents upstream of the gene; a positive value represents downstream.
Args:
peak_bed - Feature peaks in bed object
regdna_bed - Regulatory DNA (region) in bed object
gene_df - Gene annotation
Returns:
Dataframe of peaks assigned to regulatory regions
"""
INTER_COLS = [
'peak_chr', 'peak_start', 'peak_end', 'peak_name',
'peak_score', 'reg_chr', 'reg_start', 'reg_end',
'gene', 'reg_score', 'strand']
OUT_COLS = [
'peak_chr', 'peak_start', 'peak_end', 'peak_score',
'gene', 'strand', 'rel_start', 'rel_end']
inter_bed = peak_bed.intersect(regdna_bed, wb=True)
inter_df = pd.DataFrame(data=inter_bed.to_dataframe().values, columns=INTER_COLS)
gene_df = gene_df.rename(columns={
'name': 'gene', 'start': 'gene_start', 'end': 'gene_end'})
inter_df = inter_df.merge(
gene_df[['gene', 'gene_start', 'gene_end']], how='left', on='gene')
## Calculate the relative positions of peaks to the gene start.
out_df = pd.DataFrame(columns=OUT_COLS)
for strand, subdf in inter_df.groupby('strand'):
if strand == '+':
subdf['rel_start'] = subdf['peak_start'] - subdf['gene_start']
subdf['rel_end'] = subdf['peak_end'] - subdf['gene_start']
elif strand == '-':
subdf['rel_start'] = subdf['gene_end'] - subdf['peak_end']
subdf['rel_end'] = subdf['gene_end'] - subdf['peak_start']
else:
logger.warning('WARNING: {} does not have strand info. Skipped.'.format(
np.unique(subdf['gene'])))
continue
out_df = out_df.append(subdf[OUT_COLS], ignore_index=True)
return out_df
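# Illustrative usage (sketch; file names are hypothetical and gene_bed is a
# BedTool of gene annotations, e.g. from convert_orf_to_bed below):
#   peak_bed = convert_gnashy_to_bed('experiment.gnashy')
#   regdna_bed = create_regdna(gene_bed, 'genome.fa', reg_bound=(500, 500))
#   assigned = intersect_peak_regdna(peak_bed, regdna_bed, gene_bed.to_dataframe())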
def create_regdna(gene_bed, genome_fa, reg_bound=(500, 500)):
"""Create regulatory region(s) for each gene.
Args:
gene_bed - Gene annotation in bed object
reg_bound - Tuple for the boundary of regulatory region, i.e.
(upstream distance, downstream distance)
genome_fa - Genome fasta filepath
Returns:
Regulatory DNA (regions) in bed object
"""
upstream_bound, downstream_bound = reg_bound
chrom_size_dict = {x.id: len(x) for x in load_fasta(genome_fa)}
gene_df = gene_bed.to_dataframe()
bed_cols = gene_df.columns
## Calculate relative start and end, grouped by strand
regdna_df = pd.DataFrame(columns=bed_cols)
for chrom, subdf in gene_df.groupby('chrom'):
for strand, subdf2 in subdf.groupby('strand'):
if strand == '+':
subdf2['reg_start'] = subdf2['start'] - upstream_bound
subdf2['reg_end'] = subdf2['start'] + downstream_bound
elif strand == '-':
subdf2['reg_start'] = subdf2['end'] - downstream_bound
subdf2['reg_end'] = subdf2['end'] + upstream_bound
else:
                logger.warning('WARNING: {} does not have strand info. Skipped.'.format(
                    np.unique(subdf2['name'])))
continue
## Reformat dataframe
subdf2 = subdf2.drop(columns=['start', 'end'])
subdf2 = subdf2.rename(columns={'reg_start': 'start', 'reg_end': 'end'})
subdf2.loc[subdf2['start'] < 1, 'start'] = 1
subdf2.loc[subdf2['end'] < 1, 'end'] = 1
subdf2.loc[
subdf2['start'] >= chrom_size_dict[chrom], 'start'] = \
chrom_size_dict[chrom] - 1
subdf2.loc[
subdf2['end'] >= chrom_size_dict[chrom], 'end'] = \
chrom_size_dict[chrom] - 1
regdna_df = regdna_df.append(subdf2, ignore_index=True)
return BedTool.from_dataframe(regdna_df[bed_cols]).sort()
def calculate_matrix_position(df, upstream_bound):
"""Create new columns for the positions of features in feature matrix.
"""
df['mtx_start'] = df['rel_start'] + upstream_bound
df['mtx_end'] = df['rel_end'] + upstream_bound
return df
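# Worked example (sketch): with upstream_bound = 500, a peak at rel_start = -500
# (500 bp upstream of the gene start) maps to matrix column 0, and rel_start = 0
# (the gene start itself) maps to column 500.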
def get_onehot_dna_sequence(regdna_bed, genome_fa, reg_bound, genes):
"""Get DNA sequence from regulatory DNA bed object.
"""
FEAT_COL = ['gene_idx', 'mtx_start', 'mtx_end', 'peak_score', 'alphabet']
dna_df_list = []
regdna_df = regdna_bed.to_dataframe()
regdna_fa = regdna_bed.getfasta(fi=genome_fa, name=True)
for x in load_fasta(regdna_fa.seqfn):
## Keep upstream at left
strand = regdna_df.loc[regdna_df['name'] == x.id, 'strand'].values[0]
seq = str(x.seq) if strand == '+' else str(x.seq)[::-1]
## Convert one-hot encoding
dummy_seq = pd.get_dummies(list(seq))
shift = reg_bound[0] + reg_bound[1] - len(seq)
if shift > 0: ## if regulatory region is shorter than queried region
blank_seq = pd.DataFrame(
data=np.zeros((shift, 4), dtype=int),
columns=dummy_seq.columns)
dummy_seq = blank_seq.append(dummy_seq, ignore_index=False)
## Convert to row and col index of the entries of ones
coord_idx, alphabet_idx = np.where(dummy_seq == 1)
gene_idx = genes.index(x.id)
tmp_df = pd.DataFrame({
'mtx_start': coord_idx,
'mtx_end': coord_idx + 1,
'alphabet': alphabet_idx})
tmp_df['gene_idx'] = gene_idx
tmp_df['peak_score'] = 1
dna_df_list.append(tmp_df)
return pd.concat(dna_df_list, ignore_index=True)[FEAT_COL]
def get_onehot_dna_sequence_slim(regdna_bed, genome_fa, tss_df):
"""Get DNA sequence from regulatory DNA bed object.
"""
FEAT_COL = ['gene_idx', 'rel_dist', 'alphabet']
dna_df_list = []
regdna_df = regdna_bed.to_dataframe()
regdna_fa = regdna_bed.getfasta(fi=genome_fa, name=True)
genes = tss_df['name'].tolist()
print('==> get_onehot_dna_sequence_slim <==')
for i, s in enumerate(load_fasta(regdna_fa.seqfn)):
## Keep upstream at left
seq_info = regdna_df.iloc[i]
strand = seq_info['strand']
start_pos = seq_info['start']
end_pos = seq_info['end']
tss_pos = tss_df.loc[tss_df['name'] == s.id, 'start'].iloc[0]
if strand == '+':
rel_dists = np.arange(start_pos, end_pos, dtype=int) - tss_pos
seq = str(s.seq)
else:
rel_dists = tss_pos - np.arange(start_pos, end_pos, dtype=int)
seq = str(s.seq)[::-1]
## Convert one-hot encoding
dummy_seq = pd.get_dummies(list(seq))
## Convert to row and col index of the entries of ones
_, alphabet_idx = np.where(dummy_seq == 1)
if len(rel_dists) != len(alphabet_idx):
print(s.id)
gene_idx = genes.index(s.id)
tmp_df = pd.DataFrame({
'gene_idx': [gene_idx] * len(rel_dists),
'rel_dist': rel_dists,
'alphabet': alphabet_idx})
dna_df_list.append(tmp_df)
return pd.concat(dna_df_list, ignore_index=True)[FEAT_COL]
def get_nt_frequency(regdna_bed, genome_fa, genes):
"""Get di-nucleotide frequences of regualtory DNA.
"""
DINUCLEOTIDES = [
['AA', 'TT'], ['AC', 'GT'], ['AG', 'CT'],
['CA', 'TG'], ['CC', 'GG'], ['GA', 'TC'],
['AT'], ['CG'], ['GC'], ['TA']]
## Initialize freq dict for dints
nt_dict = {}
for dints in DINUCLEOTIDES:
for dint in dints:
nt_dict[dint] = np.zeros(len(genes))
len_dict = {i: 0 for i in range(len(genes))}
regdna_bed = regdna_bed.getfasta(fi=genome_fa, name=True)
for s in load_fasta(regdna_bed.seqfn):
gene_idx = genes.index(s.id)
seq_len = len(s.seq)
seq = str(s.seq)
## Add dint count and sequence length for each region
for k in range(seq_len - 1):
dint = seq[k: k + 2]
if 'N' not in dint:
nt_dict[dint][gene_idx] += 1
len_dict[gene_idx] += seq_len
    ## Calculate frequency
for dint in nt_dict.keys():
for gene_idx, seq_len in len_dict.items():
nt_dict[dint][gene_idx] /= seq_len
## Combine reverse complement
for dints in DINUCLEOTIDES:
if len(dints) > 1:
nt_dict[dints[0]] = nt_dict[dints[0]] + nt_dict[dints[1]]
del nt_dict[dints[1]]
return nt_dict
def convert_gnashy_to_bed(filename, binarize_peak_score=False):
"""Convert 3-column gnashy file to bed obejct.
Args:
filename - Gnashy filename
binarize_peak_score - Boolean flag
Returns:
Output bed object
"""
with open(filename, "r") as f:
lines = f.readlines()
bed = []
for i, line in enumerate(lines):
chrm, pos, score = line.strip().split("\t")
if binarize_peak_score:
score = 1
bed.append(["chr" + chrm, int(pos), int(pos) + 1, ".", score])
return BedTool.from_dataframe(pd.DataFrame(bed))
def convert_orf_to_bed(filename):
"""Convert yeast orf fasta headers to orf coordinate in bed format.
Args:
filename - Fastq filename
Returns:
Gene annotation in bed object
"""
CH_DICT = {
'Chr I': 'chr1', 'Chr II': 'chr2', 'Chr III': 'chr3',
'Chr IV': 'chr4', 'Chr V': 'chr5', 'Chr VI': 'chr6',
'Chr VII': 'chr7', 'Chr VIII': 'chr8', 'Chr IX': 'chr9',
'Chr X': 'chr10', 'Chr XI': 'chr11', 'Chr XII': 'chr12',
'Chr XIII': 'chr13', 'Chr XIV': 'chr14', 'Chr XV': 'chr15',
'Chr XVI': 'chr16', 'Chr Mito': 'chrm'}
with open(filename, 'r') as f:
lines = f.readlines()
bed = []
for i in range(len(lines)):
if lines[i].startswith('>'):
x = lines[i].strip().strip('>').split(', ')
g = x[0].split(' ')[0]
s = '+' if g.split('-')[0].endswith('W') else '-'
if x[1].startswith('2-micron plasmid'): ## deal with plasmid
y = re.split('-|,| from ', x[1].strip('2-micron '))
ch = y[0]
else: ## deal with other chromsomes
y = re.split('-|,| from ', x[1])
ch = CH_DICT[y[0]]
pos = np.array(y[1:], dtype=int)
g_start, g_end = np.min(pos), np.max(pos)
bed.append([ch, g_start, g_end, g, '.', s])
return BedTool.from_dataframe( | pd.DataFrame(bed) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 21 10:00:33 2018
@author: jdkern
"""
from __future__ import division
from sklearn import linear_model
from statsmodels.tsa.api import VAR
import scipy.stats as st
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
######################################################################
# LOAD
######################################################################
#import data
df_load = pd.read_excel('Synthetic_demand_pathflows/hist_demanddata.xlsx',sheet_name='hourly_load',header=0)
df_weather = pd.read_excel('Synthetic_demand_pathflows/hist_demanddata.xlsx',sheet_name='weather',header=0)
BPA_weights = pd.read_excel('Synthetic_demand_pathflows/hist_demanddata.xlsx',sheet_name='BPA_location_weights',header=0)
CAISO_weights = pd.read_excel('Synthetic_demand_pathflows/hist_demanddata.xlsx',sheet_name='CAISO_location_weights',header=0)
Name_list=pd.read_csv('Synthetic_demand_pathflows/Covariance_Calculation.csv')
Name_list=list(Name_list.loc['SALEM_T':])
Name_list=Name_list[1:]
df_wind=pd.read_csv('Synthetic_wind_power/wind_power_sim.csv',header=0)
sim_years = int(len(df_wind)/8760) + 3
sim_weather=pd.read_csv('Synthetic_weather/synthetic_weather_data.csv',header=0,index_col=0)
sim_weather = sim_weather.iloc[0:365*sim_years,:]
sim_weather = sim_weather.iloc[365:len(sim_weather)-730,:]
sim_weather = sim_weather.reset_index(drop=True)
#weekday designation
dow = df_weather.loc[:,'Weekday']
#generate simulated day of the week assuming starts from monday
count=0
sim_dow= np.zeros(len(sim_weather))
for i in range(0,len(sim_weather)):
count = count +1
if count <=5:
sim_dow[i]=1
elif count > 5:
sim_dow[i]=0
if count ==7:
count =0
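#equivalent vectorized form (sketch, not used above): days 0-4 of each 7-day
#cycle are weekdays, so
# sim_dow = (np.arange(len(sim_weather)) % 7 < 5).astype(float)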
#Generate a datelist
datelist=pd.date_range(pd.Timestamp(2017,1,1),periods=365).tolist()
sim_month=np.zeros(len(sim_weather))
sim_day=np.zeros(len(sim_weather))
sim_year=np.zeros(len(sim_weather))
count=0
for i in range(0,len(sim_weather)):
if count <=364:
sim_month[i]=datelist[count].month
sim_day[i]=datelist[count].day
sim_year[i]=datelist[count].year
else:
count=0
sim_month[i]=datelist[count].month
sim_day[i]=datelist[count].day
sim_year[i]=datelist[count].year
count=count+1
######################################################################
# BPAT
######################################################################
#Find the simulated data at the sites
col_BPA_T = ['SALEM_T','SEATTLE_T','PORTLAND_T','EUGENE_T','BOISE_T']
col_BPA_W = ['SALEM_W','SEATTLE_W','PORTLAND_W','EUGENE_W','BOISE_W']
BPA_sim_T=sim_weather[col_BPA_T].values
BPA_sim_W=sim_weather[col_BPA_W].values
sim_days = len(sim_weather)
weighted_SimT = np.zeros((sim_days,1))
###########################################
#find average temps
cities = ['Salem','Seattle','Portland','Eugene','Boise']
num_cities = len(cities)
num_days = len(df_weather)
AvgT = np.zeros((num_days,num_cities))
Wind = np.zeros((num_days,num_cities))
weighted_AvgT = np.zeros((num_days,1))
for i in cities:
n1 = i + '_MaxT'
n2 = i + '_MinT'
n3 = i + '_Wind'
j = int(cities.index(i))
AvgT[:,j] = 0.5*df_weather.loc[:,n1] + 0.5*df_weather.loc[:,n2]
weighted_AvgT[:,0] = weighted_AvgT[:,0] + AvgT[:,j]*BPA_weights.loc[0,i]
Wind[:,j] = df_weather.loc[:,n3]
weighted_SimT[:,0] = weighted_SimT[:,0] + BPA_sim_T[:,j]*BPA_weights.loc[0,i]
#Convert simulated temperature to F
weighted_SimT=(weighted_SimT * 9/5) +32
BPA_sim_T_F=(BPA_sim_T * 9/5) +32
#convert to degree days
HDD = np.zeros((num_days,num_cities))
CDD = np.zeros((num_days,num_cities))
HDD_sim = np.zeros((sim_days,num_cities))
CDD_sim = np.zeros((sim_days,num_cities))
for i in range(0,num_days):
for j in range(0,num_cities):
HDD[i,j] = np.max((0,65-AvgT[i,j]))
CDD[i,j] = np.max((0,AvgT[i,j] - 65))
for i in range(0,sim_days):
for j in range(0,num_cities):
HDD_sim[i,j] = np.max((0,65-BPA_sim_T_F[i,j]))
CDD_sim[i,j] = np.max((0,BPA_sim_T_F[i,j] - 65))
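#equivalent vectorized form (sketch): np.maximum broadcasts over whole arrays,
#e.g. HDD = np.maximum(0.0, 65 - AvgT) and CDD = np.maximum(0.0, AvgT - 65),
#and likewise for the simulated series.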
#separate wind speed by cooling/heating degree day
binary_CDD = CDD>0
binary_HDD = HDD>0
CDD_wind = np.multiply(Wind,binary_CDD)
HDD_wind = np.multiply(Wind,binary_HDD)
binary_CDD_sim = CDD_sim > 0
binary_HDD_sim = HDD_sim > 0
CDD_wind_sim = np.multiply(BPA_sim_W,binary_CDD_sim)
HDD_wind_sim = np.multiply(BPA_sim_W,binary_HDD_sim)
#convert load to array
BPA_load = df_load.loc[:,'BPA'].values
#remove NaNs
a = np.argwhere(np.isnan(BPA_load))
for i in a:
BPA_load[i] = BPA_load[i+24]
peaks = np.zeros((num_days,1))
#find peaks
for i in range(0,num_days):
peaks[i] = np.max(BPA_load[i*24:i*24+24])
#Separate data by weighted temperature
M = np.column_stack((weighted_AvgT,peaks,dow,HDD,CDD,HDD_wind,CDD_wind))
M_sim=np.column_stack((weighted_SimT,sim_dow,HDD_sim,CDD_sim,HDD_wind_sim,CDD_wind_sim))
X70p = M[(M[:,0] >= 70),2:]
y70p = M[(M[:,0] >= 70),1]
X65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),2:]
y65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),1]
X60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),2:]
y60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),1]
X55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),2:]
y55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),1]
X50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),2:]
y50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),1]
X40_50 = M[(M[:,0] >= 40) & (M[:,0] < 50),2:]
y40_50 = M[(M[:,0] >= 40) & (M[:,0] < 50),1]
X30_40 = M[(M[:,0] >= 30) & (M[:,0] < 40),2:]
y30_40 = M[(M[:,0] >= 30) & (M[:,0] < 40),1]
X25_30 = M[(M[:,0] >= 25) & (M[:,0] < 30),2:]
y25_30 = M[(M[:,0] >= 25) & (M[:,0] < 30),1]
X25m = M[(M[:,0] < 25),2:]
y25m = M[(M[:,0] < 25),1]
X70p_Sim = M_sim[(M_sim[:,0] >= 70),1:]
X65_70_Sim = M_sim[(M_sim[:,0] >= 65) & (M_sim[:,0] < 70),1:]
X60_65_Sim = M_sim[(M_sim[:,0] >= 60) & (M_sim[:,0] < 65),1:]
X55_60_Sim = M_sim[(M_sim[:,0] >= 55) & (M_sim[:,0] < 60),1:]
X50_55_Sim = M_sim[(M_sim[:,0] >= 50) & (M_sim[:,0] < 55),1:]
X40_50_Sim = M_sim[(M_sim[:,0] >= 40) & (M_sim[:,0] < 50),1:]
X30_40_Sim = M_sim[(M_sim[:,0] >= 30) & (M_sim[:,0] < 40),1:]
X25_30_Sim = M_sim[(M_sim[:,0] >= 25) & (M_sim[:,0] < 30),1:]
X25m_Sim = M_sim[(M_sim[:,0] < 25),1:]
#multivariate regression
#Create linear regression object
reg70p = linear_model.LinearRegression()
reg65_70 = linear_model.LinearRegression()
reg60_65 = linear_model.LinearRegression()
reg55_60 = linear_model.LinearRegression()
reg50_55 = linear_model.LinearRegression()
reg40_50 = linear_model.LinearRegression()
reg30_40 = linear_model.LinearRegression()
reg25_30 = linear_model.LinearRegression()
reg25m = linear_model.LinearRegression()
# Train the model using the training sets
if len(y70p) > 0:
reg70p.fit(X70p,y70p)
if len(y65_70) > 0:
reg65_70.fit(X65_70,y65_70)
if len(y60_65) > 0:
reg60_65.fit(X60_65,y60_65)
if len(y55_60) > 0:
reg55_60.fit(X55_60,y55_60)
if len(y50_55) > 0:
reg50_55.fit(X50_55,y50_55)
if len(y40_50) > 0:
reg40_50.fit(X40_50,y40_50)
if len(y30_40) > 0:
reg30_40.fit(X30_40,y30_40)
if len(y25_30) > 0:
reg25_30.fit(X25_30,y25_30)
if len(y25m) > 0:
reg25m.fit(X25m,y25m)
# Make predictions using the testing set
predicted = []
for i in range(0,num_days):
s = M[i,2:]
s = s.reshape((1,len(s)))
if M[i,0]>=70:
y_hat = reg70p.predict(s)
elif M[i,0] >= 65 and M[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M[i,0] >= 60 and M[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M[i,0] >= 55 and M[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M[i,0] >= 50 and M[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M[i,0] >= 40 and M[i,0] < 50:
y_hat = reg40_50.predict(s)
elif M[i,0] >= 30 and M[i,0] < 40:
y_hat = reg30_40.predict(s)
elif M[i,0] >= 25 and M[i,0] < 30:
y_hat = reg25_30.predict(s)
elif M[i,0] < 25:
y_hat = reg25m.predict(s)
predicted = np.append(predicted,y_hat)
BPA_p = predicted.reshape((len(predicted),1))
#Simulate using the regression above
simulated=[]
for i in range(0,sim_days):
s = M_sim[i,1:]
s = s.reshape((1,len(s)))
if M_sim[i,0]>=70:
y_hat = reg70p.predict(s)
elif M_sim[i,0] >= 65 and M_sim[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M_sim[i,0] >= 60 and M_sim[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M_sim[i,0] >= 55 and M_sim[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M_sim[i,0] >= 50 and M_sim[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M_sim[i,0] >= 40 and M_sim[i,0] < 50:
y_hat = reg40_50.predict(s)
elif M_sim[i,0] >= 30 and M_sim[i,0] < 40:
y_hat = reg30_40.predict(s)
elif M_sim[i,0] >= 25 and M_sim[i,0] < 30:
y_hat = reg25_30.predict(s)
elif M_sim[i,0] < 25:
y_hat = reg25m.predict(s)
simulated = np.append(simulated,y_hat)
BPA_sim = simulated.reshape((len(simulated),1))
a=st.pearsonr(peaks,BPA_p)
print(a[0]**2, a[1])
# Residuals
BPAresiduals = BPA_p - peaks
BPA_y = peaks
# RMSE
RMSE = (np.sum((BPAresiduals**2))/len(BPAresiduals))**.5
output = np.column_stack((BPA_p,peaks))
#########################################################################
# CAISO
#########################################################################
#Find the simulated data at the sites
col_CAISO_T = ['FRESNO_T','OAKLAND_T','LOS ANGELES_T','SAN DIEGO_T','SACRAMENTO_T','SAN JOSE_T','SAN FRANCISCO_T']
col_CAISO_W = ['FRESNO_W','OAKLAND_W','LOS ANGELES_W','SAN DIEGO_W','SACRAMENTO_W','SAN JOSE_W','SAN FRANCISCO_W']
CAISO_sim_T=sim_weather[col_CAISO_T].values
CAISO_sim_W=sim_weather[col_CAISO_W].values
sim_days = len(sim_weather)
weighted_SimT = np.zeros((sim_days,1))
#find average temps
cities = ['Fresno','Oakland','LA','SanDiego','Sacramento','SanJose','SanFran']
num_cities = len(cities)
num_days = len(df_weather)
AvgT = np.zeros((num_days,num_cities))
Wind = np.zeros((num_days,num_cities))
weighted_AvgT = np.zeros((num_days,1))
for i in cities:
n1 = i + '_MaxT'
n2 = i + '_MinT'
n3 = i + '_Wind'
j = int(cities.index(i))
AvgT[:,j] = 0.5*df_weather.loc[:,n1] + 0.5*df_weather.loc[:,n2]
Wind[:,j] = df_weather.loc[:,n3]
weighted_AvgT[:,0] = weighted_AvgT[:,0] + AvgT[:,j]*CAISO_weights.loc[1,i]
weighted_SimT[:,0] = weighted_SimT[:,0] + CAISO_sim_T[:,j]*CAISO_weights.loc[1,i]
#Convert simulated temperature to F
weighted_SimT=(weighted_SimT * 9/5) +32
CAISO_sim_T_F=(CAISO_sim_T * 9/5) +32
#convert to degree days
HDD = np.zeros((num_days,num_cities))
CDD = np.zeros((num_days,num_cities))
HDD_sim = np.zeros((sim_days,num_cities))
CDD_sim = np.zeros((sim_days,num_cities))
for i in range(0,num_days):
for j in range(0,num_cities):
HDD[i,j] = np.max((0,65-AvgT[i,j]))
CDD[i,j] = np.max((0,AvgT[i,j] - 65))
for i in range(0,sim_days):
for j in range(0,num_cities):
HDD_sim[i,j] = np.max((0,65-CAISO_sim_T_F[i,j]))
CDD_sim[i,j] = np.max((0,CAISO_sim_T_F[i,j] - 65))
#separate wind speed by cooling/heating degree day
binary_CDD = CDD>0
binary_HDD = HDD>0
binary_CDD_sim = CDD_sim > 0
binary_HDD_sim = HDD_sim > 0
CDD_wind = np.multiply(Wind,binary_CDD)
HDD_wind = np.multiply(Wind,binary_HDD)
CDD_wind_sim = np.multiply(CAISO_sim_W,binary_CDD_sim)
HDD_wind_sim = np.multiply(CAISO_sim_W,binary_HDD_sim)
###########################
# CAISO - SDGE
###########################
#convert load to array
SDGE_load = df_load.loc[:,'SDGE'].values
#remove NaNs
a = np.argwhere(np.isnan(SDGE_load))
for i in a:
SDGE_load[i] = SDGE_load[i+24]
peaks = np.zeros((num_days,1))
#find peaks
for i in range(0,num_days):
peaks[i] = np.max(SDGE_load[i*24:i*24+24])
#Separate data by weighted temperature
M = np.column_stack((weighted_AvgT,peaks,dow,HDD,CDD,HDD_wind,CDD_wind))
M_sim=np.column_stack((weighted_SimT,sim_dow,HDD_sim,CDD_sim,HDD_wind_sim,CDD_wind_sim))
X80p = M[(M[:,0] >= 80),2:]
y80p = M[(M[:,0] >= 80),1]
X75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),2:]
y75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),1]
X70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),2:]
y70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),1]
X65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),2:]
y65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),1]
X60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),2:]
y60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),1]
X55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),2:]
y55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),1]
X50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),2:]
y50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),1]
X50 = M[(M[:,0] < 50),2:]
y50 = M[(M[:,0] < 50),1]
X80p_Sim = M_sim[(M_sim[:,0] >= 80),1:]
X75_80_Sim = M_sim[(M_sim[:,0] >= 75) & (M_sim[:,0] < 80),1:]
X70_75_Sim = M_sim[(M_sim[:,0] >= 70) & (M_sim[:,0] < 75),1:]
X65_70_Sim = M_sim[(M_sim[:,0] >= 65) & (M_sim[:,0] < 70),1:]
X60_65_Sim = M_sim[(M_sim[:,0] >= 60) & (M_sim[:,0] < 65),1:]
X55_60_Sim = M_sim[(M_sim[:,0] >= 55) & (M_sim[:,0] < 60),1:]
X50_55_Sim = M_sim[(M_sim[:,0] >= 50) & (M_sim[:,0] < 55),1:]
X50_Sim = M_sim[(M_sim[:,0] < 50),1:]
#Create linear regression object
reg80p = linear_model.LinearRegression()
reg75_80 = linear_model.LinearRegression()
reg70_75 = linear_model.LinearRegression()
reg65_70 = linear_model.LinearRegression()
reg60_65 = linear_model.LinearRegression()
reg55_60 = linear_model.LinearRegression()
reg50_55 = linear_model.LinearRegression()
reg50m = linear_model.LinearRegression()
## Train the model using the training sets
if len(y80p) > 0:
reg80p.fit(X80p,y80p)
if len(y75_80) > 0:
reg75_80.fit(X75_80,y75_80)
if len(y70_75) > 0:
reg70_75.fit(X70_75,y70_75)
if len(y65_70) > 0:
reg65_70.fit(X65_70,y65_70)
if len(y60_65) > 0:
reg60_65.fit(X60_65,y60_65)
if len(y55_60) > 0:
reg55_60.fit(X55_60,y55_60)
if len(y50_55) > 0:
reg50_55.fit(X50_55,y50_55)
if len(y50) > 0:
reg50m.fit(X50,y50)
# Make predictions using the testing set
predicted = []
for i in range(0,num_days):
s = M[i,2:]
s = s.reshape((1,len(s)))
if M[i,0]>=80:
y_hat = reg80p.predict(s)
elif M[i,0] >= 75 and M[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M[i,0] >= 70 and M[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M[i,0] >= 65 and M[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M[i,0] >= 60 and M[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M[i,0] >= 55 and M[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M[i,0] >= 50 and M[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M[i,0] < 50:
y_hat = reg50m.predict(s)
predicted = np.append(predicted,y_hat)
SDGE_p = predicted.reshape((len(predicted),1))
simulated=[]
for i in range(0,sim_days):
s = M_sim[i,1:]
s = s.reshape((1,len(s)))
if M_sim[i,0]>=80:
y_hat = reg80p.predict(s)
elif M_sim[i,0] >= 75 and M_sim[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M_sim[i,0] >= 70 and M_sim[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M_sim[i,0] >= 65 and M_sim[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M_sim[i,0] >= 60 and M_sim[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M_sim[i,0] >= 55 and M_sim[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M_sim[i,0] >= 50 and M_sim[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M_sim[i,0] < 50:
y_hat = reg50m.predict(s)
#
simulated = np.append(simulated,y_hat)
SDGE_sim = simulated.reshape((len(simulated),1))
# Residuals
SDGEresiduals = SDGE_p - peaks
SDGE_y = peaks
#a=st.pearsonr(peaks,SDGE_p)
#print a[0]**2
# RMSE
RMSE = (np.sum((SDGEresiduals**2))/len(SDGEresiduals))**.5
###########################
# CAISO - SCE
###########################
#convert load to array
SCE_load = df_load.loc[:,'SCE'].values
#remove NaNs
a = np.argwhere(np.isnan(SCE_load))
for i in a:
SCE_load[i] = SCE_load[i+24]
peaks = np.zeros((num_days,1))
#find peaks
for i in range(0,num_days):
peaks[i] = np.max(SCE_load[i*24:i*24+24])
#Separate data by weighted temperature
M = np.column_stack((weighted_AvgT,peaks,dow,HDD,CDD,HDD_wind,CDD_wind))
M_sim=np.column_stack((weighted_SimT,sim_dow,HDD_sim,CDD_sim,HDD_wind_sim,CDD_wind_sim))
X80p = M[(M[:,0] >= 80),2:]
y80p = M[(M[:,0] >= 80),1]
X75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),2:]
y75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),1]
X70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),2:]
y70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),1]
X65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),2:]
y65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),1]
X60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),2:]
y60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),1]
X55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),2:]
y55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),1]
X50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),2:]
y50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),1]
X50 = M[(M[:,0] < 50),2:]
y50 = M[(M[:,0] < 50),1]
X80p_Sim = M_sim[(M_sim[:,0] >= 80),1:]
X75_80_Sim = M_sim[(M_sim[:,0] >= 75) & (M_sim[:,0] < 80),1:]
X70_75_Sim = M_sim[(M_sim[:,0] >= 70) & (M_sim[:,0] < 75),1:]
X65_70_Sim = M_sim[(M_sim[:,0] >= 65) & (M_sim[:,0] < 70),1:]
X60_65_Sim = M_sim[(M_sim[:,0] >= 60) & (M_sim[:,0] < 65),1:]
X55_60_Sim = M_sim[(M_sim[:,0] >= 55) & (M_sim[:,0] < 60),1:]
X50_55_Sim = M_sim[(M_sim[:,0] >= 50) & (M_sim[:,0] < 55),1:]
X50_Sim = M_sim[(M_sim[:,0] < 50),1:]
##multivariate regression
#
#Create linear regression object
reg80p = linear_model.LinearRegression()
reg75_80 = linear_model.LinearRegression()
reg70_75 = linear_model.LinearRegression()
reg65_70 = linear_model.LinearRegression()
reg60_65 = linear_model.LinearRegression()
reg55_60 = linear_model.LinearRegression()
reg50_55 = linear_model.LinearRegression()
reg50m = linear_model.LinearRegression()
## Train the model using the training sets
if len(y80p) > 0:
reg80p.fit(X80p,y80p)
if len(y75_80) > 0:
reg75_80.fit(X75_80,y75_80)
if len(y70_75) > 0:
reg70_75.fit(X70_75,y70_75)
if len(y65_70) > 0:
reg65_70.fit(X65_70,y65_70)
if len(y60_65) > 0:
reg60_65.fit(X60_65,y60_65)
if len(y55_60) > 0:
reg55_60.fit(X55_60,y55_60)
if len(y50_55) > 0:
reg50_55.fit(X50_55,y50_55)
if len(y50) > 0:
reg50m.fit(X50,y50)
# Make predictions using the testing set
predicted = []
for i in range(0,num_days):
s = M[i,2:]
s = s.reshape((1,len(s)))
if M[i,0]>=80:
y_hat = reg80p.predict(s)
elif M[i,0] >= 75 and M[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M[i,0] >= 70 and M[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M[i,0] >= 65 and M[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M[i,0] >= 60 and M[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M[i,0] >= 55 and M[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M[i,0] >= 50 and M[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M[i,0] < 50:
y_hat = reg50m.predict(s)
predicted = np.append(predicted,y_hat)
SCE_p = predicted.reshape((len(predicted),1))
simulated=[]
for i in range(0,sim_days):
s = M_sim[i,1:]
s = s.reshape((1,len(s)))
if M_sim[i,0]>=80:
y_hat = reg80p.predict(s)
elif M_sim[i,0] >= 75 and M_sim[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M_sim[i,0] >= 70 and M_sim[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M_sim[i,0] >= 65 and M_sim[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M_sim[i,0] >= 60 and M_sim[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M_sim[i,0] >= 55 and M_sim[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M_sim[i,0] >= 50 and M_sim[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M_sim[i,0] < 50:
y_hat = reg50m.predict(s)
simulated = np.append(simulated,y_hat)
SCE_sim = simulated.reshape((len(simulated),1))
#a=st.pearsonr(peaks,SCE_p)
#print a[0]**2
# Residuals
SCEresiduals = SCE_p - peaks
SCE_y = peaks
# RMSE
RMSE = (np.sum((SCEresiduals**2))/len(SCEresiduals))**.5
###########################
# CAISO - PG&E Valley
###########################
#convert load to array
PGEV_load = df_load.loc[:,'PGE_V'].values
#remove NaNs
a = np.argwhere(np.isnan(PGEV_load))
for i in a:
PGEV_load[i] = PGEV_load[i+24]
peaks = np.zeros((num_days,1))
#find peaks
for i in range(0,num_days):
peaks[i] = np.max(PGEV_load[i*24:i*24+24])
#Separate data by weighted temperature
M = np.column_stack((weighted_AvgT,peaks,dow,HDD,CDD,HDD_wind,CDD_wind))
M_sim=np.column_stack((weighted_SimT,sim_dow,HDD_sim,CDD_sim,HDD_wind_sim,CDD_wind_sim))
X80p = M[(M[:,0] >= 80),2:]
y80p = M[(M[:,0] >= 80),1]
X75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),2:]
y75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),1]
X70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),2:]
y70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),1]
X65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),2:]
y65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),1]
X60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),2:]
y60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),1]
X55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),2:]
y55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),1]
X50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),2:]
y50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),1]
X50 = M[(M[:,0] < 50),2:]
y50 = M[(M[:,0] < 50),1]
X80p_Sim = M_sim[(M_sim[:,0] >= 80),1:]
X75_80_Sim = M_sim[(M_sim[:,0] >= 75) & (M_sim[:,0] < 80),1:]
X70_75_Sim = M_sim[(M_sim[:,0] >= 70) & (M_sim[:,0] < 75),1:]
X65_70_Sim = M_sim[(M_sim[:,0] >= 65) & (M_sim[:,0] < 70),1:]
X60_65_Sim = M_sim[(M_sim[:,0] >= 60) & (M_sim[:,0] < 65),1:]
X55_60_Sim = M_sim[(M_sim[:,0] >= 55) & (M_sim[:,0] < 60),1:]
X50_55_Sim = M_sim[(M_sim[:,0] >= 50) & (M_sim[:,0] < 55),1:]
X50_Sim = M_sim[(M_sim[:,0] < 50),1:]
##multivariate regression
#
#Create linear regression object
reg80p = linear_model.LinearRegression()
reg75_80 = linear_model.LinearRegression()
reg70_75 = linear_model.LinearRegression()
reg65_70 = linear_model.LinearRegression()
reg60_65 = linear_model.LinearRegression()
reg55_60 = linear_model.LinearRegression()
reg50_55 = linear_model.LinearRegression()
reg50m = linear_model.LinearRegression()
## Train the model using the training sets
if len(y80p) > 0:
reg80p.fit(X80p,y80p)
if len(y75_80) > 0:
reg75_80.fit(X75_80,y75_80)
if len(y70_75) > 0:
reg70_75.fit(X70_75,y70_75)
if len(y65_70) > 0:
reg65_70.fit(X65_70,y65_70)
if len(y60_65) > 0:
reg60_65.fit(X60_65,y60_65)
if len(y55_60) > 0:
reg55_60.fit(X55_60,y55_60)
if len(y50_55) > 0:
reg50_55.fit(X50_55,y50_55)
if len(y50) > 0:
reg50m.fit(X50,y50)
# Make predictions using the testing set
predicted = []
for i in range(0,num_days):
s = M[i,2:]
s = s.reshape((1,len(s)))
if M[i,0]>=80:
y_hat = reg80p.predict(s)
elif M[i,0] >= 75 and M[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M[i,0] >= 70 and M[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M[i,0] >= 65 and M[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M[i,0] >= 60 and M[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M[i,0] >= 55 and M[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M[i,0] >= 50 and M[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M[i,0] < 50:
y_hat = reg50m.predict(s)
predicted = np.append(predicted,y_hat)
PGEV_p = predicted.reshape((len(predicted),1))
simulated=[]
for i in range(0,sim_days):
s = M_sim[i,1:]
s = s.reshape((1,len(s)))
if M_sim[i,0]>=80:
y_hat = reg80p.predict(s)
elif M_sim[i,0] >= 75 and M_sim[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M_sim[i,0] >= 70 and M_sim[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M_sim[i,0] >= 65 and M_sim[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M_sim[i,0] >= 60 and M_sim[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M_sim[i,0] >= 55 and M_sim[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M_sim[i,0] >= 50 and M_sim[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M_sim[i,0] < 50:
y_hat = reg50m.predict(s)
simulated = np.append(simulated,y_hat)
PGEV_sim = simulated.reshape((len(simulated),1))
a=st.pearsonr(peaks,PGEV_p)
print(a[0]**2, a[1])
# Residuals
PGEVresiduals = PGEV_p - peaks
PGEV_y = peaks
# RMSE
RMSE = (np.sum((PGEVresiduals**2))/len(PGEVresiduals))**.5
###########################
# CAISO - PG&E Bay
###########################
#convert load to array
PGEB_load = df_load.loc[:,'PGE_B'].values
#remove NaNs
a = np.argwhere(np.isnan(PGEB_load))
for i in a:
PGEB_load[i] = PGEB_load[i+24]
peaks = np.zeros((num_days,1))
#find peaks
for i in range(0,num_days):
peaks[i] = np.max(PGEB_load[i*24:i*24+24])
#Separate data by weighted temperature
M = np.column_stack((weighted_AvgT,peaks,dow,HDD,CDD,HDD_wind,CDD_wind))
M_sim=np.column_stack((weighted_SimT,sim_dow,HDD_sim,CDD_sim,HDD_wind_sim,CDD_wind_sim))
X80p = M[(M[:,0] >= 80),2:]
y80p = M[(M[:,0] >= 80),1]
X75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),2:]
y75_80 = M[(M[:,0] >= 75) & (M[:,0] < 80),1]
X70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),2:]
y70_75 = M[(M[:,0] >= 70) & (M[:,0] < 75),1]
X65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),2:]
y65_70 = M[(M[:,0] >= 65) & (M[:,0] < 70),1]
X60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),2:]
y60_65 = M[(M[:,0] >= 60) & (M[:,0] < 65),1]
X55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),2:]
y55_60 = M[(M[:,0] >= 55) & (M[:,0] < 60),1]
X50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),2:]
y50_55 = M[(M[:,0] >= 50) & (M[:,0] < 55),1]
X50 = M[(M[:,0] < 50),2:]
y50 = M[(M[:,0] < 50),1]
X80p_Sim = M_sim[(M_sim[:,0] >= 80),1:]
X75_80_Sim = M_sim[(M_sim[:,0] >= 75) & (M_sim[:,0] < 80),1:]
X70_75_Sim = M_sim[(M_sim[:,0] >= 70) & (M_sim[:,0] < 75),1:]
X65_70_Sim = M_sim[(M_sim[:,0] >= 65) & (M_sim[:,0] < 70),1:]
X60_65_Sim = M_sim[(M_sim[:,0] >= 60) & (M_sim[:,0] < 65),1:]
X55_60_Sim = M_sim[(M_sim[:,0] >= 55) & (M_sim[:,0] < 60),1:]
X50_55_Sim = M_sim[(M_sim[:,0] >= 50) & (M_sim[:,0] < 55),1:]
X50_Sim = M_sim[(M_sim[:,0] < 50),1:]
#Create linear regression object
reg80p = linear_model.LinearRegression()
reg75_80 = linear_model.LinearRegression()
reg70_75 = linear_model.LinearRegression()
reg65_70 = linear_model.LinearRegression()
reg60_65 = linear_model.LinearRegression()
reg55_60 = linear_model.LinearRegression()
reg50_55 = linear_model.LinearRegression()
reg50m = linear_model.LinearRegression()
## Train the model using the training sets
if len(y80p) > 0:
reg80p.fit(X80p,y80p)
if len(y75_80) > 0:
reg75_80.fit(X75_80,y75_80)
if len(y70_75) > 0:
reg70_75.fit(X70_75,y70_75)
if len(y65_70) > 0:
reg65_70.fit(X65_70,y65_70)
if len(y60_65) > 0:
reg60_65.fit(X60_65,y60_65)
if len(y55_60) > 0:
reg55_60.fit(X55_60,y55_60)
if len(y50_55) > 0:
reg50_55.fit(X50_55,y50_55)
if len(y50) > 0:
reg50m.fit(X50,y50)
# Make predictions using the testing set
predicted = []
for i in range(0,num_days):
s = M[i,2:]
s = s.reshape((1,len(s)))
if M[i,0]>=80:
y_hat = reg80p.predict(s)
elif M[i,0] >= 75 and M[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M[i,0] >= 70 and M[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M[i,0] >= 65 and M[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M[i,0] >= 60 and M[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M[i,0] >= 55 and M[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M[i,0] >= 50 and M[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M[i,0] < 50:
y_hat = reg50m.predict(s)
predicted = np.append(predicted,y_hat)
PGEB_p = predicted.reshape((len(predicted),1))
simulated=[]
for i in range(0,sim_days):
s = M_sim[i,1:]
s = s.reshape((1,len(s)))
if M_sim[i,0]>=80:
y_hat = reg80p.predict(s)
elif M_sim[i,0] >= 75 and M_sim[i,0] < 80:
y_hat = reg75_80.predict(s)
elif M_sim[i,0] >= 70 and M_sim[i,0] < 75:
y_hat = reg70_75.predict(s)
elif M_sim[i,0] >= 65 and M_sim[i,0] < 70:
y_hat = reg65_70.predict(s)
elif M_sim[i,0] >= 60 and M_sim[i,0] < 65:
y_hat = reg60_65.predict(s)
elif M_sim[i,0] >= 55 and M_sim[i,0] < 60:
y_hat = reg55_60.predict(s)
elif M_sim[i,0] >= 50 and M_sim[i,0] < 55:
y_hat = reg50_55.predict(s)
elif M_sim[i,0] < 50:
y_hat = reg50m.predict(s) #
simulated = np.append(simulated,y_hat)
PGEB_sim = simulated.reshape((len(simulated),1))
#a=st.pearsonr(peaks,PGEB_p)
#print a[0]**2
# Residuals
PGEBresiduals = PGEB_p - peaks
PGEB_y = peaks
# RMSE
RMSE = (np.sum((PGEBresiduals**2))/len(PGEBresiduals))**.5
#Collect residuals from load regression
R = np.column_stack((BPAresiduals,SDGEresiduals,SCEresiduals,PGEVresiduals,PGEBresiduals))
ResidualsLoad = R[0:3*365,:]
###################################
# PATH 46
###################################
#import data
df_data1 = | pd.read_excel('Synthetic_demand_pathflows/46_daily.xlsx',sheet_name='Sheet1',header=0) | pandas.read_excel |
import os
import csv
import sys
import json
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from operator import itemgetter
from datetime import date, datetime
from collections import Counter, defaultdict
from normalize import TextNormalizer
# Constants
BASE = os.path.dirname(__file__)
PUBDATES = os.path.join(BASE, "pubdates.csv") # mapping of docid to pubdate
SERIES = os.path.join(BASE, "wordseries.json") # word:date count
DATEFMT = "%Y-%m-%d"
def docid(fileid):
"""
Returns the docid parsed from the file id
"""
fname = os.path.basename(fileid)
return os.path.splitext(fname)[0]
def parse_date(ts):
"""
Helper function to handle weird mongo datetime output.
"""
return datetime.strptime(ts.split()[0], DATEFMT)
def load_pubdates(fileids, path=PUBDATES):
fileids = frozenset([docid(fileid) for fileid in fileids])
with open(path, 'r') as f:
reader = csv.reader(f)
next(reader) # skip the header
return {
row[0]: parse_date(row[1])
for row in reader if row[0] in fileids and row[1]
}
class WordSeries(object):
@classmethod
def load(klass, path):
"""
Load the word series from disk.
"""
obj = klass()
with open(path, 'r') as f:
for line in f:
data = json.loads(line.strip())
word = data['word']
for dt, val in data['series'].items():
dt = datetime.strptime(dt, DATEFMT)
obj.words[word][dt] = val
return obj
def __init__(self):
# a map of token -> date -> count
self.words = defaultdict(Counter)
def __len__(self):
return len(self.words)
def __getitem__(self, key):
if key not in self.words:
raise KeyError("'{}' not in word list".format(key))
# Return a timeseries for the specified key.
values = []
dates = []
for date, value in sorted(self.words[key].items(), key=itemgetter(0)):
values.append(value)
dates.append(date)
return | pd.Series(values, index=dates, name=key) | pandas.Series |
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import roc_curve, precision_recall_curve, auc, make_scorer, recall_score, accuracy_score, precision_score, confusion_matrix
from sklearn import svm
from sklearn.model_selection import GridSearchCV, StratifiedKFold, RandomizedSearchCV
import pandas as pd
import numpy as np
import csv
np.random.seed(0)
#START: <NAME>
def runRandomForest():
dataFrame = pd.read_csv('processedclevelandPrime.csv')
param_grid = {
'bootstrap': [True, False],
'max_depth': [3, 5, 10, 20, 50, 75, 100, None],
'max_features': ['sqrt', 'log2', None],
'min_samples_leaf': [1, 2, 4, 6, 10],
'min_samples_split': [2, 5, 10],
'n_estimators': [100, 250, 500, 1000, 2000]
}
scorers = {
'precision_score': make_scorer(precision_score),
'recall_score': make_scorer(recall_score),
'accuracy_score': make_scorer(accuracy_score)
}
    # Binarize the target: any positive disease class becomes 1
    dataFrame.loc[dataFrame['num'] > 0, 'num'] = 1
dataFrame['is_train'] = np.random.uniform(0, 1, len(dataFrame)) <= .7
train, test = dataFrame[dataFrame['is_train']==True], dataFrame[dataFrame['is_train']==False]
print('Number of observations in the training data:', len(train))
print('Number of observations in the test data:',len(test))
features = dataFrame.columns[:13]
clf = RandomForestClassifier()
def grid_search_wrapper(refit_score='precision_score'):
skf = StratifiedKFold(n_splits=10)
grid_search = RandomizedSearchCV(clf, param_grid, scoring=scorers, refit=refit_score, cv=skf, return_train_score=True, n_jobs=2, n_iter=500)
grid_search.fit(dataFrame[features], dataFrame['num'])
fin = pd.DataFrame(grid_search.cv_results_)
fin = fin.sort_values(by='mean_test_precision_score' , ascending=False)
print(fin[['mean_test_precision_score', 'mean_test_recall_score', 'mean_test_accuracy_score', 'param_max_depth', 'param_max_features', 'param_min_samples_split', 'param_n_estimators', 'param_bootstrap', 'param_min_samples_leaf']].round(3).head(1))
fin = fin.sort_values(by='mean_test_recall_score' , ascending=False)
print(fin[['mean_test_precision_score', 'mean_test_recall_score', 'mean_test_accuracy_score', 'param_max_depth', 'param_max_features', 'param_min_samples_split', 'param_n_estimators', 'param_bootstrap', 'param_min_samples_leaf']].round(3).head(1))
fin = fin.sort_values(by='mean_test_accuracy_score' , ascending=False)
print(fin[['mean_test_precision_score', 'mean_test_recall_score', 'mean_test_accuracy_score', 'param_max_depth', 'param_max_features', 'param_min_samples_split', 'param_n_estimators', 'param_bootstrap', 'param_min_samples_leaf']].round(3).head(1))
return grid_search
grid_search_clf = grid_search_wrapper(refit_score='precision_score')
def runKNN():
dataFrame = pd.read_csv('processedclevelandPrime.csv')
param_grid = {
'n_neighbors' : np.arange(1, 25, 1)
}
scorers = {
'precision_score': make_scorer(precision_score),
'recall_score': make_scorer(recall_score),
'accuracy_score': make_scorer(accuracy_score)
}
    # Binarize the target: any positive disease class becomes 1
    dataFrame.loc[dataFrame['num'] > 0, 'num'] = 1
    X = np.array(dataFrame.iloc[:, 0:13]) # end index is exclusive
y = np.array(dataFrame['num']) # another way of indexing a pandas df
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=0)
clf = KNeighborsClassifier()
def grid_search_wrapper(refit_score='precision_score'):
skf = StratifiedKFold(n_splits=10)
grid_search = GridSearchCV(clf, param_grid, scoring=scorers, refit=refit_score, cv=skf, return_train_score=True, n_jobs=-1)
grid_search.fit(X, y)
fin = pd.DataFrame(grid_search.cv_results_)
fin = fin.sort_values(by='mean_test_' + refit_score , ascending=False)
print(fin[['mean_test_precision_score', 'mean_test_recall_score', 'mean_test_accuracy_score', 'param_n_neighbors']].round(3).head(1))
fin = fin.sort_values(by='mean_test_recall_score' , ascending=False)
print(fin[['mean_test_precision_score', 'mean_test_recall_score', 'mean_test_accuracy_score', 'param_n_neighbors']].round(3).head(1))
fin = fin.sort_values(by='mean_test_accuracy_score' , ascending=False)
print(fin[['mean_test_precision_score', 'mean_test_recall_score', 'mean_test_accuracy_score', 'param_n_neighbors']].round(3).head(1))
return grid_search
grid_search_clf = grid_search_wrapper(refit_score='precision_score')
#END: <NAME>
#START: <NAME>
def runSVM():
dataFrame = pd.read_csv('processedclevelandPrime.csv')
param_grid = {
'C' : [0.001, 0.01, 0.1, 1, 10],
'gamma':[1e-1, 1, 1e1]
}
scorers = {
'precision_score': make_scorer(precision_score),
'recall_score': make_scorer(recall_score),
'accuracy_score': make_scorer(accuracy_score)
}
    # Binarize the target: any positive disease class becomes 1
    dataFrame.loc[dataFrame['num'] > 0, 'num'] = 1
    X = np.array(dataFrame.iloc[:, 0:13]) # end index is exclusive
y = np.array(dataFrame['num']) # another way of indexing a pandas df
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=0)
clf = svm.SVC(kernel = 'linear', probability=True)
def grid_search_wrapper(refit_score='accuracy_score'):
skf = StratifiedKFold(n_splits=10)
grid_search = GridSearchCV(clf, param_grid, scoring=scorers, refit=refit_score, cv=skf, return_train_score=True, n_jobs=2)
grid_search.fit(X, y)
fin = pd.DataFrame(grid_search.cv_results_)
fin = fin.sort_values(by='mean_test_precision_score' , ascending=False)
print(fin[['mean_test_precision_score', 'mean_test_recall_score', 'mean_test_accuracy_score', 'param_C', 'param_gamma']].round(3).head(1))
fin = fin.sort_values(by='mean_test_recall_score' , ascending=False)
print(fin[['mean_test_precision_score', 'mean_test_recall_score', 'mean_test_accuracy_score', 'param_C', 'param_gamma']].round(3).head(1))
fin = fin.sort_values(by='mean_test_accuracy_score' , ascending=False)
print(fin[['mean_test_precision_score', 'mean_test_recall_score', 'mean_test_accuracy_score', 'param_C', 'param_gamma']].round(3).head(1))
return grid_search
grid_search_clf = grid_search_wrapper(refit_score='precision_score')
def runAdaBoost():
dataFrame = pd.read_csv('processedclevelandPrime.csv')
param_grid = {
'n_estimators' : [50, 100, 250, 500, 1000, 2000],
'learning_rate':[0.001, 0.01, 0.1, 0.2, 0.3, .5, 1]
}
scorers = {
'precision_score': make_scorer(precision_score),
'recall_score': make_scorer(recall_score),
'accuracy_score': make_scorer(accuracy_score)
}
    # Binarize the target: any positive disease class becomes 1
    dataFrame.loc[dataFrame['num'] > 0, 'num'] = 1
    X = np.array(dataFrame.iloc[:, 0:13]) # end index is exclusive
y = np.array(dataFrame['num']) # another way of indexing a pandas df
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=0)
clf = AdaBoostClassifier()
def grid_search_wrapper(refit_score='accuracy_score'):
skf = StratifiedKFold(n_splits=10)
grid_search = GridSearchCV(clf, param_grid, scoring=scorers, refit=refit_score, cv=skf, return_train_score=True, n_jobs=2)
grid_search.fit(X, y)
fin = pd.DataFrame(grid_search.cv_results_)
fin = fin.sort_values(by='mean_test_precision_score' , ascending=False)
print(fin[['mean_test_precision_score', 'mean_test_recall_score', 'mean_test_accuracy_score', 'param_n_estimators', 'param_learning_rate']].round(3).head(1))
fin = fin.sort_values(by='mean_test_recall_score' , ascending=False)
print(fin[['mean_test_precision_score', 'mean_test_recall_score', 'mean_test_accuracy_score', 'param_n_estimators', 'param_learning_rate']].round(3).head(1))
fin = fin.sort_values(by='mean_test_accuracy_score' , ascending=False)
print(fin[['mean_test_precision_score', 'mean_test_recall_score', 'mean_test_accuracy_score', 'param_n_estimators', 'param_learning_rate']].round(3).head(1))
return grid_search
grid_search_clf = grid_search_wrapper(refit_score='precision_score')
#END: <NAME>
#START: <NAME>
def runGradientBoost():
dataFrame = pd.read_csv('processedclevelandPrime.csv')
param_grid = {
'max_depth': [3, 5, 10, 20, 50, 75, 100, None],
'max_features': ['sqrt', 'log2', None],
'min_samples_leaf': [1, 2, 4, 6, 10],
'min_samples_split': [2, 5, 10],
'n_estimators': [100, 250, 500, 1000, 2000],
'learning_rate': [0.001, 0.01, 0.1, 0.2, 0.3, .5, 1]
}
scorers = {
'precision_score': make_scorer(precision_score),
'recall_score': make_scorer(recall_score),
'accuracy_score': make_scorer(accuracy_score)
}
    # Binarize the target: any positive disease class becomes 1
    dataFrame.loc[dataFrame['num'] > 0, 'num'] = 1
    X = np.array(dataFrame.iloc[:, 0:13]) # end index is exclusive
y = np.array(dataFrame['num']) # another way of indexing a pandas df
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
clf = GradientBoostingClassifier(random_state=0)
def grid_search_wrapper(refit_score='accuracy_score'):
skf = StratifiedKFold(n_splits=10)
grid_search = RandomizedSearchCV(clf, param_grid, scoring=scorers, refit=refit_score, cv=skf, return_train_score=True, n_jobs=2, n_iter=500)
grid_search.fit(X, y)
fin = pd.DataFrame(grid_search.cv_results_)
fin = fin.sort_values(by='mean_test_precision_score' , ascending=False)
print(fin[['mean_test_precision_score', 'mean_test_recall_score', 'mean_test_accuracy_score', 'param_max_depth', 'param_max_features', 'param_min_samples_split', 'param_n_estimators', 'param_min_samples_leaf', 'param_learning_rate']].round(3).head(1))
fin = fin.sort_values(by='mean_test_recall_score' , ascending=False)
print(fin[['mean_test_precision_score', 'mean_test_recall_score', 'mean_test_accuracy_score', 'param_max_depth', 'param_max_features', 'param_min_samples_split', 'param_n_estimators', 'param_min_samples_leaf', 'param_learning_rate']].round(3).head(1))
fin = fin.sort_values(by='mean_test_accuracy_score' , ascending=False)
print(fin[['mean_test_precision_score', 'mean_test_recall_score', 'mean_test_accuracy_score', 'param_max_depth', 'param_max_features', 'param_min_samples_split', 'param_n_estimators', 'param_min_samples_leaf', 'param_learning_rate']].round(3).head(1))
return grid_search
grid_search_clf = grid_search_wrapper(refit_score='precision_score')
#END: <NAME>
#START: <NAME>
def adjusted_classes(y_scores, t):
    """Binarize probability scores using decision threshold t."""
    return [1 if y >= t else 0 for y in y_scores]
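# Worked example (illustrative): adjusted_classes([0.2, 0.7, 0.9], 0.5) -> [0, 1, 1]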
def thresholds(y_test, y_pred):
    """Scan decision thresholds from 1.0 down to 0 and report the one that
    minimizes false negatives, together with its FN count and accuracy."""
    aScore = None
leastFN = np.inf
lowT = 0
for t in np.arange(1.00, 0, -.01):
FN = 0
y_adjusted = adjusted_classes(y_pred, t)
for i in range(len(y_adjusted)):
if y_adjusted[i]==0 and y_test[i] != y_adjusted[i]:
FN += 1
if(FN < leastFN):
aScore = accuracy_score(y_test, y_adjusted)
leastFN = FN
lowT = t
print(lowT)
print(leastFN)
print(aScore)
print('\n')
#for a in aScores:
# print(a)
def getThresholds():
dataFrame = | pd.read_csv('processedclevelandPrime.csv') | pandas.read_csv |
import pandas as pd
chrom_sizes = pd.Series(
{1: 249250621,
10: 135534747,
11: 135006516,
12: 133851895,
13: 115169878,
14: 107349540,
15: 102531392,
16: 90354753,
17: 81195210,
18: 78077248,
19: 59128983,
2: 243199373,
20: 63025520,
21: 48129895,
22: 51304566,
3: 198022430,
4: 191154276,
5: 180915260,
6: 171115067,
7: 159138663,
8: 146364022,
9: 141213431,
}
)
chrom_sizes_norm = chrom_sizes / chrom_sizes.max()
def _make_tableau20():
# tableau20 from # http://www.randalolson.com/2014/06/28/how-to-make-beautiful-data-visualizations-in-python-with-matplotlib/
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
(44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
# Scale the RGB values to the [0, 1] range, which is the format matplotlib
# accepts.
for i in range(len(tableau20)):
r, g, b = tableau20[i]
tableau20[i] = (r / 255., g / 255., b / 255.)
return tableau20
tableau20 = _make_tableau20()
def generate_null_snvs(df, snvs, num_null_sets=5):
"""
Generate a set of null SNVs based on an input list of SNVs and categorical
annotations.
Parameters
----------
df : pandas.DataFrame
Pandas dataframe where each column is a categorization of SNPs.
The index should be SNPs of the form chrom:pos.
snvs : list
List of input SNVs in the format chrom:pos. Entries that aren't in
the index of df will be dropped.
num_null_sets : int
Number of sets of null SNVs to generate.
Returns
-------
null_sets : pandas.Dataframe
Pandas dataframe with input SNVs as first column and null SNVs as
following columns.
"""
import numpy as np
import random
random.seed(20151007)
input_snvs = list(set(df.index) & set(snvs))
    sig = df.loc[input_snvs]
    not_sig = df.loc[list(set(df.index) - set(snvs))]
sig['group'] = sig.apply(lambda x: '::'.join(x), axis=1)
not_sig['group'] = not_sig.apply(lambda x: '::'.join(x), axis=1)
null_sets = []
vc = sig.group.value_counts()
bins = {c:sorted(list(df[c].value_counts().index)) for c in df.columns}
ordered_inputs = []
for i in vc.index:
ordered_inputs += list(sig[sig.group == i].index)
tdf = not_sig[not_sig.group == i]
count = vc[i]
for n in range(num_null_sets):
if tdf.shape[0] == 0:
groups = [i]
while tdf.shape[0] == 0:
# If there are no potential null SNVs in this group, we'll
# expand the group randomly.
g = groups[-1]
# Choose random bin.
cols = list(not_sig.columns)
cols.remove('group')
b = random.choice(cols)
# Get possibilities for that bin.
t = bins[b]
# Get last set of bin values and the value for the bin we
# want to change.
d = dict(list(zip(not_sig.columns, g.split('::'))))
cat = d[b]
# Randomly walk away from bin value.
ind = t.index(cat)
if ind == 0:
ind += 1
elif ind == len(t) - 1:
ind -= 1
else:
ind += random.choice([-1, 1])
d[b] = t[ind]
                    groups.append('::'.join(pd.Series(d)[cols].astype(str)))
tdf = not_sig[not_sig.group.apply(lambda x: x in groups)]
if count <= tdf.shape[0]:
ind = random.sample(tdf.index, count)
else:
ind = list(np.random.choice(tdf.index, size=count, replace=True))
if i == vc.index[0]:
null_sets.append(ind)
else:
null_sets[n] += ind
null_sets = pd.DataFrame(null_sets).T
null_sets.columns = ['null_{}'.format(x) for x in null_sets.columns]
cs = list(null_sets.columns)
null_sets['input'] = ordered_inputs
null_sets = null_sets[['input'] + cs]
return null_sets
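# Hedged usage sketch for generate_null_snvs: the annotation dataframe and the SNV
# list below are made-up placeholders; in practice df would hold one categorical
# annotation column per SNV (e.g. MAF bin, gene density bin) indexed by chrom:pos.
def _example_generate_null_snvs():
    annot = pd.DataFrame(
        {'maf_bin': ['low', 'low', 'high', 'high', 'low'],
         'gene_density_bin': ['a', 'b', 'a', 'b', 'a']},
        index=['chr1:100', 'chr1:200', 'chr2:300', 'chr2:400', 'chr3:500'])
    gwas_snvs = ['chr1:100', 'chr2:300']
    return generate_null_snvs(annot, gwas_snvs, num_null_sets=2)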
def make_grasp_phenotype_file(fn, pheno, out):
"""
Subset the GRASP database on a specific phenotype.
Parameters
----------
fn : str
Path to GRASP database file.
pheno : str
Phenotype to extract from database.
    out : str
Path to output file for subset of GRASP database.
"""
import subprocess
c = 'awk -F "\\t" \'NR == 1 || $12 == "{}" \' {} > {}'.format(
pheno.replace("'", '\\x27'), fn, out)
subprocess.check_call(c, shell=True)
def parse_grasp_gwas(fn):
"""
Read GRASP database and filter for unique hits.
Parameters
----------
fn : str
Path to (subset of) GRASP database.
Returns
-------
df : pandas.DataFrame
Pandas dataframe with de-duplicated, significant SNPs. The index is of
the form chrom:pos where pos is the one-based position of the SNP. The
columns are chrom, start, end, rsid, and pvalue. rsid may be empty or
not actually an RSID. chrom, start, end make a zero-based bed file with
the SNP coordinates.
"""
df = pd.read_table(fn, low_memory=False)
df = df[df.Pvalue < 1e-5]
    df = df.sort_values(by=['chr(hg19)', 'pos(hg19)', 'Pvalue'])
df = df.drop_duplicates(subset=['chr(hg19)', 'pos(hg19)'])
df = df[df.Pvalue < 1e-5]
df['chrom'] = 'chr' + df['chr(hg19)'].astype(str)
df['end'] = df['pos(hg19)']
df['start'] = df.end - 1
df['rsid'] = df['SNPid(in paper)']
df['pvalue'] = df['Pvalue']
df = df[['chrom', 'start', 'end', 'rsid', 'pvalue']]
df.index = df['chrom'].astype(str) + ':' + df['end'].astype(str)
return df
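# Hedged example of chaining the two GRASP helpers above; the file paths and the
# phenotype string are placeholders, not files shipped with this module.
def _example_grasp_workflow():
    make_grasp_phenotype_file('GRASP_full_database.tsv', 'Type 2 diabetes',
                              'grasp_t2d_subset.tsv')
    return parse_grasp_gwas('grasp_t2d_subset.tsv')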
def parse_roadmap_gwas(fn):
"""
Read Roadmap GWAS file and filter for unique, significant (p < 1e-5)
SNPs.
Parameters
----------
fn : str
        Path to Roadmap GWAS file.
Returns
-------
df : pandas.DataFrame
Pandas dataframe with de-duplicated, significant SNPs. The index is of
the form chrom:pos where pos is the one-based position of the SNP. The
columns are chrom, start, end, rsid, and pvalue. rsid may be empty or
not actually an RSID. chrom, start, end make a zero-based bed file with
the SNP coordinates.
"""
df = pd.read_table(fn, low_memory=False,
names=['chrom', 'start', 'end', 'rsid', 'pvalue'])
df = df[df.pvalue < 1e-5]
    df = df.sort_values(by=['chrom', 'start', 'pvalue'])
df = df.drop_duplicates(subset=['chrom', 'start'])
df = df[df['chrom'] != 'chrY']
df.index = df['chrom'].astype(str) + ':' + df['end'].astype(str)
return df
def ld_prune(df, ld_beds, snvs=None):
"""
Prune set of GWAS based on LD and significance. A graph of all SNVs is
constructed with edges for LD >= 0.8 and the most significant SNV per
connected component is kept.
Parameters
----------
df : pandas.DataFrame
Pandas dataframe with unique SNVs. The index is of the form chrom:pos
where pos is the one-based position of the SNV. The columns must include
chrom, start, end, and pvalue. chrom, start, end make a zero-based bed
file with the SNV coordinates.
ld_beds : dict
Dict whose keys are chromosomes and whose values are filenames of
tabixed LD bed files. An LD bed file looks like "chr1 11007 11008
11008:11012:1" where the first three columns are the zero-based
half-open coordinate of the SNV and the fourth column has the one-based
        coordinate of the SNV followed by the one-based coordinate of a
different SNV and the LD between them. In this example, the variants are
in perfect LD. The bed file should also contain the reciprocal line for
this LD relationship: "chr1 11011 11012 11012:11008:1".
snvs : list
List of SNVs to filter against. If a SNV is not in this list, it will
not be included. If you are working with GWAS SNPs, this is useful for
filtering out SNVs that aren't in the SNPsnap database for instance.
Returns
-------
out : pandas.DataFrame
Pandas dataframe in the same format as the input dataframe but with only
independent SNVs.
"""
import networkx as nx
import tabix
if snvs:
        df = df.loc[list(set(df.index) & set(snvs))]
keep = set()
for chrom in list(ld_beds.keys()):
tdf = df[df['chrom'].astype(str) == chrom]
if tdf.shape[0] > 0:
f = tabix.open(ld_beds[chrom])
# Make a dict where each key is a SNP and the values are all of the
# other SNPs in LD with the key.
ld_d = {}
for j in tdf.index:
                p = tdf.loc[j, 'end']
ld_d[p] = []
try:
r = f.query(chrom, p - 1, p)
while True:
try:
n = next(r)
p1, p2, r2 = n[-1].split(':')
if float(r2) >= 0.8:
ld_d[p].append(int(p2))
except StopIteration:
break
                except tabix.TabixError:
continue
# Make adjacency matrix for LD.
cols = sorted(list(set(
[item for sublist in list(ld_d.values()) for item in sublist])))
t = pd.DataFrame(0, index=list(ld_d.keys()), columns=cols)
for k in list(ld_d.keys()):
                t.loc[k, ld_d[k]] = 1
t.index = ['{}:{}'.format(chrom, x) for x in t.index]
t.columns = ['{}:{}'.format(chrom, x) for x in t.columns]
# Keep all SNPs not in LD with any others. These will be in the index
# but not in the columns.
keep |= set(t.index) - set(t.columns)
# Filter so we only have SNPs that are in LD with at least one other
# SNP.
ind = list(set(t.columns) & set(t.index))
# Keep one most sig. SNP per connected subgraph.
            t = t.loc[ind, ind]
g = nx.Graph(t.values)
c = nx.connected_components(g)
while True:
try:
sg = next(c)
                    s = tdf.loc[t.index[list(sg)]]
keep.add(s[s.pvalue == s.pvalue.min()].index[0])
except StopIteration:
break
    out = df.loc[list(keep)]
return out
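# Hedged usage sketch for ld_prune: the per-chromosome LD bed paths are placeholders
# and are assumed to be bgzipped and tabix-indexed as described in the docstring.
def _example_ld_prune(gwas_df):
    ld_beds = {'chr{}'.format(c): '/path/to/ld/chr{}.bed.gz'.format(c)
               for c in range(1, 23)}
    return ld_prune(gwas_df, ld_beds)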
def ld_expand(df, ld_beds):
"""
Expand a set of SNVs into all SNVs with LD >= 0.8 and return a BedTool of
the expanded SNPs.
Parameters
----------
df : pandas.DataFrame
Pandas dataframe with SNVs. The index is of the form chrom:pos where pos
is the one-based position of the SNV. The columns are chrom, start, end.
chrom, start, end make a zero-based bed file with the SNV coordinates.
ld_beds : dict
Dict whose keys are chromosomes and whose values are filenames of
tabixed LD bed files. The LD bed files should be formatted like this:
chr1 14463 14464 14464:51479:0.254183
        where the first three columns indicate the zero-based coordinates of
        a SNV and the fourth column has the one-based coordinate of that
SNV, the one-based coordinate of another SNV on the same chromosome, and
the LD between these SNVs (all separated by colons).
Returns
-------
bt : pybedtools.BedTool
        BedTool with input SNVs and SNVs they are in LD with.
"""
import pybedtools as pbt
import tabix
out_snps = []
for chrom in list(ld_beds.keys()):
t = tabix.open(ld_beds[chrom])
tdf = df[df['chrom'].astype(str) == chrom]
for ind in tdf.index:
            p = tdf.loc[ind, 'end']
out_snps.append('{}\t{}\t{}\t{}\n'.format(chrom, p - 1, p, ind))
try:
r = t.query('{}'.format(chrom), p - 1, p)
while True:
try:
n = next(r)
p1, p2, r2 = n[-1].split(':')
if float(r2) >= 0.8:
out_snps.append('{}\t{}\t{}\t{}\n'.format(
n[0], int(p2) - 1, int(p2), ind))
except StopIteration:
break
except tabix.TabixError:
continue
bt = pbt.BedTool(''.join(out_snps), from_string=True)
bt = bt.sort()
return bt
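# Hedged usage sketch for ld_expand, reusing the same (placeholder) tabixed LD beds;
# the resulting BedTool can be intersected with annotation intervals downstream.
def _example_ld_expand(independent_df, ld_beds):
    expanded_bt = ld_expand(independent_df, ld_beds)
    return expanded_bt.to_dataframe(names=['chrom', 'start', 'end', 'name'])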
def liftover_bed(
bed,
chain,
mapped=None,
unmapped=None,
liftOver_path='liftOver',
):
"""
Lift over a bed file using a given chain file.
Parameters
----------
bed : str or pybedtools.BedTool
Coordinates to lift over.
chain : str
Path to chain file to use for lift over.
mapped : str
Path for bed file with coordinates that are lifted over correctly.
unmapped : str
Path for text file to store coordinates that did not lift over
correctly. If this is not provided, these are discarded.
liftOver_path : str
Path to liftOver executable if not in path.
Returns
-------
new_coords : pandas.DataFrame
Pandas data frame with lift over results. Index is old coordinates in
the form chrom:start-end and columns are chrom, start, end and loc
(chrom:start-end) in new coordinate system.
"""
    import subprocess
    import sys
    import pybedtools as pbt
if mapped == None:
import tempfile
mapped = tempfile.NamedTemporaryFile()
mname = mapped.name
else:
mname = mapped
if unmapped == None:
import tempfile
unmapped = tempfile.NamedTemporaryFile()
uname = unmapped.name
else:
uname = unmapped
if type(bed) == str:
bt = pbt.BedTool(bed)
elif type(bed) == pbt.bedtool.BedTool:
bt = bed
else:
sys.exit(1)
bt = bt.sort()
c = '{} {} {} {} {}'.format(liftOver_path, bt.fn, chain, mname, uname)
subprocess.check_call(c, shell=True)
with open(uname) as f:
missing = pbt.BedTool(''.join([x for x in f.readlines()[1::2]]),
from_string=True)
bt = bt.subtract(missing)
bt_mapped = pbt.BedTool(mname)
old_loc = []
for r in bt:
old_loc.append('{}:{}-{}'.format(r.chrom, r.start, r.end))
new_loc = []
new_chrom = []
new_start = []
new_end = []
for r in bt_mapped:
new_loc.append('{}:{}-{}'.format(r.chrom, r.start, r.end))
new_chrom.append(r.chrom)
new_start.append(r.start)
new_end.append(r.end)
new_coords = pd.DataFrame({'loc':new_loc, 'chrom': new_chrom,
'start': new_start, 'end': new_end},
index=old_loc)
for f in [mapped, unmapped]:
try:
f.close()
except AttributeError:
continue
return new_coords
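# Hedged usage sketch for liftover_bed: the chain file path is a placeholder and the
# liftOver executable is assumed to be available on the PATH.
def _example_liftover(bed_fn):
    return liftover_bed(bed_fn, '/path/to/hg19ToHg38.over.chain.gz',
                        mapped='mapped.bed', unmapped='unmapped.txt')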
def deseq2_size_factors(counts, meta, design):
"""
Get size factors for counts using DESeq2.
Parameters
----------
counts : pandas.DataFrame
Counts to pass to DESeq2.
meta : pandas.DataFrame
Pandas dataframe whose index matches the columns of counts. This is
passed to DESeq2's colData.
design : str
Design like ~subject_id that will be passed to DESeq2. The design
variables should match columns in meta.
Returns
-------
sf : pandas.Series
Series whose index matches the columns of counts and whose values are
the size factors from DESeq2. Divide each column by its size factor to
obtain normalized counts.
"""
import rpy2.robjects as r
from rpy2.robjects import pandas2ri
pandas2ri.activate()
r.r('suppressMessages(library(DESeq2))')
r.globalenv['counts'] = counts
r.globalenv['meta'] = meta
r.r('dds = DESeqDataSetFromMatrix(countData=counts, colData=meta, '
'design={})'.format(design))
r.r('dds = estimateSizeFactors(dds)')
r.r('sf = sizeFactors(dds)')
sf = r.globalenv['sf']
return pd.Series(sf, index=counts.columns)
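# Hedged sketch of how the size factors are typically applied: dividing each sample
# (column) of the count matrix by its size factor gives DESeq2-style normalized counts.
def _example_normalize_counts(counts, meta):
    sf = deseq2_size_factors(counts, meta, design='~subject_id')
    return counts / sf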
def goseq_gene_enrichment(genes, sig, plot_fn=None, length_correct=True):
"""
Perform goseq enrichment for an Ensembl gene set.
Parameters
----------
genes : list
List of all genes as Ensembl IDs.
sig : list
List of boolean values indicating whether each gene is significant or
not.
plot_fn : str
Path to save length bias plot to. If not provided, the plot is deleted.
length_correct : bool
Correct for length bias.
Returns
-------
go_results : pandas.DataFrame
Dataframe with goseq results as well as Benjamini-Hochberg correct
p-values.
"""
import os
import readline
import statsmodels.stats.multitest as smm
import rpy2.robjects as r
genes = list(genes)
sig = [bool(x) for x in sig]
r.r('suppressMessages(library(goseq))')
r.globalenv['genes'] = list(genes)
r.globalenv['group'] = list(sig)
r.r('group = as.logical(group)')
r.r('names(group) = genes')
r.r('pwf = nullp(group, "hg19", "ensGene")')
if length_correct:
r.r('wall = goseq(pwf, "hg19", "ensGene")')
else:
r.r('wall = goseq(pwf, "hg19", "ensGene", method="Hypergeometric")')
r.r('t = as.data.frame(wall)')
t = r.globalenv['t']
go_results = pd.DataFrame(columns=list(t.colnames))
for i, c in enumerate(go_results.columns):
go_results[c] = list(t[i])
    reject, corrected, _, _ = smm.multipletests(
        go_results.over_represented_pvalue, alpha=0.05, method='fdr_bh')
    go_results['over_represented_pvalue_bh'] = corrected
    reject, corrected, _, _ = smm.multipletests(
        go_results.under_represented_pvalue, alpha=0.05, method='fdr_bh')
    go_results['under_represented_pvalue_bh'] = corrected
go_results.index = go_results.category
go_results = go_results.drop('category', axis=1)
if plot_fn and os.path.exists('Rplots.pdf'):
from os import rename
rename('Rplots.pdf', plot_fn)
elif os.path.exists('Rplots.pdf'):
from os import remove
remove('Rplots.pdf')
return go_results
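# Hedged usage sketch for goseq_gene_enrichment: `de_results` is a hypothetical
# dataframe indexed by Ensembl gene ID with an adjusted p-value column named padj.
def _example_goseq(de_results):
    genes = list(de_results.index)
    sig = list(de_results.padj < 0.05)
    return goseq_gene_enrichment(genes, sig, plot_fn='goseq_length_bias.pdf')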
def categories_to_colors(cats, colormap=None):
"""
Map categorical data to colors.
Parameters
----------
cats : pandas.Series or list
Categorical data as a list or in a Series.
colormap : list
List of RGB triples. If not provided, the tableau20 colormap defined in
this module will be used.
Returns
-------
legend : pd.Series
Series whose values are colors and whose index are the original
categories that correspond to those colors.
"""
if colormap is None:
colormap = tableau20
if type(cats) != pd.Series:
cats = pd.Series(cats)
legend = pd.Series(dict(list(zip(set(cats), colormap))))
# colors = pd.Series([legend[x] for x in cats.values], index=cats.index)
# I've removed this output:
# colors : pd.Series
# Series whose values are the colors for each category. If cats was a
# Series, then out will have the same index as cats.
return(legend)
def plot_color_legend(legend, horizontal=False, ax=None):
"""
Plot a pandas Series with labels and colors.
Parameters
----------
legend : pandas.Series
Pandas Series whose values are RGB triples and whose index contains
categorical labels.
horizontal : bool
If True, plot horizontally.
ax : matplotlib.axis
Axis to plot on.
Returns
-------
ax : matplotlib.axis
Plot axis.
"""
import matplotlib.pyplot as plt
import numpy as np
t = np.array([np.array([x for x in legend])])
if ax is None:
fig, ax = plt.subplots(1, 1)
if horizontal:
ax.imshow(t, interpolation='none')
ax.set_yticks([])
ax.set_xticks(np.arange(0, legend.shape[0]))
t = ax.set_xticklabels(legend.index)
else:
t = t.reshape([legend.shape[0], 1, 3])
ax.imshow(t, interpolation='none')
ax.set_xticks([])
ax.set_yticks(np.arange(0, legend.shape[0]))
t = ax.set_yticklabels(legend.index)
return ax
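# Hedged sketch tying the two helpers above together: map a categorical sample
# annotation to tableau20 colors and plot the resulting color legend.
def _example_color_legend(sample_groups):
    legend = categories_to_colors(sample_groups)
    ax = plot_color_legend(legend, horizontal=True)
    return legend, ax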
def make_color_legend_rects(colors, labels=None):
"""
Make list of rectangles and labels for making legends.
Parameters
----------
colors : pandas.Series or list
Pandas series whose values are colors and index is labels.
Alternatively, you can provide a list with colors and provide the labels
as a list.
labels : list
If colors is a list, this should be the list of corresponding labels.
Returns
-------
out : pd.Series
Pandas series whose values are matplotlib rectangles and whose index are
the legend labels for those rectangles. You can add each of these
rectangles to your axis using ax.add_patch(r) for r in out then create a
        legend whose handles are out.values and whose labels are
legend_rects.index:
for r in legend_rects:
ax.add_patch(r)
lgd = ax.legend(legend_rects.values, labels=legend_rects.index)
"""
from matplotlib.pyplot import Rectangle
if labels:
d = dict(list(zip(labels, colors)))
se = pd.Series(d)
else:
se = colors
rects = []
for i in se.index:
r = Rectangle((0, 0), 0, 0, fc=se[i])
rects.append(r)
out = pd.Series(rects, index=se.index)
return out
class SVD:
def __init__(self, df, mean_center=True, scale_variance=False, full_matrices=False):
"""
Perform SVD for data matrix using scipy.linalg.svd. Note that this is currently inefficient
for large matrices due to some of the pandas operations.
Parameters
----------
df : pandas.DataFrame
Pandas data frame with data.
mean_center : bool
If True, mean center the rows. This should be done if not already
done.
scale_variance : bool
If True, scale the variance of each row to be one. Combined with
mean centering, this will transform your data into z-scores.
full_matrices : bool
Passed to scipy.linalg.svd. If True, U and Vh are of shape (M, M), (N, N). If False, the
shapes are (M, K) and (K, N), where K = min(M, N).
"""
import copy
self.data_orig = copy.deepcopy(df)
self.data = copy.deepcopy(df)
if mean_center:
self.data = (self.data.T - self.data.mean(axis=1)).T
if scale_variance:
self.data = (self.data.T / self.data.std(axis=1)).T
self._perform_svd(full_matrices)
def _perform_svd(self, full_matrices):
from scipy.linalg import svd
u, s, vh = svd(self.data, full_matrices=full_matrices)
self.u_orig = u
self.s_orig = s
self.vh_orig = vh
self.u = pd.DataFrame(
u,
index=self.data.index,
columns=['PC{}'.format(x) for x in range(1, u.shape[1] + 1)],
)
self.v = pd.DataFrame(
vh.T,
index=self.data.columns,
columns=['PC{}'.format(x) for x in range(1, vh.shape[0] + 1)],
)
index = ['PC{}'.format(x) for x in range(1, len(s) + 1)]
self.s_norm = pd.Series(s / s.sum(), index=index)
def plot_variance_explained(self, cumulative=False, xtick_start=1,
xtick_spacing=1, num_pc=None):
"""
Plot amount of variance explained by each principal component.
Parameters
----------
num_pc : int
Number of principal components to plot. If None, plot all.
cumulative : bool
If True, include cumulative variance.
xtick_start : int
The first principal component to label on the x-axis.
xtick_spacing : int
The spacing between labels on the x-axis.
"""
import matplotlib.pyplot as plt
from numpy import arange
if num_pc:
s_norm = self.s_norm[0:num_pc]
else:
s_norm = self.s_norm
if cumulative:
s_cumsum = s_norm.cumsum()
plt.bar(list(range(s_cumsum.shape[0])), s_cumsum.values,
label='Cumulative', color=(0.17254901960784313,
0.6274509803921569,
0.17254901960784313))
plt.bar(list(range(s_norm.shape[0])), s_norm.values, label='Per PC',
color=(0.12156862745098039, 0.4666666666666667,
0.7058823529411765))
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.ylabel('Variance')
else:
plt.bar(list(range(s_norm.shape[0])), s_norm.values,
color=(0.12156862745098039, 0.4666666666666667,
0.7058823529411765))
plt.ylabel('Proportion variance explained')
plt.xlabel('PC')
plt.xlim(0, s_norm.shape[0])
tick_locs = arange(xtick_start - 1, s_norm.shape[0],
step=xtick_spacing)
# 0.8 is the width of the bars.
tick_locs = tick_locs + 0.4
plt.xticks(tick_locs,
arange(xtick_start, s_norm.shape[0] + 1, xtick_spacing))
def plot_pc_scatter(self, pc1, pc2, v=True, subset=None, ax=None,
color=None, s=None, marker=None, color_name=None,
s_name=None, marker_name=None):
"""
Make a scatter plot of two principal components. You can create
differently colored, sized, or marked scatter points.
Parameters
----------
pc1 : str
String of form PCX where X is the number of the principal component
you want to plot on the x-axis.
pc2 : str
String of form PCX where X is the number of the principal component
you want to plot on the y-axis.
v : bool
If True, use the v matrix for plotting the principal components
(typical if input data was genes as rows and samples as columns).
If False, use the u matrix.
subset : list
Make the scatter plot using only a subset of the rows of u or v.
ax : matplotlib.axes
Plot the scatter plot on this axis.
color : pandas.Series
Pandas series containing a categorical variable to color the scatter
points.
s : pandas.Series
Pandas series containing a categorical variable to size the scatter
points. Currently limited to 7 distinct values (sizes).
marker : pandas.Series
Pandas series containing a categorical variable to choose the marker
type for the scatter points. Currently limited to 21 distinct values
(marker styles).
color_name : str
Name for the color legend if a categorical variable for color is
provided.
s_name : str
Name for the size legend if a categorical variable for size is
provided.
marker_name : str
Name for the marker legend if a categorical variable for marker type
is provided.
Returns
-------
ax : matplotlib.axes._subplots.AxesSubplot
Scatter plot axis.
TODO: Add ability to label points.
"""
import matplotlib.pyplot as plt
import seaborn as sns
        if s is not None:
            assert len(set(s)) <= 7, 'Error: too many values for "s"'
if v:
df = self.v
else:
df = self.u
if color is not None:
if color.unique().shape[0] <= 10:
colormap = pd.Series(dict(list(zip(set(color.values),
tableau20[0:2 * len(set(color)):2]))))
else:
colormap = pd.Series(dict(list(zip(set(color.values),
sns.color_palette('husl', len(set(color)))))))
color = pd.Series([colormap[x] for x in color.values],
index=color.index)
color_legend = True
if not color_name:
color_name = color.index.name
else:
color = pd.Series([tableau20[0]] * df.shape[0], index=df.index)
color_legend = False
if s is not None:
smap = pd.Series(dict(list(zip(
set(s.values), list(range(30, 351))[0::50][0:len(set(s)) + 1]))))
s = pd.Series([smap[x] for x in s.values],
index=s.index)
s_legend = True
if not s_name:
s_name = s.index.name
else:
s = pd.Series(30, index=df.index)
s_legend = False
markers = ['o', '*', 's', 'v', '+', 'x', 'd',
'p', '2', '<', '|', '>', '_', 'h',
'1', '2', '3', '4', '8', '^', 'D']
if marker is not None:
markermap = pd.Series(dict(list(zip(set(marker.values), markers))))
marker = pd.Series([markermap[x] for x in marker.values],
index=marker.index)
marker_legend = True
if not marker_name:
marker_name = marker.index.name
else:
            marker = pd.Series('o', index=df.index)
# Created by <NAME>
# email : <EMAIL>
import json
import os
import time
from concurrent import futures
from copy import deepcopy
from pathlib import Path
from typing import IO, Union, List
from collections import defaultdict
import re
from itertools import tee
import logging
# Non standard libraries
import pandas as pd
from urllib import parse
from aanalytics2 import config, connector, token_provider
from .projects import *
from .requestCreator import RequestCreator
JsonOrDataFrameType = Union[pd.DataFrame, dict]
JsonListOrDataFrameType = Union[pd.DataFrame, List[dict]]
def retrieveToken(verbose: bool = False, save: bool = False, **kwargs)->str:
"""
LEGACY retrieve token directly following the importConfigFile or Configure method.
"""
token_with_expiry = token_provider.get_token_and_expiry_for_config(config.config_object,**kwargs)
token = token_with_expiry['token']
config.config_object['token'] = token
config.config_object['date_limit'] = time.time() + token_with_expiry['expiry'] / 1000 - 500
config.header.update({'Authorization': f'Bearer {token}'})
if verbose:
print(f"token valid till : {time.ctime(time.time() + token_with_expiry['expiry'] / 1000)}")
return token
class Login:
"""
    Class to connect to the login company.
"""
loggingEnabled = False
logger = None
def __init__(self, config: dict = config.config_object, header: dict = config.header, retry: int = 0,loggingObject:dict=None) -> None:
"""
        Instantiate the Login class.
Arguments:
config : REQUIRED : dictionary with your configuration information.
header : REQUIRED : dictionary of your header.
            retry : OPTIONAL : if you want to retry, the number of times to retry
loggingObject : OPTIONAL : If you want to set logging capability for your actions.
"""
if loggingObject is not None and sorted(["level","stream","format","filename","file"]) == sorted(list(loggingObject.keys())):
self.loggingEnabled = True
self.logger = logging.getLogger(f"{__name__}.login")
self.logger.setLevel(loggingObject["level"])
formatter = logging.Formatter(loggingObject["format"])
if loggingObject["file"]:
fileHandler = logging.FileHandler(loggingObject["filename"])
fileHandler.setFormatter(formatter)
self.logger.addHandler(fileHandler)
if loggingObject["stream"]:
streamHandler = logging.StreamHandler()
streamHandler.setFormatter(formatter)
self.logger.addHandler(streamHandler)
self.connector = connector.AdobeRequest(
config_object=config, header=header, retry=retry,loggingEnabled=self.loggingEnabled,logger=self.logger)
self.header = self.connector.header
self.COMPANY_IDS = {}
self.retry = retry
def getCompanyId(self,verbose:bool=False) -> dict:
"""
        Retrieve the company ids for later calls for the properties.
"""
if self.loggingEnabled:
self.logger.debug("getCompanyId start")
res = self.connector.getData(
"https://analytics.adobe.io/discovery/me", headers=self.header)
json_res = res
if self.loggingEnabled:
            self.logger.debug(f"getCompanyId response: {json_res}")
try:
companies = json_res['imsOrgs'][0]['companies']
self.COMPANY_IDS = json_res['imsOrgs'][0]['companies']
return companies
except:
if verbose:
print("exception when trying to get companies with parameter 'all'")
print(json_res)
if self.loggingEnabled:
self.logger.error(f"Error trying to get companyId: {json_res}")
return None
def createAnalyticsConnection(self, companyId: str = None,loggingObject:dict=None) -> object:
"""
Returns an instance of the Analytics class so you can query the different elements from that instance.
Arguments:
companyId: REQUIRED : The globalCompanyId that you want to use in your connection
loggingObject : OPTIONAL : If you want to set logging capability for your actions.
the retry parameter set in the previous class instantiation will be used here.
"""
analytics = Analytics(company_id=companyId,
config_object=self.connector.config, header=self.header, retry=self.retry,loggingObject=loggingObject)
return analytics
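# Hedged end-to-end sketch: it assumes the package configuration (API key, token)
# has already been loaded outside this module, and that each entry returned by
# getCompanyId exposes a 'globalCompanyId' key as in Adobe's discovery response.
def _example_create_connection():
    login = Login()
    companies = login.getCompanyId()
    return login.createAnalyticsConnection(companies[0]['globalCompanyId'])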
class Analytics:
"""
Class that instantiate a connection to a single login company.
"""
# Endpoints
header = {"Accept": "application/json",
"Content-Type": "application/json",
"Authorization": "Bearer ",
"X-Api-Key": ""
}
_endpoint = 'https://analytics.adobe.io/api'
_getRS = '/collections/suites'
_getDimensions = '/dimensions'
_getMetrics = '/metrics'
_getSegments = '/segments'
_getCalcMetrics = '/calculatedmetrics'
_getUsers = '/users'
_getDateRanges = '/dateranges'
_getReport = '/reports'
loggingEnabled = False
logger = None
def __init__(self, company_id: str = None, config_object: dict = config.config_object, header: dict = config.header,
retry: int = 0,loggingObject:dict=None):
"""
Instantiate the Analytics class.
The Analytics class will be automatically connected to the API 2.0.
        You have the possibility to review the connection details by looking into the connector instance.
        "header", "company_id" and "endpoint_company" are attributes accessible for debugging.
Arguments:
company_id : REQUIRED : company ID retrieved by the getCompanyId
            retry : OPTIONAL : Number of times you want to retry failed calls
loggingObject : OPTIONAL : logging object to log actions during runtime.
config_object : OPTIONAL : config object to be used for setting token (do not update if you do not know)
header : OPTIONAL : template header used for all requests (do not update if you do not know!)
"""
if company_id is None:
raise AttributeError(
'Expected "company_id" to be referenced.\nPlease ensure you pass the globalCompanyId when instantiating this class.')
if loggingObject is not None and sorted(["level","stream","format","filename","file"]) == sorted(list(loggingObject.keys())):
self.loggingEnabled = True
self.logger = logging.getLogger(f"{__name__}.analytics")
self.logger.setLevel(loggingObject["level"])
formatter = logging.Formatter(loggingObject["format"])
if loggingObject["file"]:
fileHandler = logging.FileHandler(loggingObject["filename"])
fileHandler.setFormatter(formatter)
self.logger.addHandler(fileHandler)
if loggingObject["stream"]:
streamHandler = logging.StreamHandler()
streamHandler.setFormatter(formatter)
self.logger.addHandler(streamHandler)
self.connector = connector.AdobeRequest(
config_object=config_object, header=header, retry=retry,loggingEnabled=self.loggingEnabled,logger=self.logger)
self.header = self.connector.header
self.connector.header['x-proxy-global-company-id'] = company_id
self.header['x-proxy-global-company-id'] = company_id
self.endpoint_company = f"{self._endpoint}/{company_id}"
self.company_id = company_id
self.listProjectIds = []
self.projectsDetails = {}
self.segments = []
self.calculatedMetrics = []
try:
import importlib.resources as pkg_resources
pathLOGS = pkg_resources.path(
"aanalytics2", "eventType_usageLogs.pickle")
except ImportError:
try:
# Try backported to PY<37 `importlib_resources`.
import pkg_resources
pathLOGS = pkg_resources.resource_filename(
"aanalytics2", "eventType_usageLogs.pickle")
except:
print('Empty LOGS_EVENT_TYPE attribute')
try:
with pathLOGS as f:
self.LOGS_EVENT_TYPE = pd.read_pickle(f)
except:
self.LOGS_EVENT_TYPE = "no data"
def __str__(self)->str:
obj = {
"endpoint" : self.endpoint_company,
"companyId" : self.company_id,
"header" : self.header,
"token" : self.connector.config['token']
}
return json.dumps(obj,indent=4)
def __repr__(self)->str:
obj = {
"endpoint" : self.endpoint_company,
"companyId" : self.company_id,
"header" : self.header,
"token" : self.connector.config['token']
}
return json.dumps(obj,indent=4)
def refreshToken(self, token: str = None):
if token is None:
raise AttributeError(
'Expected "token" to be referenced.\nPlease ensure you pass the token.')
self.header['Authorization'] = "Bearer " + token
def decodeAArequests(self,file:IO=None,urls:Union[list,str]=None,save:bool=False,**kwargs)->pd.DataFrame:
"""
        Takes either of the parameters, loads the Adobe Analytics request URLs and decomposes them into a dataframe that you can save if you want.
Arguments:
file : OPTIONAL : file referencing the different requests saved (excel, or txt)
urls : OPTIONAL : list of requests (or a single request) that you want to decode.
save : OPTIONAL : parameter to save your decode list into a csv file.
Returns a dataframe.
possible kwargs:
encoding : the type of encoding to decode the file
"""
if self.loggingEnabled:
self.logger.debug(f"Starting decodeAArequests")
if file is None and urls is None:
raise ValueError("Require at least file or urls to contains data")
if file is not None:
if '.txt' in file:
with open(file,'r',encoding=kwargs.get('encoding','utf-8')) as f:
urls = f.readlines() ## passing decoding to urls
elif '.xlsx' in file:
temp_df = pd.read_excel(file,header=None)
urls = list(temp_df[0]) ## passing decoding to urls
if urls is not None:
if type(urls) == str:
data = parse.parse_qsl(urls)
df = pd.DataFrame(data)
df.columns = ['index','request']
df.set_index('index',inplace=True)
if save:
df.to_csv(f'request_{int(time.time())}.csv')
return df
elif type(urls) == list: ## decoding list of strings
tmp_list = [parse.parse_qsl(data) for data in urls]
                tmp_dfs = [pd.DataFrame(data) for data in tmp_list]
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 14 19:56:42 2021
@author: vyass
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
df = pd.read_csv('eda_data.csv')
# choose relevant columns
df.columns
df_model =df[['avg_salary','Rating','Size','Type of ownership','Industry','Sector','Revenue','num_comp','hourly','employer_provided','job_state','same_state','age','python_yn','spark','aws','excel','job_simp','seniority','desc_len']]
# get dummy data
df_dum = pd.get_dummies(df_model)
#%%
import logging
logging.basicConfig(filename='covi19_dashboarder.log',
level=logging.ERROR,
format='%(asctime)s %(message)s')
logger = logging.getLogger("covi19_dashboarder")
class Preprocessor():
def __init__(self):
from pathlib import Path
self.current_data_confirmed_ = None
self.current_data_deaths_ = None
self.final_data_path_ = 'https://raw.githubusercontent.com/GermanCM/Covid19_data_analyzer/master/data/covid19_ts_data.csv'
def change_date_format(self, x):
try:
date_elements = x.split('/')
year = '20'+date_elements[2]
            day = date_elements[1].zfill(2)
            month = date_elements[0].zfill(2)
return year+'-'+month+'-'+day
except Exception as exc:
return exc
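    # Illustrative example: change_date_format('3/7/21') returns '2021-03-07',
    # matching the M/D/YY column headers used in the Johns Hopkins time-series files.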
def get_current_data(self, ts_all_data_columns):
try:
from datetime import datetime, timedelta
from tqdm import tqdm
import pandas as pd
import numpy as np
####################TIME SERIES FILES
DATA_PATH_CONFIRMED = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'
DATA_PATH_DEATHS = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv'
#DATA_PATH_RECOVERED = 'https://raw.githubusercontent.com/GermanCM/COVID-19/my_updated_master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv'
self.current_data_confirmed_ = pd.read_csv(filepath_or_buffer=DATA_PATH_CONFIRMED, sep=',')
self.current_data_deaths_ = pd.read_csv(filepath_or_buffer=DATA_PATH_DEATHS, sep=',')
#current_data_recovered = pd.read_csv(filepath_or_buffer=DATA_PATH_RECOVERED, sep=',')
ts_all_data = pd.DataFrame(columns=ts_all_data_columns)
time_columns = self.current_data_confirmed_.columns[4:]
            date_new_names = pd.Series(time_columns)
import pandas as pd
########### Add State Info ################
def add_state_abbrev(df, left):
us_state_abbrev = {
'Alabama': 'AL', 'Alaska': 'AK', 'Arizona': 'AZ', 'Arkansas': 'AR', 'California': 'CA', 'Colorado': 'CO',
'Connecticut': 'CT', 'Delaware': 'DE', 'Florida': 'FL', 'Georgia': 'GA', 'Hawaii': 'HI', 'Idaho': 'ID',
'Illinois': 'IL', 'Indiana': 'IN', 'Iowa': 'IA', 'Kansas': 'KS', 'Kentucky': 'KY', 'Louisiana': 'LA',
'Maine': 'ME', 'Maryland': 'MD', 'Massachusetts': 'MA', 'Michigan': 'MI', 'Minnesota': 'MN', 'Mississippi': 'MS',
'Missouri': 'MO', 'Montana': 'MT', 'Nebraska': 'NE', 'Nevada': 'NV', 'New Hampshire': 'NH', 'New Jersey': 'NJ',
'New Mexico': 'NM', 'New York': 'NY', 'North Carolina': 'NC', 'North Dakota': 'ND', 'Ohio': 'OH', 'Oklahoma': 'OK',
'Oregon': 'OR', 'Pennsylvania': 'PA', 'Rhode Island': 'RI', 'South Carolina': 'SC', 'South Dakota': 'SD',
'Tennessee': 'TN', 'Texas': 'TX', 'Utah': 'UT', 'Vermont': 'VT', 'Virginia': 'VA', 'Washington': 'WA',
'West Virginia': 'WV', 'Wisconsin': 'WI', 'Wyoming': 'WY', 'District of Columbia' : 'DC'}
us_abbr = pd.DataFrame.from_dict(us_state_abbrev, orient='index')
us_abbr = us_abbr.reset_index()
us_abbr.columns = ['State', 'Abbr']
right = 'State'
df = df.merge(us_abbr, how='inner', left_on=left, right_on=right)
return df
def add_state_region(df, left):
state_region = {'Alabama': 'Southern', 'Alaska': 'Western', 'Arizona': 'Western', 'Arkansas': 'Southern', 'California': 'Western', 'Colorado': 'Western',
'Connecticut': 'Northeastern', 'Delaware': 'Southern', 'Florida': 'Southern', 'Georgia': 'Southern', 'Hawaii': 'Western', 'Idaho': 'Western',
'Illinois': 'Midwestern', 'Indiana': 'Midwestern', 'Iowa': 'Midwestern', 'Kansas': 'Midwestern', 'Kentucky': 'Southern', 'Louisiana': 'Southern',
'Maine': 'Northeastern', 'Maryland': 'Southern', 'Massachusetts': 'Northeastern', 'Michigan': 'Midwestern', 'Minnesota': 'Midwestern', 'Mississippi': 'Southern',
'Missouri': 'Midwestern', 'Montana': 'Western', 'Nebraska': 'Midwestern', 'Nevada': 'Western', 'New Hampshire': 'Northeastern', 'New Jersey': 'Northeastern',
'New Mexico': 'Western', 'New York': 'Northeastern', 'North Carolina': 'Southern', 'North Dakota': 'Midwestern', 'Ohio': 'Midwestern', 'Oklahoma': 'Southern',
'Oregon': 'Western', 'Pennsylvania': 'Northeastern', 'Rhode Island': 'Northeastern', 'South Carolina': 'Southern', 'South Dakota': 'Midwestern',
'Tennessee': 'Southern', 'Texas': 'Southern', 'Utah': 'Western', 'Vermont': 'Northeastern', 'Virginia': 'Southern', 'Washington': 'Western',
'West Virginia': 'Southern', 'Wisconsin': 'Midwestern', 'Wyoming': 'Western', 'District of Columbia' : 'Southern'}
state_region = pd.DataFrame.from_dict(state_region, orient='index')
state_region = state_region.reset_index()
state_region.columns = ['State', 'Region']
right = 'State'
df = df.merge(state_region, how='outer', left_on=left, right_on=right)
return df
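# Hedged usage sketch for the two state helpers above; the input dataframe and its
# column of full state names ('state') are placeholders.
def _example_add_state_info(df):
    df = add_state_abbrev(df, left='state')
    df = add_state_region(df, left='state')
    return df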
########### Consolidating Data ##########
### Location
def consolidate_sell_pop(location_house_sell_time, location_state_pop):
location_house_sell_time.columns = ['state', 'year', 'days_to_sell']
location_state_pop.columns = ['state', 'year', 'population']
merged_loc = location_house_sell_time.merge(location_state_pop, left_on= ['state', 'year'], right_on= ['state', 'year'], how='inner')
return merged_loc
def consolidate_sale_rent(location_rental_prices, location_house_prices):
location_rental_prices.columns = ['state', 'size', 'year', 'rent_value']
location_house_prices.columns = ['state', 'size', 'year', 'sell_value']
housing_merged_loc = location_rental_prices.merge(location_house_prices, left_on= ['state', 'size', 'year'], right_on= ['state', 'size', 'year'], how='inner')
return housing_merged_loc
def group_state_degree_data(df):
loc_field_focus = df.groupby(['State','Field'])['value'].sum().reset_index()
loc_field_focus_totals = df.groupby(['State'])['value'].sum().reset_index()
loc_field_focus_totals['Field'] = 'Total'
    state_ratio = pd.concat([loc_field_focus, loc_field_focus_totals], ignore_index=True)
final =state_ratio.pivot_table(index = 'State', columns = 'Field', values = 'value')
final = append_zscores(final, 'Total', 'Total_z')
return final
def group_age_degree_data(df):
loc_age_focus = df.groupby(['Age Group','Field'])['value'].sum().reset_index()
loc_age_totals = df.groupby(['Age Group'])['value'].sum().reset_index()
loc_age_totals['Field'] = 'Total'
    age_ratio = pd.concat([loc_age_focus, loc_age_totals], ignore_index=True)
final =age_ratio.pivot_table(index = 'Age Group', columns = 'Field', values = 'value')
final = append_zscores(final, 'Total', 'Total_z')
return final
def get_rent_sale_growth():
location_rental_prices = pd.read_csv('Final_Data/ETL/zillow_rental_prices.csv')
    location_house_prices = pd.read_csv('Final_Data/ETL/zillow_house_prices.csv')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 9 17:02:59 2018
@author: bruce
"""
# last version = plot_corr_mx_concate_time_linux_v1.6.0.py
import pandas as pd
import numpy as np
from scipy import fftpack
from scipy import signal
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
import librosa
def correlation_matrix(corr_mx, cm_title):
from matplotlib import pyplot as plt
from matplotlib import cm as cm
fig = plt.figure()
ax1 = fig.add_subplot(111)
#cmap = cm.get_cmap('jet', 30)
cax = ax1.matshow(corr_mx, cmap='gray')
#cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cax)
ax1.grid(False)
plt.title(cm_title)
#plt.title('cross correlation of test and retest')
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
# Add colorbar, make sure to specify tick locations to match desired ticklabels
#fig.colorbar(cax, ticks=[.75,.8,.85,.90,.95,1])
# show digit in matrix
corr_mx_array = np.asarray(corr_mx)
for i in range(22):
for j in range(22):
c = corr_mx_array[j,i]
ax1.text(i, j, round(c,2), va='center', ha='center')
plt.show()
def correlation_matrix_01(corr_mx, cm_title):
# find the maximum in each row
# input corr_mx is a dataframe
# need to convert it into a array first
#otherwise it is not working
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
fig = plt.figure()
ax1 = fig.add_subplot(111)
#cmap = cm.get_cmap('jet', 30)
cs = ax1.matshow(output, cmap='gray')
#cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cs)
ax1.grid(False)
plt.title(cm_title)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
plt.show()
def correlation_matrix_rank(corr_mx, cm_title):
temp = corr_mx
#output = (temp == temp.max(axis=1)[:,None]) # along row
output = temp.rank(axis=1, ascending=False)
fig, ax1 = plt.subplots()
im1 = ax1.matshow(output, cmap=plt.cm.Wistia)
#cs = ax1.matshow(output)
fig.colorbar(im1)
ax1.grid(False)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
plt.title(cm_title)
# show digit in matrix
output = np.asarray(output)
for i in range(22):
for j in range(22):
c = output[j,i]
ax1.text(i, j, int(c), va='center', ha='center')
plt.show()
def correlation_matrix_comb(corr_mx, cm_title):
fig, (ax2, ax3) = plt.subplots(1, 2)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
'''
# graph 1 grayscale
im1 = ax1.matshow(corr_mx, cmap='gray')
# colorbar need numpy version 1.13.1
#fig.colorbar(im1, ax=ax1)
ax1.grid(False)
ax1.set_title(cm_title)
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
# show digit in matrix
corr_mx_array = np.asarray(corr_mx)
for i in range(22):
for j in range(22):
c = corr_mx_array[j,i]
ax1.text(i, j, round(c,2), va='center', ha='center')
'''
# graph 2 yellowscale
corr_mx_rank = corr_mx.rank(axis=1, ascending=False)
cmap_grey = LinearSegmentedColormap.from_list('mycmap', ['white', 'black'])
im2 = ax2.matshow(corr_mx, cmap='viridis')
# colorbar need numpy version 1.13.1
fig.colorbar(im2, ax=ax2)
ax2.grid(False)
ax2.set_title(cm_title)
ax2.set_xticks(np.arange(len(xlabels)))
ax2.set_yticks(np.arange(len(ylabels)))
ax2.set_xticklabels(xlabels,fontsize=6)
ax2.set_yticklabels(ylabels,fontsize=6)
# Add colorbar, make sure to specify tick locations to match desired ticklabels
# show digit in matrix
corr_mx_rank = np.asarray(corr_mx_rank)
for i in range(22):
for j in range(22):
c = corr_mx_rank[j,i]
ax2.text(i, j, int(c), va='center', ha='center')
# graph 3
# find the maximum in each row
# input corr_mx is a dataframe
# need to convert it into a array first
#otherwise it is not working
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
im3 = ax3.matshow(output, cmap='gray')
# colorbar need numpy version 1.13.1
#fig.colorbar(im3, ax=ax3)
ax3.grid(False)
ax3.set_title(cm_title)
ax3.set_xticks(np.arange(len(xlabels)))
ax3.set_yticks(np.arange(len(ylabels)))
ax3.set_xticklabels(xlabels,fontsize=6)
ax3.set_yticklabels(ylabels,fontsize=6)
plt.show()
def correlation_matrix_tt_01(corr_mx, cm_title):
# find the maximum in each row
# input corr_mx is a dataframe
# need to convert it into a array first
#otherwise it is not working
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
fig = plt.figure()
ax1 = fig.add_subplot(111)
#cmap = cm.get_cmap('jet', 30)
cax = ax1.matshow(output, cmap='gray')
#cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cax)
ax1.grid(False)
plt.title(cm_title)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14', 'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
plt.show()
def correlation_matrix_rr_01(corr_mx, cm_title):
# find the maximum in each row
# input corr_mx is a dataframe
# need to convert it into a array first
#otherwise it is not working
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
fig = plt.figure()
ax1 = fig.add_subplot(111)
#cmap = cm.get_cmap('jet', 30)
cax = ax1.matshow(output, cmap='gray')
#cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cax)
ax1.grid(False)
plt.title(cm_title)
ylabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
plt.show()
# shrink value for correlation matrix
# in order to use colormap -> 10 scale
def shrink_value_03_1(corr_in1):
corr_out1 = corr_in1.copy()
# here dataframe.copy() must be used, otherwise input can also be changed when changing output
for i in range (22):
for j in range(22):
if corr_in1.iloc[i, j] < 0.3:
corr_out1.iloc[i, j] = 0.3
return corr_out1
def shrink_value_05_1(corr_in2):
corr_out2 = corr_in2.copy()
# here dataframe.copy() must be used, otherwise input can also be changed when changing output
for i2 in range (22):
for j2 in range(22):
if corr_in2.iloc[i2, j2] < 0.5:
corr_out2.iloc[i2, j2] = 0.5
return corr_out2
# not used!!!!!!!!!!!!
# normalize the complex signal series
def normalize_complex_arr(a):
a_oo = a - a.real.min() - 1j*a.imag.min() # origin offsetted
return a_oo/np.abs(a_oo).max()
#################################
f_dB = lambda x : 20 * np.log10(np.abs(x))
# import the pkl file
#pkl_file=pd.read_pickle('/Users/bruce/Documents/uOttawa/Project/audio_brainstem_response/Data_BruceSunMaster_Studies/study2/study2DataFrame.pkl')
df_EFR=pd.read_pickle('/home/bruce/Dropbox/Project/4.Code for Linux/df_EFR.pkl')
# remove DC offset
df_EFR_detrend_rows = []
for i in range(1408):
    # detrend each 1024-sample waveform (remove the DC offset) and keep the 7 label columns
    df_EFR_detrend_data = pd.DataFrame(signal.detrend(df_EFR.iloc[i: i+1, 0:1024], type='constant').reshape(1,1024))
    df_EFR_label = pd.DataFrame(df_EFR.iloc[i, 1024:1031].values.reshape(1,7))
    df_EFR_detrend_rows.append(pd.concat([df_EFR_detrend_data, df_EFR_label], axis=1, ignore_index=True))
df_EFR_detrend = pd.concat(df_EFR_detrend_rows, ignore_index=True)
# set the title of columns
df_EFR_detrend.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
df_EFR_detrend = df_EFR_detrend.reset_index(drop=True)
df_EFR = df_EFR_detrend
# Time domain
# Define window function
win_kaiser = signal.windows.kaiser(1024, beta=14)
win_hamming = signal.windows.hamming(1024)
# average the df_EFR
df_EFR_win = pd.DataFrame()
#!/usr/bin/env python
'''
Original author: <NAME> (ORNL)
Current version by: <NAME>
'''
from __future__ import print_function
import json
import decimal
import pandas
from journals.utilities import parse_datetime
from journals.databases.icat.sns.communicate import SnsICat
class SnsICatInterface(object):
def __init__(self):
self.icat = SnsICat()
self.key_list = ['ipts', 'duration', 'startTime', 'totalCounts', 'protonCharge', 'title']
# Utils
#------
@staticmethod
def _hyphen_range(s):
""" Takes a range in form of "a-b" and generate a list of numbers between a and b inclusive.
Also accepts comma separated ranges like "a-b,c-d,f" will build a list which will include
Numbers from a to b, a to d and f"""
s = "".join(s.split()) # removes white space
r = set()
for x in s.split(','):
t = x.split('-')
            if len(t) not in [1, 2]:
                raise ValueError("_hyphen_range was given the argument " + s + ", which does not appear to be correctly formatted.")
r.add(int(t[0])) if len(t) == 1 else r.update(set(range(int(t[0]), int(t[1]) + 1)))
l = list(r)
l.sort()
l_in_str = ','.join(str(x) for x in l)
return l_in_str
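    # Illustrative example: _hyphen_range("1-3,7,10-11") returns "1,2,3,7,10,11"
    # (ranges expanded, duplicates removed, values sorted, comma-separated string).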
def _substitute_keys_in_dictionary(self,obj,old_key,new_key):
if isinstance(obj, dict):
if old_key in obj:
obj[new_key]=obj.pop(old_key)
return {k: self._substitute_keys_in_dictionary(v,old_key,new_key) for k, v in obj.items()}
elif isinstance(obj, list):
return [self._substitute_keys_in_dictionary(elem,old_key,new_key) for elem in obj]
def _convert_to_datetime(self,obj,key):
if isinstance(obj, dict):
if key in obj:
obj[key] = parse_datetime(obj[key])
return {k: self._convert_to_datetime(v,key) for k, v in obj.items()}
elif isinstance(obj, list):
return [self._convert_to_datetime(elem,key) for elem in obj]
# Functions
#----------
def get_instruments(self):
json_data = self.icat.get_instruments()
if json_data is not None and 'instrument' in json_data:
return json_data['instrument']
else:
raise Exception("ICAT did not return the expected result....")
def get_experiments(self,instrument):
json_data = self.icat.get_experiments(instrument)
return json_data
def get_experiments_meta(self, instrument):
json_data = self.icat.get_experiments_meta(instrument)
if json_data is not None and 'proposal' in json_data:
json_data = json_data['proposal']
else:
raise Exception("ICAT did not return the expected result....")
self._substitute_keys_in_dictionary(json_data,'@id','id')
self._convert_to_datetime(json_data,'createTime')
return json_data
def get_experiments_id_and_title(self,instrument):
json_data = self.get_experiments_meta(instrument)
json_data = { (int(entry['id'].split('-')[1]), entry['title']) for entry in json_data }
return json_data
def get_experiments_id_and_date(self,instrument):
json_data = self.get_experiments_meta(instrument)
json_data = { (int(entry['id'].split('-')[1]), entry['createTime']) for entry in json_data }
return json_data
def get_runs_all(self,instrument,experiment):
json_data = self.icat.get_runs_all(instrument,experiment)
self._substitute_keys_in_dictionary(json_data,'@id','id')
self._convert_to_datetime(json_data,'createTime')
self._convert_to_datetime(json_data,'startTime')
self._convert_to_datetime(json_data,'endTime')
return json_data
def get_runs(self,instrument,experiment):
raw_ranges = self.icat.get_run_ranges(instrument,experiment)
if raw_ranges is not None and 'runRange' in raw_ranges:
ranges = self._hyphen_range(raw_ranges["runRange"])
else:
raise Exception("ICAT did not return the expected result....")
return json.loads( "[" + ranges + "]" )
def get_runs_meta(self,instrument,experiment):
raw_ranges = self.icat.get_run_ranges_meta(instrument,experiment)
        # TODO - Need to change to handle IPTS that return multiple proposals as a list of dictionaries
if type(raw_ranges['proposal']) == list:
#ranges = ','.join([ self._hyphen_range(item['runRange']) for item in raw_ranges['proposal'] ])
raw_ranges['proposal'] = raw_ranges['proposal'][0]
ranges = self._hyphen_range(raw_ranges['proposal']['runRange'])
raw_ranges['proposal']['runRange'] = ranges
self._substitute_keys_in_dictionary(raw_ranges,'@id','id')
self._convert_to_datetime(raw_ranges,'createTime')
return raw_ranges
def get_run_number_and_title(self,instrument,experiment):
json_data = self.icat.get_runs_all(instrument,experiment)
if json_data is not None and 'proposal' in json_data:
try:
json_data = json_data['proposal']['runs']['run']
except:
raise Exception("ICAT did not return the expected result....")
else:
raise Exception("ICAT did not return the expected result....")
self._substitute_keys_in_dictionary(json_data,'@id','id')
data_list = list()
for entry in json_data:
title = None
if 'title' in entry:
title = entry['title']
data_list.append([entry['id'],title])
json_data_subset = {"data" : data_list}
return json_data_subset
def get_user_experiments(self,uid):
json_data = self.icat.get_user_experiments(uid)
if json_data is not None and 'proposals' in json_data:
return json_data['proposals']
else:
raise Exception("ICAT did not return the expected result....")
# Unit Functions
#---------------
def _get_list_of_all_ipts(self):
uri = self._ipts_uri
json_data = self._uri2xml2json(uri)
for x in json_data['proposals']['proposal']:
if isinstance(x['$'], str):
if x['$'].startswith('IPTS'):
self._ipts_list.append(int(x['$'].split('-')[1].split('.')[0]))
def _get_runs_from_ipts(self,data):
return [ element.get('id') for element in data.iter() if element.tag == 'run' ]
def _get_los_for_run(self,run,json_data):
json_metadata = json_data['metadata']
try:
ipts_pulled = json_metadata['proposal']['$'].split('-')[1]
except:
ipts_pulled = None
los_data = dict()
uid = run
meta_dict = self._get_meta_for_run(json_metadata)
meta_dict['ipts'] = ipts_pulled
los_data[uid] = meta_dict
self._update_master_los(los_data)
'''
NOTE: Below, the check for list is specific to IPTSs w/ proposal lists. These are:
Index IPTS
----- ----
88 8814
119 9818
'''
def _get_meta_for_ipts(self,runs,proposal_json):
if type(proposal_json) == list:
ipts_pulled = int(proposal_json[0]['@id'].split('-')[1])
runs_data = process_numbers(proposal_json[0]['runRange']['$'])
for i, proposal in enumerate(proposal_json[1:]):
                runs_data += process_numbers(proposal_json[i+1]['runRange']['$'])
startTime = [(':'.join( proposal_json[0]['createTime']['$'].split(':')[0:3])).split('.')[0]]
for i, proposal in enumerate(proposal_json[1:]):
startTime += [(':'.join( proposal_json[i+1]['createTime']['$'].split(':')[0:3])).split('.')[0]]
else:
ipts_pulled = int(proposal_json['@id'].split('-')[1])
runs_data = process_numbers(proposal_json['runRange']['$'])
startTime = [(':'.join( proposal_json['createTime']['$'].split(':')[0:3])).split('.')[0]]
meta_ipts_data = dict()
meta_ipts_data[ipts_pulled] = {'runs' : runs_data,
'createtime' : startTime}
self._update_master_meta_ipts_data(meta_ipts_data)
def _update_master_meta_ipts_data(self,meta_ipts_data):
self._meta_ipts_data.update(meta_ipts_data)
def _get_los_for_ipts(self,runs,proposal_json):
if type(proposal_json) == list:
ipts_pulled = int(proposal_json[0]['@id'].split('-')[1])
runs_data = proposal_json[0]['runs']['run']
for i, proposal in enumerate(proposal_json[1:]):
runs_data += proposal_json[i+1]['runs']['run']
else:
ipts_pulled = int(proposal_json['@id'].split('-')[1])
runs_data = proposal_json['runs']['run']
los_data = dict()
if len(runs) == 1:
uid = proposal_json['runs']['run']['@id']
x = proposal_json['runs']['run']
meta_dict = self._get_meta_for_run(x)
meta_dict['ipts'] = ipts_pulled
los_data[uid] = meta_dict
else:
for x in runs_data:
uid = x['@id']
meta_dict = self._get_meta_for_run(x)
meta_dict['ipts'] = ipts_pulled
los_data[uid] = meta_dict
self._update_master_los(los_data)
def _update_master_los(self,los_data):
self._los_data.update(los_data)
def _get_meta_for_run(self,metadata):
meta = dict.fromkeys(self.key_list)
for key in self.key_list:
if key in metadata:
if key == 'duration':
meta[key] = str(int(float(metadata[key]['$'])/60.))+'min'
elif key == 'startTime':
meta[key] = (':'.join( metadata[key]['$'].split(':')[0:3])).split('.')[0]
elif key == 'totalCounts':
meta[key] = '{:.2E}'.format(decimal.Decimal(metadata[key]['$']))
elif key == 'protonCharge':
meta[key] = float("{0:.2f}".format(metadata[key]['$'] / 1e12) )
else:
meta[key] = metadata[key]['$']
return meta
# Main Functions
#------------------
def initializeMetaIptsData(self):
ipts_list = self.getListOfIPTS()
self.getIPTSs( ipts_list, data='meta')
def getMetaIptsData(self):
return self._meta_ipts_data
def applyIptsFilter(self,ipts_list):
self.reset_los()
self.getIPTSs(ipts_list)
def getDataFrame(self):
data = self.get_los()
        df = pandas.DataFrame.from_dict(data, orient='index')
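        # Illustrative sketch (not part of the original code): get_los() returns a dict keyed by
        # run id, so orient='index' yields one row per run, e.g.
        #   data = {'12345': {'ipts': '8814', 'duration': '45min'},
        #           '12346': {'ipts': '8814', 'duration': '30min'}}
        #   pandas.DataFrame.from_dict(data, orient='index')
        # builds a frame indexed by run id with 'ipts' and 'duration' as columns.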
# @Date: 2019-11-22T15:19:51+08:00
# @Email: <EMAIL>
# @Filename: ProcessUniProt.py
# @Last modified time: 2019-11-24T22:58:36+08:00
import urllib.parse
import urllib.request
import ftplib
import wget
import pandas as pd
import numpy as np
from random import uniform
from time import sleep
import os, re
from collections import Counter
from ..Utils.Tools import Gadget
from ..Utils.Logger import RunningLogger
from ..Utils.FileIO import decompression, HandleIO
UNIPROT_FASTA_SITE = "ftp.uniprot.org"
UNIPROT_FASTA_PATH = "pub/databases/uniprot/current_release/knowledgebase/complete/"
UNIPROT_FASTA_FILE = ["uniprot_sprot.fasta.gz", "uniprot_trembl.fasta.gz", "uniprot_sprot_varsplic.fasta.gz"]
class MapUniProtID:
'''
Use UniProt ID Mapping API
Key parameters in using UniProt ID Mapping API:
.. code-block:: python
:linenos:
params = {
'from': 'ACC+ID',
'to': 'ACC',
'format': 'tab',
'columns': 'id,length,reviewed,comment(ALTERNATIVE%20PRODUCTS),feature(ALTERNATIVE%20SEQUENCE)'...
'query': list_str,
}
.. csv-table:: Details of ID Abbreviation
:header: "Abbreviation", "Name", "Direction"
"ACC+ID", "UniProtKB AC/ID", "from"
"ACC", "UniProtKB AC", "both"
"ID", "UniProtKB ID", "both"
"EMBL_ID", "EMBL/GenBank/DDBJ", "both"
"REFSEQ_NT_ID", "RefSeq Nucleotide", "both"
"P_REFSEQ_AC", "RefSeq Protein", "both"
"PDB_ID", "PDB", "both"
"ENSEMBL_TRS_ID", "Ensembl Transcript", "both"
"ENSEMBL_ID", "Ensembl", "both"
"...", "...", "..."
Here is the `Reference`_.
.. _Reference: https://www.uniprot.org/help/api_idmapping
Following table show the details of rest params:
.. csv-table:: Details of other parameters
:header: "param", "Explanation", "Reference"
"columns", "comma-separated list of column names", "https://www.uniprot.org/help/api_queries"
"columns", "Lists the column names for programmatic (RESTful) access to tab-separated or Excel downloads of UniProtKB search results", "https://www.uniprot.org/help/uniprotkb_column_names"
"format", "html | tab | xls | fasta | gff | txt | xml | rdf | list | rss", "https://www.uniprot.org/help/api_queries"
"query", "query text/id(s)", "https://www.uniprot.org/help/text-search"
'''
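    # Illustrative sketch (not part of the original code): a mapping request assembled by this
    # class would look roughly like the following (the accessions below are made up):
    #   params = {'from': 'ACC+ID', 'to': 'ACC', 'format': 'tab',
    #             'columns': 'id,length,reviewed',
    #             'query': 'P12345,Q9Y261'}
    #   result = MapUniProtID.go_to_uniprot(MapUniProtID.URL, params)  # tab-separated text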
URL = 'https://www.uniprot.org/uploadlists/'
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36 QIHU 360SE'
COLUMNS = ['id', 'length', 'reviewed', 'comment(ALTERNATIVE%20PRODUCTS)', 'feature(ALTERNATIVE%20SEQUENCE)', 'genes', 'organism', 'sequence', 'protein%20names']
COLUMN_DICT = {
'id': 'Entry', 'length': 'Length', 'reviewed': 'Status',
'comment(ALTERNATIVE%20PRODUCTS)': 'Alternative products (isoforms)',
'feature(ALTERNATIVE%20SEQUENCE)': 'Alternative sequence (isoforms)',
'genes': 'Gene names', 'organism': 'Organism', 'sequence': 'Sequence',
'protein%20names': 'Protein names'}
params = {
'from': 'ACC+ID',
'to': 'ACC',
'format': 'tab',
}
@staticmethod
def go_to_uniprot(url, params, code='utf-8'):
sleep(uniform(0.99, 5))
data = urllib.parse.urlencode(params)
data = data.encode('utf-8')
req = urllib.request.Request(url, data, headers={'User-Agent': 'User-Agent:Mozilla/5.0'})
with urllib.request.urlopen(req) as f:
response = f.read()
return response.decode(code)
'''
session = requests.Session()
adapter = requests.adapters.HTTPAdapter(max_retries=2)
session.mount('https://', adapter)
session.mount('http://', adapter)
session.headers.update({'User-Agent': MapUniProtID.USER_AGENT})
with session.get(url, params=params) as r:
result = r.text
return result
# with requests.get(url, params=params, headers={'User-Agent': MapUniProtID.USER_AGENT}, stream=True) as r:
'''
def get_info_from_uniprot(self, usecols, outputPath, from_list=None, from_list_file_path=None, sep='\t', chunksize=100, header=None):
def iter_io(iter_object, params, url, outputPath):
params['query'] = ','.join(iter_object) # list_str
result = MapUniProtID.go_to_uniprot(url, params)
with open(outputPath, 'a+') as outputFile:
outputFile.write(result)
def tidy_result(path, colName='Entry', sep='\t'):
df = pd.read_csv(path, sep=sep, na_values=[colName])
df.dropna(subset=[colName], inplace=True)
df.to_csv(path, sep=sep, index=False)
if usecols != 'all':
if not set(self.COLUMNS) >= set(usecols):
                self.Logger.logger.error('get_info_from_uniprot(): please specify usecols with elements in %s' % self.COLUMNS)
return False
else:
self.params['columns'] = ','.join(usecols)
else:
usecols = self.COLUMNS
self.params['columns'] = ','.join(self.COLUMNS)
if from_list_file_path:
df = pd.read_csv(from_list_file_path, header=header, chunksize=chunksize, sep=sep)
for chunk in df:
iter_io(chunk[0], self.params, self.URL, outputPath)
else:
if os.path.exists(outputPath):
new_colNames = [self.COLUMN_DICT.get(i, i) for i in usecols] + ['yourlist', 'isomap']
try:
finish = pd.read_csv(outputPath, sep='\t', usecols=['yourlist'], names=new_colNames, skiprows=1, header=None)['yourlist']
except Exception:
finish = pd.read_csv(outputPath, sep='\t', names=new_colNames[:-1], skiprows=1, header=None)
finish['isomap'] = np.nan
finish.to_csv(outputPath, sep="\t", index=False)
finish = finish['yourlist']
finish_li = []
for i in finish:
if i.count(',') > 0:
finish_li.extend(i.split(','))
else:
finish_li.append(i)
else:
finish_li = []
if finish_li:
new_li = list(set(from_list) - set(finish_li))
else:
new_li = from_list
for i in range(0, len(new_li), chunksize):
iter_io(new_li[i:i+chunksize], self.params, self.URL, outputPath)
tidy_result(outputPath)
return True
def __init__(self, dfrm, id_col, id_type, usecols, loggingPath, site_col=None, gene_col=None):
"""
Prerequisite:
* Assume that the parameters are all legal
* Never Change the input dataFrame
* Data has been filtered
"""
self.dfrm = dfrm
self.index = dfrm.index
self.id_col = id_col
self.id_type = id_type
self.site_col = site_col
self.usecols = usecols
self.gene_col = gene_col
if site_col is not None:
self.site_li = dfrm.groupby(by=[id_col]).apply(lambda x: [i for i in x[site_col]])
self.Logger = RunningLogger("MapUniProtID", loggingPath)
@staticmethod
def split_df(dfrm, colName, sep):
"""Split DataFrame"""
df = dfrm.copy()
return df.drop([colName], axis=1).join(df[colName].str.split(sep, expand=True).stack().reset_index(level=1, drop=True).rename(colName))
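    # Illustrative sketch (not part of the original code): split_df explodes a delimited column
    # into one row per value while keeping the other columns, e.g.
    #   df = pd.DataFrame({'Entry': ['P1'], 'Gene names': ['GENE1 GENE2']})
    #   MapUniProtID.split_df(df, 'Gene names', ' ')
    # returns two rows for 'P1', one with 'GENE1' and one with 'GENE2' in 'Gene names'.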
def getCanonicalInfo(self, dfrm):
"""
Will Change the dfrm
* Add new column (canonical_isoform)
"""
canonical_pattern = re.compile(r'IsoId=([0-9A-Z-]+); Sequence=Displayed')
dfrm['canonical_isoform'] = dfrm.apply(lambda x: ','.join(canonical_pattern.findall(x['Alternative products (isoforms)'])) if not isinstance(x['Alternative products (isoforms)'], float) else np.nan, axis=1)
special_case = dfrm[dfrm['canonical_isoform'] == ''].index
if len(special_case) > 0:
canonical_pattern = re.compile(r'IsoId=([0-9A-Z-,\s]+); Sequence=Displayed')
special_se = dfrm.loc[special_case].apply(lambda x: ','.join(canonical_pattern.findall(x['Alternative products (isoforms)'])), axis=1)
dfrm.loc[special_case, 'canonical_isoform'] = special_se
self.Logger.logger.warning("Special Cases of Canonical Info:")
self.Logger.logger.warning("\n%s\n" % str(special_se))
else:
special_se = pd.Series([])
return special_se
def get_raw_ID_Mapping(self, outputPath, chunksize=100):
"""
        Get Raw ID Mapping Result
"""
self.params['from'] = self.id_type
status = self.get_info_from_uniprot(
self.usecols,
outputPath,
self.dfrm[self.id_col].drop_duplicates(),
chunksize=chunksize
)
if not status:
return False
new_colNames = [self.COLUMN_DICT.get(i, i) for i in self.usecols] + ['yourlist', 'isomap']
        dfrm = pd.read_csv(outputPath, sep='\t', names=new_colNames, skiprows=1, header=None)
# Licensed to Modin Development Team under one or more contributor license
# agreements. See the NOTICE file distributed with this work for additional
# information regarding copyright ownership. The Modin Development Team
# licenses this file to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#
# This file is copied and adapted from:
# http://github.com/modin-project/modin/master/modin/pandas/test/test_general.py
import sys
import pytest
import pandas
import numpy as np
from numpy.testing import assert_array_equal
import ray
from ray.util.client.ray_client_helpers import ray_start_client_server
modin_compatible_version = sys.version_info >= (3, 7, 0)
modin_installed = True
if modin_compatible_version:
try:
import modin # noqa: F401
except ModuleNotFoundError:
modin_installed = False
skip = not modin_compatible_version or not modin_installed
# These tests are written for versions of Modin that require python 3.7+
pytestmark = pytest.mark.skipif(skip, reason="Outdated or missing Modin dependency")
if not skip:
from ray.tests.modin.modin_test_utils import df_equals
import modin.pandas as pd
# Module scoped fixture. Will first run all tests without ray
# client, then rerun all tests with a single ray client session.
@pytest.fixture(params=[False, True], autouse=True, scope="module")
def run_ray_client(request):
if request.param:
with ray_start_client_server() as client:
yield client
else:
# Run without ray client (do nothing)
yield
# Cleanup state before rerunning tests with client
ray.shutdown()
random_state = np.random.RandomState(seed=42)
# Size of test dataframes
NCOLS, NROWS = (2 ** 6, 2 ** 8)
# Range for values for test data
RAND_LOW = 0
RAND_HIGH = 100
# Input data and functions for the tests
# The test data that we will test our code against
test_data = {
"int_data": {
"col{}".format(int((i - NCOLS / 2) % NCOLS + 1)): random_state.randint(
RAND_LOW, RAND_HIGH, size=(NROWS)
)
for i in range(NCOLS)
},
"float_nan_data": {
"col{}".format(int((i - NCOLS / 2) % NCOLS + 1)): [
x
if (j % 4 == 0 and i > NCOLS // 2) or (j != i and i <= NCOLS // 2)
else np.NaN
for j, x in enumerate(
random_state.uniform(RAND_LOW, RAND_HIGH, size=(NROWS))
)
]
for i in range(NCOLS)
},
}
test_data["int_data"]["index"] = test_data["int_data"].pop(
"col{}".format(int(NCOLS / 2))
)
for col in test_data["float_nan_data"]:
for row in range(NROWS // 2):
if row % 16 == 0:
test_data["float_nan_data"][col][row] = np.NaN
test_data_values = list(test_data.values())
test_data_keys = list(test_data.keys())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_isna(data):
pandas_df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
pandas_result = pandas.isna(pandas_df)
modin_result = pd.isna(modin_df)
df_equals(modin_result, pandas_result)
modin_result = pd.isna(pd.Series([1, np.nan, 2]))
pandas_result = pandas.isna(pandas.Series([1, np.nan, 2]))
df_equals(modin_result, pandas_result)
assert pd.isna(np.nan) == pandas.isna(np.nan)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_isnull(data):
pandas_df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
pandas_result = pandas.isnull(pandas_df)
modin_result = pd.isnull(modin_df)
df_equals(modin_result, pandas_result)
modin_result = pd.isnull(pd.Series([1, np.nan, 2]))
pandas_result = pandas.isnull(pandas.Series([1, np.nan, 2]))
df_equals(modin_result, pandas_result)
assert pd.isna(np.nan) == pandas.isna(np.nan)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_notna(data):
pandas_df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
pandas_result = pandas.notna(pandas_df)
modin_result = pd.notna(modin_df)
df_equals(modin_result, pandas_result)
modin_result = pd.notna(pd.Series([1, np.nan, 2]))
pandas_result = pandas.notna(pandas.Series([1, np.nan, 2]))
df_equals(modin_result, pandas_result)
assert pd.isna(np.nan) == pandas.isna(np.nan)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_notnull(data):
pandas_df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
pandas_result = pandas.notnull(pandas_df)
modin_result = pd.notnull(modin_df)
df_equals(modin_result, pandas_result)
modin_result = pd.notnull(pd.Series([1, np.nan, 2]))
pandas_result = pandas.notnull(pandas.Series([1, np.nan, 2]))
df_equals(modin_result, pandas_result)
assert pd.isna(np.nan) == pandas.isna(np.nan)
def test_merge():
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 0, 1],
"col4": [2, 4, 5, 6],
}
modin_df = pd.DataFrame(frame_data)
pandas_df = pandas.DataFrame(frame_data)
frame_data2 = {"col1": [0, 1, 2], "col2": [1, 5, 6]}
modin_df2 = pd.DataFrame(frame_data2)
pandas_df2 = pandas.DataFrame(frame_data2)
join_types = ["outer", "inner"]
for how in join_types:
# Defaults
modin_result = pd.merge(modin_df, modin_df2, how=how)
pandas_result = pandas.merge(pandas_df, pandas_df2, how=how)
df_equals(modin_result, pandas_result)
# left_on and right_index
modin_result = pd.merge(
modin_df, modin_df2, how=how, left_on="col1", right_index=True
)
pandas_result = pandas.merge(
pandas_df, pandas_df2, how=how, left_on="col1", right_index=True
)
df_equals(modin_result, pandas_result)
# left_index and right_on
modin_result = pd.merge(
modin_df, modin_df2, how=how, left_index=True, right_on="col1"
)
pandas_result = pandas.merge(
pandas_df, pandas_df2, how=how, left_index=True, right_on="col1"
)
df_equals(modin_result, pandas_result)
# left_on and right_on col1
modin_result = pd.merge(
modin_df, modin_df2, how=how, left_on="col1", right_on="col1"
)
pandas_result = pandas.merge(
pandas_df, pandas_df2, how=how, left_on="col1", right_on="col1"
)
df_equals(modin_result, pandas_result)
# left_on and right_on col2
modin_result = pd.merge(
modin_df, modin_df2, how=how, left_on="col2", right_on="col2"
)
pandas_result = pandas.merge(
pandas_df, pandas_df2, how=how, left_on="col2", right_on="col2"
)
df_equals(modin_result, pandas_result)
# left_index and right_index
modin_result = pd.merge(
modin_df, modin_df2, how=how, left_index=True, right_index=True
)
pandas_result = pandas.merge(
pandas_df, pandas_df2, how=how, left_index=True, right_index=True
)
df_equals(modin_result, pandas_result)
s = pd.Series(frame_data.get("col1"))
with pytest.raises(ValueError):
pd.merge(s, modin_df2)
with pytest.raises(TypeError):
pd.merge("Non-valid type", modin_df2)
def test_pivot():
test_df = pd.DataFrame(
{
"foo": ["one", "one", "one", "two", "two", "two"],
"bar": ["A", "B", "C", "A", "B", "C"],
"baz": [1, 2, 3, 4, 5, 6],
"zoo": ["x", "y", "z", "q", "w", "t"],
}
)
df = pd.pivot(test_df, index="foo", columns="bar", values="baz")
assert isinstance(df, pd.DataFrame)
with pytest.raises(ValueError):
pd.pivot(test_df["bar"], index="foo", columns="bar", values="baz")
def test_pivot_table():
test_df = pd.DataFrame(
{
"A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
"B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
"C": [
"small",
"large",
"large",
"small",
"small",
"large",
"small",
"small",
"large",
],
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
}
)
df = pd.pivot_table(
test_df, values="D", index=["A", "B"], columns=["C"], aggfunc=np.sum
)
assert isinstance(df, pd.DataFrame)
with pytest.raises(ValueError):
pd.pivot_table(
test_df["C"], values="D", index=["A", "B"], columns=["C"], aggfunc=np.sum
)
def test_unique():
modin_result = pd.unique([2, 1, 3, 3])
pandas_result = pandas.unique([2, 1, 3, 3])
assert_array_equal(modin_result, pandas_result)
assert modin_result.shape == pandas_result.shape
modin_result = pd.unique(pd.Series([2] + [1] * 5))
    pandas_result = pandas.unique(pandas.Series([2] + [1] * 5))
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
@pytest.mark.parametrize("align_axis", [0, 1, "index", "columns"])
def test_compare_axis(align_axis):
# GH#30429
s1 = pd.Series(["a", "b", "c"])
s2 = pd.Series(["x", "b", "z"])
result = s1.compare(s2, align_axis=align_axis)
if align_axis in (1, "columns"):
indices = pd.Index([0, 2])
columns = pd.Index(["self", "other"])
expected = pd.DataFrame(
[["a", "x"], ["c", "z"]], index=indices, columns=columns
)
tm.assert_frame_equal(result, expected)
else:
indices = pd.MultiIndex.from_product([[0, 2], ["self", "other"]])
expected = pd.Series(["a", "x", "c", "z"], index=indices)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"keep_shape, keep_equal",
[
(True, False),
(False, True),
(True, True),
# False, False case is already covered in test_compare_axis
],
)
def test_compare_various_formats(keep_shape, keep_equal):
s1 = pd.Series(["a", "b", "c"])
s2 = pd.Series(["x", "b", "z"])
result = s1.compare(s2, keep_shape=keep_shape, keep_equal=keep_equal)
if keep_shape:
indices = pd.Index([0, 1, 2])
columns = pd.Index(["self", "other"])
if keep_equal:
expected = pd.DataFrame(
[["a", "x"], ["b", "b"], ["c", "z"]], index=indices, columns=columns
)
else:
expected = pd.DataFrame(
[["a", "x"], [np.nan, np.nan], ["c", "z"]],
index=indices,
columns=columns,
)
else:
indices = pd.Index([0, 2])
columns = pd.Index(["self", "other"])
expected = pd.DataFrame(
[["a", "x"], ["c", "z"]], index=indices, columns=columns
)
tm.assert_frame_equal(result, expected)
def test_compare_with_equal_nulls():
# We want to make sure two NaNs are considered the same
# and dropped where applicable
s1 = pd.Series(["a", "b", np.nan])
s2 = pd.Series(["x", "b", np.nan])
result = s1.compare(s2)
expected = pd.DataFrame([["a", "x"]], columns=["self", "other"])
tm.assert_frame_equal(result, expected)
def test_compare_with_non_equal_nulls():
# We want to make sure the relevant NaNs do not get dropped
s1 = pd.Series(["a", "b", "c"])
s2 = pd.Series(["x", "b", np.nan])
result = s1.compare(s2, align_axis=0)
indices = pd.MultiIndex.from_product([[0, 2], ["self", "other"]])
expected = pd.Series(["a", "x", "c", np.nan], index=indices)
    tm.assert_series_equal(result, expected)
import re
import json
import requests
from bs4 import BeautifulSoup
import time
import datetime
import pandas as pd
import numpy as np
# Fans page ==================================================================
# Crawl_PagePosts
def Crawl_PagePosts(pageurl, until_date='2019-01-01'):
page_id = pagecrawler.get_pageid(pageurl)
timeline_cursor = ''
content_df = [] # post
feedback_df = [] # reactions
max_date = datetime.datetime.now()
break_times = 0
rs = requests.session()
# request date and break loop when reach the goal
while max_date >= datetime.datetime.strptime(until_date, '%Y-%m-%d'):
try:
url = 'https://www.facebook.com/pages_reaction_units/more/'
params = {'page_id': page_id,
'cursor': str({"timeline_cursor":timeline_cursor,
"timeline_section_cursor":'{}',
"has_next_page":'true'}),
# 'surface': 'www_pages_home',
'surface': 'www_pages_posts',
'unit_count': 20,
'__a': '1'}
resp = rs.get(url, params=params)
data = json.loads(re.sub(r'for \(;;\);','',resp.text))
            # contents: poster's name, poster's ID, post ID, time, content
ndf = pageparser.parse_content(data=data)
content_df.append(ndf)
# reactions
ndf1 = pageparser.get_reaction(data=data)
feedback_df.append(ndf1)
# update request params
max_date = ndf['TIME'].max()
print('TimeStamp: {}.'.format(ndf['TIME'].max()))
timeline_cursor = re.findall(r'timeline_cursor%22%3A%22(.*?)%22%2C%22timeline_section_cursor', data['domops'][0][3]['__html'])[0]
# break times to zero
break_times = 0
        except Exception:
break_times += 1
print('break_times:', break_times)
time.sleep(3)
time.sleep(2)
if break_times > 5:
break
# join content and reactions
content_df = pd.concat(content_df, ignore_index=True)
feedback_df = pd.concat(feedback_df, ignore_index=True)
    df = pd.merge(left=content_df, right=feedback_df, how='left', on=['PAGEID', 'POSTID'])
import os
import glob
import datetime
import pandas as pd
if __name__ == '__main__':
"""
    ASOS dataset information
    File name : SURFACE_ASOS_[station number]_HR_[observation year]_[observation year]_[observation year+1].csv
    Columns : station(0), datetime(1), temperature(2), precipitation(3), wind speed(4),
              wind direction(5), humidity(6), vapor pressure(7), dew point temperature(8), local pressure(9),
              sea level pressure(10), sunshine(11), solar radiation(12), snow depth(13), 3-hour fresh snowfall(14),
              total cloud amount(15), low/mid-level cloud amount(16), cloud form(17), lowest cloud height(18), visibility(19),
              ground condition(20), phenomenon number(21), ground temperature(22), 5cm soil temperature(23), 10cm soil temperature(24),
              30cm soil temperature(25)
    Column index : 0
    Total number of columns : 26
"""
dataset_src = os.path.join('C:\\', 'DATASET', 'ATMOSPHERE', 'OBSERVATION_RAW', 'ASOS')
start_date = datetime.datetime(year=2014, month=1, day=1, hour=0, minute=0)
end_date = datetime.datetime(year=2018, month=12, day=31, hour=23, minute=0)
file_name_format = "SURFACE_ASOS_%s_HR_%s_%s_%s.csv"
asos_variable_map = {0: 'station_num', 1: 'observation_time', 2: 'temperature', 3: 'precipitation',
4: 'wind_speed', 5: 'wind_direction', 6: 'humidity', 7: 'vapor_pressure',
8: 'dew_point_temperature', 9: 'local_pressure', 10: 'sea_surface_pressure',
11: 'sunshine', 12: 'solar_radiation', 13: 'snowfall', 14: 'snowfall_3hr',
15: 'total_cloud_amount', 16: 'low_middle_cloud_amount', 17: 'cloud_shape',
18: 'min_cloud_height', 19: 'visibility', 20: 'ground_condition',
21: 'current_number', 22: 'ground_temperature', 23: 'soil_temperature',
24: 'soil_temperature_5cm', 25: 'soil_temperature_10cm', 26: 'soil_temperature_30cm'}
select_variables = [asos_variable_map[select_num] for select_num in [0, 1, 2, 3]]
for year in range(start_date.year, end_date.year + 1):
file_list = glob.glob(os.path.join(dataset_src, str(year), '*.csv'))
for file_name in file_list:
            df = pd.read_csv(file_name, skiprows=1, header=None, encoding='cp949')
from unittest import TestCase
import pandas as pd
from moonstone.parsers.transform.cleaning import StringCleaner
class TestStringCleaner(TestCase):
def test_remove_trailing_spaces(self):
df = pd.DataFrame(
[
[1, ' b'],
[4, " a "]
],
columns=['number', 'string']
)
expected_df = pd.DataFrame(
[
[1, 'b'],
[4, "a"]
],
columns=['number', 'string']
)
method_name = "remove_trailing_spaces"
expected_history = [
[method_name, {'col_name': 'string'}]
]
transform_cleaning = StringCleaner(df)
getattr(transform_cleaning, method_name)('string')
self.assertTrue(transform_cleaning.history)
self.assertListEqual(transform_cleaning.history, expected_history)
pd.testing.assert_frame_equal(transform_cleaning.df, expected_df)
def test_to_slug(self):
df = pd.DataFrame(
[
[1, ' b test '],
[4, " a Stuff.2"]
],
columns=['number', 'string']
)
expected_df = pd.DataFrame(
[
[1, 'b-test'],
[4, "a-stuff-2"]
],
columns=['number', 'string']
)
method_name = "to_slug"
expected_history = [
[method_name, {'col_name': 'string'}]
]
transform_cleaning = StringCleaner(df)
getattr(transform_cleaning, method_name)('string')
self.assertTrue(transform_cleaning.history)
self.assertListEqual(transform_cleaning.history, expected_history)
        pd.testing.assert_frame_equal(transform_cleaning.df, expected_df)
import numpy as np
import pandas as pd
from bs4.element import NavigableString, Comment, Doctype
from report_parser.src.text_class import Text
def print_tag(tag):
print('printing tag:', type(tag), tag.name)
if type(tag) not in [NavigableString, Doctype, Comment]:
for child in tag.children:
print('child:', type(child), child.name)
def get_texts_and_tables(html_elems, new_method):
contents = []
contents_num = len(html_elems)
cur_elem_num = 0
while cur_elem_num < contents_num:
elem_type, elem = html_elems[cur_elem_num]
accumulated_texts = []
table = None
while elem_type == 'text' and cur_elem_num < contents_num:
accumulated_texts.append(elem)
cur_elem_num += 1
if cur_elem_num < contents_num:
elem_type, elem = html_elems[cur_elem_num]
if len(accumulated_texts):
contents.append(Text(accumulated_texts))
accumulated_texts = []
if elem_type == 'table':
            # TODO: temporary measure for testing the new method
if new_method:
table = parse_table_new(elem)
else:
table = parse_table(elem)
if table.shape[0]:
contents.append(table)
cur_elem_num += 1
return contents
def parse_table_new(table_rows):
"""
    Parse tables produced by ParsingTree
"""
df = pd.DataFrame()
for i in range(len(table_rows)):
html_row = table_rows[i]
row = [x for x in html_row]
        # Skip empty rows:
if not any([x[2] for x in row]):
continue
flatten_row = []
        # Look at the value of each cell in the row
for col_index in range(len(row)):
row_span = row[col_index][0]
col_span = row[col_index][1]
value = row[col_index][2]
            # Fill the cells below with the value of the current cell
if row_span > 1:
                # Take the required number of rows, starting with the current one,
                # and insert a value with row_span == 1 at the appropriate index
for _ in range(row_span):
real_index = sum([x[1] for x in row][:col_index])
cell_value = (1, col_span, value)
table_rows[i + _].insert(real_index, cell_value)
            # Copy the cell value into the next several columns (or not)
if col_span == 1:
flatten_row.append(value)
else:
flatten_row.extend([value] * col_span)
        # Append the row's list of values to the dataframe
df = df.append([flatten_row])
df.reset_index(inplace=True, drop=True)
return df
def parse_table(table_rows):
max_col_num = get_max_colspan(table_rows)
df = pd.DataFrame(columns=range(max_col_num), dtype=str)
col_shifts = [0]
row_shift = 0
for i in range(len(table_rows)):
html_row = table_rows[i]
df_len = len(df)
cur_shift = col_shifts.pop() if col_shifts else 0
if row_shift == 0:
# if True:
            df = df.append(pd.Series(dtype=str), ignore_index=True)
next_row_shift = 0
for j in range(len(html_row)):
cell = html_row[j]
shape = (cell[0], cell[1])
need_rows = shape[0] - (len(df) - df_len)
next_row_shift = max(need_rows - 1, next_row_shift)
for _ in range(need_rows - 1):
                df = df.append(pd.Series(dtype=str), ignore_index=True)
import dash
from datetime import datetime, timedelta
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_daq as daq
import dash_html_components as html
import dash_table
import numpy as np
import pandas as pd
import plotly.graph_objs as go
from dash.dependencies import Input, Output, State
from sqlalchemy import or_, delete, extract
from ..app import app
from ..api.sqlalchemy_declarative import athlete, stravaSummary, stravaSamples, workoutStepLog, ouraSleepSummary, \
strydSummary, ouraReadinessSummary, annotations
from ..api.database import engine
from ..utils import utc_to_local, config, oura_credentials_supplied, stryd_credentials_supplied, \
peloton_credentials_supplied
from ..pages.power import power_curve, zone_chart
import re
import json
import operator
import scipy
transition = int(config.get('dashboard', 'transition'))
ctl_color = 'rgb(171, 131, 186)'
atl_color = 'rgb(245,226,59)'
tsb_color = 'rgb(193, 125, 55)'
tsb_fill_color = 'rgba(193, 125, 55, .5)'
ftp_color = 'rgb(100, 217, 236)'
white = config.get('oura', 'white')
teal = config.get('oura', 'teal')
light_blue = config.get('oura', 'light_blue')
dark_blue = config.get('oura', 'dark_blue')
orange = config.get('oura', 'orange')
orange_faded = 'rgba(217,100,43,.75)'
# Oura readiness ranges for recommendation
oura_high_threshold = 85
oura_med_threshold = 77
oura_low_threshold = 70
def get_layout(**kwargs):
athlete_info = app.session.query(athlete).filter(athlete.athlete_id == 1).first()
pmc_switch_settings = json.loads(athlete_info.pmc_switch_settings)
use_run_power = True if athlete_info.use_run_power else False
use_cycle_power = True if athlete_info.use_cycle_power else False
use_power = True if use_run_power or use_cycle_power else False
app.session.remove()
return html.Div([
# Dummy div for simultaneous callbacks on page load
dbc.Modal(id="annotation-modal", centered=True, autoFocus=True, fade=False, backdrop='static', size='xl',
children=[
dbc.ModalHeader(id='annotation-modal-header', children=['Annotations']),
dbc.ModalBody(id='annotation-modal-body', className='align-items-center text-center',
children=[
html.Div(className='col-lg-12 mb-2', style={'padding': 0},
children=[
html.Div(id='annotation-table-container', className='col mb-2',
style={'padding': 0},
children=[html.Div(id='annotation-table')]),
dbc.Button('Add Row', id='annotation-add-rows-button',
color='primary', size='sm', n_clicks=0)
]),
html.Div(id='annotation-save-container', className='col',
children=[
html.H6('Enter admin password to save changes',
className='col d-inline-block'),
html.Div(className='col mb-2', children=[
dbc.Input(id='annotation-password', bs_size="sm",
type='password', placeholder='Password', value=''),
]),
html.Div(className='col mb-2', children=[
dbc.Button("Save",
id="save-close-annotation-modal-button",
color='primary', size='sm', n_clicks=0),
html.Div(id='annotation-save-status')
])
])]),
dbc.ModalFooter(
dbc.Button("Close", id="close-annotation-modal-button", color='primary', size='sm',
href=f'/performance?refresh={str(datetime.now())}')
),
]),
dbc.Modal(id="activity-modal", is_open=False, centered=True, autoFocus=True, fade=False, backdrop='static',
size='xl',
children=[
dbc.ModalHeader(id='activity-modal-header'),
dbc.ModalBody([
html.Div([
dbc.Spinner(color='info', children=[
html.Div(id='activity-modal-body', className='row mt-2 mb-2', children=[
html.Div(className='col-lg-10', children=[
html.Div(className='row', children=[
html.Div(className='col-lg-12', children=[
dbc.Card(color='dark', children=[
dbc.CardHeader(html.H4('Activity Stream')),
dbc.CardBody([
html.Div(className='row', children=[
html.Div(id='modal-workout-summary',
className='col-lg-3'),
html.Div(id='modal-workout-trends', className='col-lg-9'),
])
])
])
])
])
]),
html.Div(id='modal-workout-stats', className='col-lg-2',
style={'height': '100%'}),
]),
]),
]),
html.Div([
dbc.Spinner(color='info', children=[
html.Div(id="activity-modal-body-2", className='row mt-2 mb-2',
children=[
html.Div(className='col-lg-6' if use_power else 'col-lg-12', children=[
dbc.Card(color='dark', children=[
dbc.CardHeader(id='modal-zone-title'),
dbc.CardBody(id='modal-zones')
])
]),
html.Div(className='col-lg-6',
style={} if use_power else {'display': 'none'}, children=[
dbc.Card(id='modal-power-curve-card', color='dark', children=[
dbc.CardHeader(html.H4('Power Curve')),
dbc.CardBody([
dcc.Graph(id='modal-power-curve-chart',
config={'displayModeBar': False},
style={'height': '100%'})
]
)
])
]),
])
]),
]),
]),
dbc.ModalFooter(
dbc.Button("Close", id="close-activity-modal-button", size='sm', color='primary', n_clicks=0)
),
]),
html.Div(className='row align-items-start text-center mt-2 mb-2', children=[
html.Div(id='pmd-header-and-chart', className='col-lg-8',
children=[
dbc.Card([
dbc.CardHeader([
html.Div(id='pmd-kpi')
]),
dbc.CardBody([
# Start Graph #
html.Div(className='row', children=[
html.Div(id='daily-recommendations', # Populated by callback
className='col-lg-3' if oura_credentials_supplied else '',
style={'display': 'none' if not oura_credentials_supplied else 'normal'}),
# PMC Chart
dcc.Graph(id='pm-chart',
className='col-lg-8 mr-0 ml-0' if oura_credentials_supplied else 'col-lg-11 mr-0 ml-0',
# Populated by callback
style={'height': '100%'},
config={'displayModeBar': False}),
# Switches
html.Div(id='pmc-controls', className='col-lg-1 text-left',
style={'display': 'flex', 'justifyContent': 'space-between'}, children=[
html.Div(className='row', children=[
html.Div(className='col-lg-12 col-3',
style={'padding': '0', 'alignSelf': 'center'},
children=[
html.Button(id="open-annotation-modal-button",
className='fa fa-comment-alt',
n_clicks=0,
style={'fontSize': '1.5rem',
'display': 'inline-block',
'vertical-align': 'middle',
'border': '0'}),
]),
dbc.Tooltip(
'Chart Annotations',
target="open-annotation-modal-button"),
html.Div(id='run-pmc',
className='col-lg-12 col-3 align-items-center',
style={'padding': '0', 'alignSelf': 'center'},
children=[
daq.BooleanSwitch(
id='run-pmc-switch',
on=True,
style={'display': 'inline-block',
'vertical-align': 'middle'}
),
html.I(id='run-pmc-icon', className='fa fa-running',
style={'fontSize': '1.5rem',
'display': 'inline-block',
'vertical-align': 'middle',
'paddingLeft': '.25vw', }),
]),
dbc.Tooltip(
'Include running workouts in Fitness trend.',
target="run-pmc"),
html.Div(id='ride-pmc', className='col-lg-12 col-3',
style={'padding': '0', 'alignSelf': 'center'},
children=[
daq.BooleanSwitch(
id='ride-pmc-switch',
on=pmc_switch_settings['ride_status'],
style={'display': 'inline-block',
'vertical-align': 'middle'}
),
html.I(id='ride-pmc-icon', className='fa fa-bicycle',
style={'fontSize': '1.5rem',
'display': 'inline-block',
'vertical-align': 'middle',
'paddingLeft': '.25vw', }),
]),
dbc.Tooltip(
'Include cycling workouts in Fitness trend.',
target="ride-pmc"),
html.Div(id='all-pmc', className='col-lg-12 col-3',
style={'padding': '0', 'alignSelf': 'center'},
children=[
daq.BooleanSwitch(
id='all-pmc-switch',
on=pmc_switch_settings['all_status'],
style={'display': 'inline-block',
'vertical-align': 'middle'}
),
html.I(id='all-pmc-icon', className='fa fa-stream',
style={'fontSize': '1.5rem',
'display': 'inline-block',
'vertical-align': 'middle',
'paddingLeft': '.25vw', }),
]),
dbc.Tooltip(
'Include all other workouts in Fitness trend.',
target="all-pmc"),
html.Div(id='power-pmc', className='col-lg-12 col-3',
style={'padding': '0', 'alignSelf': 'center'},
children=[
daq.BooleanSwitch(
id='power-pmc-switch',
on=use_power,
style={'display': 'inline-block',
'vertical-align': 'middle'},
disabled=pmc_switch_settings[
'power_status'] and not use_power
),
html.I(id='power-pmc-icon', className='fa fa-bolt',
style={'fontSize': '1.5rem',
'display': 'inline-block',
'vertical-align': 'middle',
'paddingLeft': '.25vw', }),
]),
dbc.Tooltip(
'Include power data for stress scores.',
target="power-pmc"),
html.Div(id='hr-pmc', className='col-lg-12 col-3',
style={'padding': '0', 'alignSelf': 'center'},
children=[
daq.BooleanSwitch(
id='hr-pmc-switch',
on=pmc_switch_settings['hr_status'],
style={'display': 'inline-block',
'vertical-align': 'middle'}
),
html.I(id='hr-pmc-icon', className='fa fa-heart',
style={'fontSize': '1.5rem',
'display': 'inline-block',
'vertical-align': 'middle',
'paddingLeft': '.25vw', }),
]),
dbc.Tooltip(
'Include heart rate data for stress scores.',
target="hr-pmc"),
html.Div(id='atl-pmc', className='col-lg-12 col-3',
style={'padding': '0', 'alignSelf': 'center'},
children=[
daq.BooleanSwitch(
id='atl-pmc-switch',
on=pmc_switch_settings['atl_status'],
style={'display': 'inline-block',
'vertical-align': 'middle'},
),
html.I(id='atl-pmc-icon', className='fa fa-chart-line',
style={'fontSize': '1.5rem',
'display': 'inline-block',
'vertical-align': 'middle',
'paddingLeft': '.25vw', }),
]),
dbc.Tooltip(
'Always include fatigue from all sports',
target="atl-pmc"),
]),
]),
]),
]),
]),
]),
html.Div(id='trend-containers', className='col-lg-4', children=[
html.Div(className='row mb-2', children=[
html.Div(className='col-lg-12', children=[
dbc.Card([
dbc.CardHeader(className='align-items-center text-left', children=[
html.H6('90 Day Performance', id='performance-title', className='mb-0',
style={'display': 'inline-block'}),
]),
dbc.CardBody([
html.Div(className='row align-items-center',
# style={'paddingBottom': '1.25rem'},
children=[
html.Div(className='col-12 align-items-center mb-2',
style={'height': '1.375rem'}, children=[
dbc.DropdownMenu(children=
[
dbc.DropdownMenuItem("All Dates",
id="performance-time-selector-all",
n_clicks_timestamp=0),
dbc.DropdownMenuItem("YTD",
# id=f"{datetime.now().strftime('%j')}"),
id='performance-time-selector-ytd',
n_clicks_timestamp=0),
dbc.DropdownMenuItem("L90D",
id="performance-time-selector-l90d",
n_clicks_timestamp=1),
dbc.DropdownMenuItem("L6W",
id='performance-time-selector-l6w',
n_clicks_timestamp=0),
dbc.DropdownMenuItem("L30D",
id="performance-time-selector-l30d",
n_clicks_timestamp=0),
],
label="L90D",
bs_size='sm',
className="mb-0",
id='performance-time-selector',
style={'display': 'inline-block', 'paddingRight': '2vw'},
),
html.I(id='performance-trend-running-icon',
className='fa fa-running',
style={'fontSize': '1.5rem', 'display': 'inline-block'}),
daq.ToggleSwitch(id='performance-activity-type-toggle',
className='mr-2 ml-2',
style={'display': 'inline-block'}, value=False),
html.I(id='performance-trend-bicycle-icon',
className='fa fa-bicycle',
style={'fontSize': '1.5rem', 'display': 'inline-block'}),
dbc.Tooltip('Analyze cycling activities',
target="performance-trend-bicycle-icon"),
dbc.Tooltip('Toggle activity type',
target="performance-activity-type-toggle"),
dbc.Tooltip('Analyze running activities',
target="performance-trend-running-icon"),
dbc.DropdownMenu(children=
[
dbc.DropdownMenuItem("All Intensities",
id="performance-intensity-selector-all",
n_clicks_timestamp=1),
dbc.DropdownMenuItem("High Intensity",
id='performance-intensity-selector-high',
n_clicks_timestamp=0),
dbc.DropdownMenuItem("Mod Intensity",
id="performance-intensity-selector-mod",
n_clicks_timestamp=0),
dbc.DropdownMenuItem("Low Intensity",
id='performance-intensity-selector-low',
n_clicks_timestamp=0),
],
label="All Intensities",
bs_size='sm',
className="mb-0",
id='performance-intensity-selector',
style={'display': 'inline-block', 'paddingLeft': '2vw'},
),
]),
# sport_filter_icons(id='zones'),
html.Div(
className='col-lg-6 col-12 mt-2' if peloton_credentials_supplied else 'col-12 mt-2',
children=[
dbc.Spinner(color='info', children=[
html.Div(id='performance-trend-zones'),
]),
]),
# populated by callback
html.Div(className='col-lg-6 col-12 mt-2', style={
'display': 'none'} if not peloton_credentials_supplied else {},
children=[
dbc.Spinner(color='info', children=[
html.Div(className='col-lg-12',
children=[
html.P(['Training Distribution'], style={
'height': '20px',
'font-family': '"Open Sans", verdana, arial, sans-serif',
'font-size': '14px',
'color': white,
'fill': 'rgb(220, 220, 220)',
'line-height': '10px',
'opacity': 1,
'font-weight': 'normal',
'white-space': 'pre',
'marginBottom': 0})
]),
html.Div(id='workout-distribution-table',
children=[
dash_table.DataTable(
id='workout-type-distributions',
columns=[{'name': 'Activity',
'id': 'workout'},
{'name': '%',
'id': 'Percent of Total'}],
style_as_list_view=True,
fixed_rows={'headers': True, 'data': 0},
style_table={'height': '180px',
'overflowY': 'auto'},
style_header={
'backgroundColor': 'rgba(0,0,0,0)',
'borderBottom': '1px solid rgb(220, 220, 220)',
'borderTop': '0px',
# 'textAlign': 'center',
'fontSize': 12,
'fontWeight': 'bold',
'fontFamily': '"Open Sans", "HelveticaNeue", "Helvetica Neue", Helvetica, Arial, sans-serif',
},
style_cell={
'backgroundColor': 'rgba(0,0,0,0)',
'color': 'rgb(220, 220, 220)',
'borderBottom': '1px solid rgb(73, 73, 73)',
'textOverflow': 'ellipsis',
'maxWidth': 25,
'fontSize': 12,
'fontFamily': '"Open Sans", "HelveticaNeue", "Helvetica Neue", Helvetica, Arial, sans-serif',
},
style_cell_conditional=[
{
'if': {'column_id': c},
'textAlign': 'center'
} for c in
['workout', 'Percent of Total']
],
page_action="none",
)
]),
]),
]),
]),
html.Div(className='row', style={'paddingTop': '.75rem'}, children=[
html.Div(className='col-lg-6' if use_power else '', children=[
html.Div(id='performance-power-curve-container', children=[
dbc.Spinner(color='info', children=[
dcc.Graph(id='performance-power-curve',
config={'displayModeBar': False})
])
])
]),
html.Div(className='col-lg-5 col-11' if use_power else 'col-11',
style={'paddingRight': 0},
children=[
# Generated by callback
html.Div([
dbc.Spinner(color='info', children=[
dcc.Graph(id='trend-chart', config={'displayModeBar': False})
]),
]),
]),
html.Div(id='trend-controls', className='col-1',
style={'display': 'flex',
'justifyContent': 'space-between',
'paddingLeft': 0, 'paddingRight': 0},
children=get_trend_controls()),
]),
])
])
]),
]),
]),
]),
html.Div(className='row', children=[
html.Div(className='col-lg-8', children=[
dbc.Card([
dbc.CardBody([
html.Div(className='col-lg-12', style={'overflow': 'hidden'},
children=dash_table.DataTable(
id='activity-table',
data=create_activity_table(),
columns=[
{'name': 'Date', 'id': 'date'},
{'name': 'Name', 'id': 'name'},
{'name': 'Type', 'id': 'type'},
{'name': 'Time', 'id': 'time'},
{'name': 'Mileage', 'id': 'distance'},
{'name': 'PSS', 'id': 'tss'},
{'name': 'HRSS', 'id': 'hrss'},
# {'name': 'TRIMP', 'id': 'trimp'},
# {'name': 'NP', 'id': 'weighted_average_power'},
# {'name': 'IF', 'id': 'relative_intensity'},
# {'name': 'EF', 'id': 'efficiency_factor'},
# {'name': 'VI', 'id': 'variability_index'},
{'name': 'FTP', 'id': 'ftp'},
{'name': 'activity_id', 'id': 'activity_id'}
] if use_power else [{'name': 'Date', 'id': 'date'},
{'name': 'Name', 'id': 'name'},
{'name': 'Type', 'id': 'type'},
{'name': 'Time', 'id': 'time'},
{'name': 'Mileage', 'id': 'distance'},
{'name': 'TRIMP', 'id': 'trimp'},
{'name': 'activity_id', 'id': 'activity_id'}],
style_as_list_view=True,
fixed_rows={'headers': True, 'data': 0},
style_table={'height': '100%'},
style_header={'backgroundColor': 'rgba(0,0,0,0)',
'borderBottom': '1px solid rgb(220, 220, 220)',
'borderTop': '0px',
# 'textAlign': 'left',
'fontWeight': 'bold',
'fontFamily': '"Open Sans", "HelveticaNeue", "Helvetica Neue", Helvetica, Arial, sans-serif',
# 'fontSize': '1.2rem'
},
style_cell={
'backgroundColor': 'rgba(0,0,0,0)',
'color': 'rgb(220, 220, 220)',
'borderBottom': '1px solid rgb(73, 73, 73)',
'textAlign': 'center',
# 'whiteSpace': 'no-wrap',
# 'overflow': 'hidden',
'textOverflow': 'ellipsis',
'maxWidth': 175,
'minWidth': 50,
# 'padding': '0px',
'fontFamily': '"Open Sans", "HelveticaNeue", "Helvetica Neue", Helvetica, Arial, sans-serif',
# 'fontSize': '1.2rem'
},
style_cell_conditional=[
{
'if': {'column_id': 'activity_id'},
'display': 'none'
}
],
filter_action="native",
page_action="none",
# page_current=0,
# page_size=10,
)
),
]), ]),
]),
html.Div(id='growth-container', className='col-lg-4',
children=[
dbc.Card([
dbc.CardHeader(
html.Div(className='row align-items-center text-left', children=[
### Title ###
html.Div(id='growth-header', className='col-lg-12')
]),
),
dbc.CardBody([
html.Div(className='col-12 text-center align-items-center mb-2', children=[
dbc.DropdownMenu(
[
dbc.DropdownMenuItem("Running", header=True),
dbc.DropdownMenuItem("Distance", id="run|distance"),
dbc.DropdownMenuItem("Duration", id="run|elapsed_time"),
dbc.DropdownMenuItem("hrSS", id="run|hrss"),
dbc.DropdownMenuItem("Stress Score", id="run|tss"),
dbc.DropdownMenuItem("Trimp", id="run|trimp"),
dbc.DropdownMenuItem(divider=True),
dbc.DropdownMenuItem("Cycling", header=True),
dbc.DropdownMenuItem("Distance", id="ride|distance"),
dbc.DropdownMenuItem("Duration", id="ride|elapsed_time"),
dbc.DropdownMenuItem("hrSS", id="ride|hrss"),
dbc.DropdownMenuItem("Stress Score", id="ride|tss"),
dbc.DropdownMenuItem("Trimp", id="ride|trimp"),
],
label="Run Distance",
bs_size='sm',
className="mb-0",
id='growth-chart-metric-select',
),
]),
dcc.Graph(id='growth-chart', config={'displayModeBar': False},
# style={'height': '90%'}
)
])
]),
]),
]),
html.Div(id='modal-activity-id-type-metric', style={'display': 'none'}),
])
# def detect_trend(ln_rmssd_7_slope_trivial, hr_average_7_slope_trivial, cv_rmssd_7_slope_trivial,
# ln_rmssd_normalized_7_slope_trivial, atl_7_slope_trivial):
# if ln_rmssd_7_slope_trivial >= 0 and hr_average_7_slope_trivial <= 0 and cv_rmssd_7_slope_trivial < 0:
# return 'Coping well'
# elif ln_rmssd_7_slope_trivial < 0 and hr_average_7_slope_trivial < 0 \
# and atl_7_slope_trivial >= 0: # E.O Customization
# return 'Risk of accumulated fatigue'
# elif hr_average_7_slope_trivial > 0 and cv_rmssd_7_slope_trivial > 0:
# return 'Maladaptation'
# elif ln_rmssd_7_slope_trivial < 0 and hr_average_7_slope_trivial > 0 and cv_rmssd_7_slope_trivial < 0 \
# and atl_7_slope_trivial > 0: # E.O Customization:
# return 'Accumulated fatigue'
# else:
# return 'No Relevant Trends'
def zscore(x, y, window):
'''
:param x: metric to compare to mean & std
:param y: metric to do the rolling calculation on
:param window: number of days to rollback
:return:
'''
r = y.rolling(window=window)
m = r.mean() # .shift(1)
s = r.std(ddof=0) # .shift(1)
z = (x - m) / s
return z
def daily_z_recommendation(hrv_z_score, hr_z_score):
# https://www.myithlete.com/how-to-use-the-ithlete-pro-training-guide/
x, y = hrv_z_score, hr_z_score
if (x < -1 and y > 1.75) or (x < -1 and y < -2):
return 'Rest'
elif (x < -1 and -2 < y < 1.75) or (x > -1 and y > 1.75) or (x > -1 and y < -2):
return 'Low'
elif (-1 < x < 1 and -2 < y < 1.75) or (x > 1 and -2 < y < -1) or (x > 1 and 1 < y < 1.75):
return 'Mod'
elif (x > 1 and -1 < y < 1):
return 'High'
def daily_z_desc(hrv_z_score, hr_z_score):
# https://www.myithlete.com/how-to-use-the-ithlete-pro-training-guide/
x, y = hrv_z_score, hr_z_score
if x < -1 and y > 1.75:
return 'Stress / Illness'
elif x < -1 and -2 < y < 1.75:
return 'Impaired Recovery'
elif -1 < x < 1 and -2 < y < 1.75:
return 'Normal Training'
elif x > 1 and -1 < y < 1:
return 'Intensive Training'
elif x > 0 and y < -2:
return 'Low Energy / Activation'
else:
return 'No Trend Detected'
def z_adaptation(hrv7_z_score, hr7_z_score):
x, y = hrv7_z_score, hr7_z_score
if -1 < x < 0 and 0 < y < 1.75:
return 'Competition Ready'
elif 0 < x < 1.5 and -2 < y < 0:
return 'Coping Well'
elif -2.25 < x < -1 and 0 < y < 1.75:
return 'Not Coping Well'
else:
return 'No Trend Detected'
def z_color(z_trend):
if z_trend in ['Competition Ready', 'Intensive Training']:
return teal
elif z_trend in ['Coping Well', 'Normal Training']:
return light_blue
elif z_trend in ['Not Coping Well', 'Low Energy / Activation', 'Impaired Recovery']:
return orange
elif z_trend == 'Stress / Illness':
return 'red'
else:
return white
def z_recommendation_chart(hrv_z_score, hr_z_score, hrv7_z_score, hr7_z_score, hrv, hr, z_desc):
shapes = [
## Rest ##
dict(type='rect', xref='x',
yref='y', x0=-3, x1=-1, y0=1.75, y1=3,
fillcolor=orange, layer='below',
line=dict(width=0), ),
dict(type='rect', xref='x',
yref='y', x0=-3, x1=-1, y0=-3, y1=-2,
fillcolor=orange, layer='below',
line=dict(width=0),
),
## Low ##
dict(type='rect', xref='x',
yref='y', x0=-3, x1=-1, y0=-2, y1=1.75,
fillcolor=white, layer='below',
line=dict(width=0), ),
dict(type='rect', xref='x',
yref='y', x0=-1, x1=3, y0=1.75, y1=3,
fillcolor=white, layer='below',
line=dict(width=0), ),
dict(type='rect', xref='x',
yref='y', x0=-1, x1=3, y0=-2, y1=-3,
fillcolor=white, layer='below',
line=dict(width=0), ),
## Mod ##
dict(type='rect', xref='x',
yref='y', x0=-1, x1=1, y0=-2, y1=1.75,
fillcolor=light_blue, layer='below',
line=dict(width=0), ),
dict(type='rect', xref='x',
yref='y', x0=1, x1=3, y0=-2, y1=-1,
fillcolor=light_blue, layer='below',
line=dict(width=0), ),
dict(type='rect', xref='x',
yref='y', x0=1, x1=3, y0=1, y1=1.75,
fillcolor=light_blue, layer='below',
line=dict(width=0), ),
## High ##
dict(type='rect', xref='x',
yref='y', x0=1, x1=3, y0=-1, y1=1,
fillcolor=teal, layer='below',
line=dict(width=0), ),
]
return html.Div([
html.H6(className='mb-0', children=[z_desc]),
dcc.Graph(id='z-score-treemap', className='col-lg-12 mb-2',
config={'displayModeBar': False},
figure={
'data': [
# 7 day baselines
go.Scatter(
x=[hrv7_z_score],
y=[hr7_z_score],
# text=df['movement_tooltip'],
hoverinfo='none',
marker={
'color': [dark_blue],
'symbol': 'diamond',
'line_color': white,
'line_width': .5},
# orientation='h',
),
# Daily values
go.Scatter(
x=[hrv_z_score],
y=[hr_z_score],
# text=df['movement_tooltip'],
hoverinfo='none',
marker={
'color': ['rgb(66,66,66)'],
'line_color': white,
'line_width': .5},
# orientation='h',
)
],
'layout': go.Layout(
height=150,
# width=100,
shapes=shapes,
annotations=[go.layout.Annotation(
x=hrv_z_score,
y=hr_z_score,
xref="x",
yref="y",
text='HRV: {:.0f}<br>HR: {:.0f}'.format(hrv, hr),
bgcolor='rgba(66,66,66,.5)',
font=dict(
size=10,
color=white
),
arrowcolor='rgba(0,0,0,0)',
showarrow=True,
arrowhead=0,
ax=30,
ay=0
)],
# transition=dict(duration=transition),
font=dict(
size=8,
color=white
),
xaxis=dict(
title='Recovery',
range=[-3, 3],
showticklabels=False,
showgrid=False,
),
yaxis=dict(
title='<br>Activation',
range=[-3, 3],
showticklabels=False,
showgrid=False,
),
showlegend=False,
margin={'l': 25, 'b': 12, 't': 12, 'r': 25},
hovermode='x'
)
}
)
])
def get_hrv_df():
hrv_df = pd.read_sql(
sql=app.session.query(ouraSleepSummary.report_date, ouraSleepSummary.summary_date, ouraSleepSummary.rmssd,
ouraSleepSummary.hr_average).statement,
con=engine, index_col='report_date').sort_index(ascending=True)
# Merge readiness score
hrv_df = hrv_df.merge(pd.read_sql(
sql=app.session.query(ouraReadinessSummary.report_date, ouraReadinessSummary.score).statement,
con=engine, index_col='report_date'), how='left', left_index=True, right_index=True)
trimp_df = pd.read_sql(sql=app.session.query(stravaSummary.start_day_local, stravaSummary.trimp).statement,
con=engine, index_col='start_day_local').sort_index(ascending=True)
app.session.remove()
# Calculate ln rmssd
hrv_df['ln_rmssd'] = np.log(hrv_df['rmssd'])
# Calculate AVNN
hrv_df['AVNN'] = 60000 / hrv_df['hr_average']
trimp_df.index = pd.to_datetime(trimp_df.index)
hrv_df = pd.merge(hrv_df, trimp_df, how='left', left_index=True, right_index=True)
# Calculate HRV metrics
hrv_df.set_index(pd.to_datetime(hrv_df.index), inplace=True)
hrv_df = hrv_df.resample('D').mean()
# HRV baseline
hrv_df['rmssd_7'] = hrv_df['rmssd'].rolling(7).mean()
# Daily HRV change for KPI
hrv_df['rmssd_yesterday'] = hrv_df['rmssd'].shift(1)
# HR baseline
hrv_df['hr_average_yesterday'] = hrv_df['hr_average'].shift(1)
hrv_df['hr_average_7'] = hrv_df['hr_average'].rolling(7).mean()
# Natural Log calculations
hrv_df['ln_rmssd_7'] = hrv_df['ln_rmssd'].rolling(7).mean()
# 30/60 day Stdev and means
hrv_df['ln_rmssd_30'] = hrv_df['ln_rmssd'].rolling(30).mean()
hrv_df['ln_rmssd_60'] = hrv_df['ln_rmssd'].rolling(60).mean()
hrv_df['ln_rmssd_30_stdev'] = hrv_df['ln_rmssd'].rolling(30).std()
hrv_df['ln_rmssd_60_stdev'] = hrv_df['ln_rmssd'].rolling(60).std()
# Normal value (SWC) thresholds for 7 day hrv baseline trends to analyze physiological changes
hrv_df['swc_baseline_upper'] = hrv_df['ln_rmssd_60'] + hrv_df['ln_rmssd_60_stdev']
hrv_df['swc_baseline_lower'] = hrv_df['ln_rmssd_60'] - hrv_df['ln_rmssd_60_stdev']
# Normal value (SWC) thresholds for 7 day hrv baseline trends to guide workflow steps
hrv_df['swc_flowchart_upper'] = hrv_df['ln_rmssd_30'] + (hrv_df['ln_rmssd_30_stdev'] * .5)
hrv_df['swc_flowchart_lower'] = hrv_df['ln_rmssd_30'] - (hrv_df['ln_rmssd_30_stdev'] * .5)
hrv_df['within_flowchart_swc'] = True
hrv_df.loc[(hrv_df['ln_rmssd_7'] < hrv_df['swc_flowchart_lower']) | (hrv_df['ln_rmssd_7'] > hrv_df[
'swc_flowchart_upper']), 'within_flowchart_swc'] = False
# Normal value thresholds (SWC) for daily rmssd
hrv_df['swc_daily_upper'] = hrv_df['ln_rmssd_60'] + (hrv_df['ln_rmssd_60_stdev'] * 1.5)
hrv_df['swc_daily_lower'] = hrv_df['ln_rmssd_60'] - (hrv_df['ln_rmssd_60_stdev'] * 1.5)
hrv_df['within_daily_swc'] = True
hrv_df.loc[(hrv_df['ln_rmssd'] < hrv_df['swc_daily_lower']) | (hrv_df['ln_rmssd'] > hrv_df[
'swc_daily_upper']), 'within_daily_swc'] = False
# Z Score Method
# TODO: Update these z scores so be normalized by CV
hrv_df['hrv_z_score'] = zscore(x=hrv_df['ln_rmssd'], y=hrv_df['ln_rmssd'], window=30)
hrv_df['hr_z_score'] = zscore(x=hrv_df['hr_average'], y=hrv_df['hr_average'], window=30)
hrv_df["z_recommendation"] = hrv_df[["hrv_z_score", "hr_z_score"]].apply(lambda x: daily_z_recommendation(*x),
axis=1)
hrv_df["z_desc"] = hrv_df[["hrv_z_score", "hr_z_score"]].apply(lambda x: daily_z_desc(*x), axis=1)
# ithlete uses daily hr and hrv normalized by CV, use 7 day averages over 30 days instead?
hrv_df['hrv7_z_score'] = zscore(x=hrv_df['ln_rmssd_7'], y=hrv_df['ln_rmssd'], window=60)
hrv_df['hr7_z_score'] = zscore(x=hrv_df['hr_average_7'], y=hrv_df['hr_average'], window=60)
# Detect training adaptations based on 7day z scores
hrv_df["detected_trend"] = hrv_df[["hrv7_z_score", "hr7_z_score"]].apply(lambda x: z_adaptation(*x), axis=1)
# Threshold Flags
# hrv_df['under_low_threshold'] = hrv_df['ln_rmssd_7'] < hrv_df['swc_baseline_lower']
# hrv_df['under_low_threshold_yesterday'] = hrv_df['under_low_threshold'].shift(1)
# hrv_df['over_upper_threshold'] = hrv_df['ln_rmssd_7'] > hrv_df['swc_baseline_upper']
# hrv_df['over_upper_threshold_yesterday'] = hrv_df['over_upper_threshold'].shift(1)
# for i in hrv_df.index:
# if hrv_df.at[i, 'under_low_threshold_yesterday'] == False and hrv_df.at[
# i, 'under_low_threshold'] == True:
# hrv_df.at[i, 'lower_threshold_crossed'] = True
# else:
# hrv_df.at[i, 'lower_threshold_crossed'] = False
# if hrv_df.at[i, 'over_upper_threshold_yesterday'] == False and hrv_df.at[
# i, 'over_upper_threshold'] == True:
# hrv_df.at[i, 'upper_threshold_crossed'] = True
# else:
# hrv_df.at[i, 'upper_threshold_crossed'] = False
return hrv_df
def get_trend_controls(selected=None, sport='run'):
athlete_info = app.session.query(athlete).filter(athlete.athlete_id == 1).first()
use_run_power = True if athlete_info.use_run_power else False
use_cycle_power = True if athlete_info.use_cycle_power else False
use_power = True if use_run_power or use_cycle_power else False
app.session.remove()
metrics = {'average-watts': {'fa fa-bolt': 'Power (w)'},
'average-heartrate': {'fa fa-heartbeat': 'Heartrate'},
'tss': {'fa fa-tachometer-alt': 'Stress (tss)'},
'distance': {'fa fa-arrows-alt-h': 'Distance (mi)'},
'elapsed-time': {'fa fa-clock': 'Duration (min)'},
'average-speed': {'fa fa-flag-checkered': 'Pace'},
'average-ground-time': {'fa fa-road': 'Ground contact time'},
'average-oscillation': {'fa fa-arrows-alt-v': 'Vertical Oscillation'},
'average-leg-spring': {'fa fa-frog': 'Leg Spring Stiffness (LSS)'}
}
hide = []
if not selected:
selected = 'average_heartrate' if not use_power else 'average_watts'
if sport.lower() == 'run':
if not use_run_power:
hide.extend(['average-watts', 'tss', 'average-ground-time', 'average-oscillation', 'average-leg-spring'])
# if not stryd_credentials_supplied:
# hide.extend(['average-ground-time', 'average-oscillation', 'average-leg-spring'])
elif sport.lower() == 'ride':
hide.extend(['average-ground-time', 'average-oscillation', 'average-leg-spring'])
if not use_cycle_power:
hide.extend(['average-watts', 'tss'])
controls = []
for metric in metrics.keys():
style = {'padding': '0', 'alignSelf': 'center', 'display': 'none'} if metric in hide else {'padding': '0',
'alignSelf': 'center'}
is_selected = True if selected.replace('_', '-') == metric else False
controls.append(
html.Div(className='col-lg-12 align-items-center',
style=style,
children=[
html.I(id=f'{metric}-trend-button',
className=list(metrics[metric].keys())[0],
n_clicks_timestamp=1 if is_selected else 0,
style={'fontSize': '1rem',
'color': teal if is_selected else white,
'vertical-align': 'middle',
'bgColor': 'rgba(0,0,0,0)',
'border': 'none'}),
]),
)
controls.append(dbc.Tooltip(list(metrics[metric].values())[0], target=f'{metric}-trend-button'), )
return html.Div(className='row', children=controls)
def get_trend_chart(metric, sport='Run', days=90, intensity='all'):
date = datetime.now().date() - timedelta(days=days)
df = pd.read_sql(
sql=app.session.query(stravaSummary).filter(
stravaSummary.type.like(sport), stravaSummary.elapsed_time > app.session.query(athlete).filter(
athlete.athlete_id == 1).first().min_non_warmup_workout_time).statement, con=engine)
if intensity != 'all':
df = df[df['workout_intensity'] == intensity]
stryd_df = pd.read_sql(
sql=app.session.query(strydSummary).statement, con=engine)
app.session.remove()
df = df.merge(stryd_df, how='left', left_on='activity_id', right_on='strava_activity_id')
# Remove bad data
df[metric].replace(0, np.nan, inplace=True)
# Convert duration to minutes
if metric == 'elapsed_time':
df['duration'] = df[metric] / 60
metric = 'duration'
elif metric == 'average_speed':
df['average_pace'] = 60 / df[metric]
metric = 'average_pace'
# Get all time PR of current metric
if metric in ['average_pace', 'average_ground_time', 'average_oscillation']:
pr = df[metric].min()
else:
pr = df[metric].max()
# Filter df to date selection made from dropdown
df = df[df['start_date_local_x'].dt.date >= date]
# Resample to accurately plot line of best fit
df = df.set_index('start_date_local_x')
df = df[[metric]].resample('D').mean().reset_index()
# Ignore dates with null values when running our model
idx = np.isfinite(df[metric])
if len(idx) > 1:
slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(df.reset_index().index[idx],
df[metric][idx])
# Color trend line but its strength of fit
if r_value >= .8 or r_value <= -.8: # Strong fit
trend_strength = teal
elif (r_value < .8 and r_value >= .5) or (r_value > -.8 and r_value <= -.5): # Medium Fit
trend_strength = light_blue
else: # Weak fit
trend_strength = white
df[metric + '_trend'] = (df.index * slope) + intercept
else:
df[metric + '_trend'] = np.nan
trend_strength = white
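# Worked sketch of the fit-strength coloring above (illustrative numbers only, not app data):
#   slope, intercept, r_value, p_value, std_err = scipy.stats.linregress([0, 1, 2, 3], [150, 152, 155, 158])
#   here abs(r_value) is ~0.99, so the trend line would be drawn in teal (strong fit);
#   0.5 <= abs(r_value) < 0.8 draws light_blue, and abs(r_value) < 0.5 falls back to white.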
# Change index back for the chart
df = df.set_index('start_date_local_x')
# Format tooltips
if metric in ['duration', 'average_pace']:
text = ['{}: <b>{}'.format(metric.title().replace('_', ' '), str(timedelta(minutes=x)).split(".")[0]) for x in
df[metric].fillna(0)]
elif metric in ['distance', 'average_oscillation', 'average_leg_spring']:
text = ['{}: <b>{:.1f}'.format(metric.title().replace('_', ' '), x) for x in df[metric]]
else:
text = ['{}: <b>{:.0f}'.format(metric.title().replace('_', ' '), x) for x in df[metric]]
data = [
go.Scatter(
name=metric.title(),
x=df.index,
y=[np.nan if x == pr else x for x in df[metric]],
yaxis='y',
text=text,
hoverinfo='x+text',
mode='markers',
line={'dash': 'dot',
'color': 'rgba(220,220,220,.25)',
'width': 2},
showlegend=False,
marker={'size': 5},
),
go.Scatter(
name='{} Trend'.format(metric.title()),
x=df.index,
y=df[metric + '_trend'],
yaxis='y',
hoverinfo='none',
mode='lines',
line={'color': trend_strength,
'width': 3},
showlegend=False,
),
# go.Scatter(
# name='PR',
# x=df.index,
# y=[pr for x in df.index],
# mode='lines+text',
# text=[
# 'PR: <b>{:.0f}'.format(pr) if x == df.index.max() else ''
# for x in df.index],
# textfont=dict(
# size=10,
# color=orange
# ),
# textposition='top left',
# hoverinfo='none',
# # opacity=0.7,
# line={'dash': 'dot', 'color': orange, 'width': 1},
# showlegend=False,
# )
]
if pr in df[metric].values:
data.append(
go.Scatter(
name=metric.title() + ' PR',
x=df.index,
y=[np.nan if x != pr else x for x in df[metric]],
yaxis='y',
text=text[df.index.get_loc(df.loc[df[metric] == pr].index.values[0])],
hoverinfo='x+text',
mode='markers',
line={'dash': 'dot',
'color': orange,
'width': 2},
showlegend=False,
marker={'size': 5},
)
)
figure = {
'data': data,
'layout': go.Layout(
title=metric.title().replace('_', ' '),
height=200,
font=dict(
size=10,
color=white
),
xaxis=dict(
showticklabels=True,
showgrid=False,
tickformat='%b %d',
),
yaxis=dict(
# range=[df[metric].min() - (df[metric].min() * .15), df[metric].max() * 1.15],
showticklabels=True,
showgrid=True,
gridcolor='rgb(73, 73, 73)',
# tickformat=',d',
),
showlegend=False,
margin={'l': 25, 'b': 20, 't': 20, 'r': 0},
autosize=True,
hovermode='closest'
)
}
return figure
def training_zone(form):
if form:
if 25 < form:
return 'No Fitness'
elif 5 < form <= 25:
return 'Performance'
elif -10 < form <= 5:
return 'Maintenance'
elif -25 < form <= -10:
return 'Productive'
elif -40 < form <= -25:
return 'Cautionary'
elif form <= -40:
return 'Overreaching'
else:
return 'Form'
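# Usage examples: training_zone(10) -> 'Performance', training_zone(-15) -> 'Productive',
# training_zone(-45) -> 'Overreaching'; falsy input (None or 0) returns the generic label 'Form'.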
def readiness_score_recommendation(readiness_score):
try:
readiness_score = int(readiness_score)
if readiness_score == 0:
return ''
elif readiness_score >= oura_high_threshold:
return 'High'
elif readiness_score >= oura_med_threshold:
return 'Mod'
elif readiness_score >= oura_low_threshold:
return 'Low'
else:
return 'Rest'
except:
return 'N/A'
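# Illustrative only - the actual cutoffs come from the module-level oura_*_threshold values:
# with low/med/high thresholds of, say, 60/70/85, a readiness score of 72 maps to 'Mod',
# 58 maps to 'Rest', 0 maps to '' and a non-numeric value falls through to 'N/A'.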
def recommendation_color(recommendation_desc):
if recommendation_desc == 'High':
return teal
elif recommendation_desc == 'Mod' or recommendation_desc == 'HIIT':
return light_blue
elif recommendation_desc == 'Low':
return white
elif recommendation_desc == 'Rest':
return orange
elif recommendation_desc == 'N/A':
return 'rgba(220,220,220,.25)'
def create_daily_recommendations(plan_rec):
if plan_rec:
recovery_metric = app.session.query(athlete).filter(athlete.athlete_id == 1).first().recovery_metric
if recovery_metric == 'hrv':
recovery_metric_label = 'HRV'
recovery_metric_tooltip = 'Workflow steps based on daily rmssd changes within 60 day mean +/- 1.5 stdev'
elif recovery_metric == 'hrv_baseline':
recovery_metric_label = 'Baseline'
recovery_metric_tooltip = 'Workflow steps based on 7 day rmssd baseline changes within 30 day mean +/- .5 stdev'
elif recovery_metric == 'readiness':
recovery_metric_label = 'Readiness'
recovery_metric_tooltip = 'Workflow steps based on Oura readiness score > 70'
elif recovery_metric == 'zscore':
recovery_metric_label = 'HRV & HR'
recovery_metric_tooltip = 'Recommendation based on a multi-parameter approach'
data = plan_rec.replace('rec_', '').split('|')
plan_step = int(float(data[0]))
plan_recommendation = data[1]
plan_rationale = data[2]
oura_recommendation = data[3]
readiness_score = int(data[4])
sleep_score = int(data[5])
hrv_z_score = float(data[6])
hr_z_score = float(data[7])
hrv7_z_score = float(data[8])
hr7_z_score = float(data[9])
z_desc = data[10]
hrv = data[11]
hrv_yesterday = data[12]
hrv7 = data[13]
hr = data[14]
hr_yesterday = data[15]
hr7 = data[16]
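# The pipe-delimited string parsed above is assembled per-day in create_fitness_chart()
# (the 'workout_plan' column). Illustrative shape only (values are made up):
# 'rec_2.0|Mod|HRV within baseline|High|85|80|0.4|-0.2|0.1|0.0|Normal|65.0|62.0|64.0|48.0|50.0|49.0|'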
hrv = float(hrv) if hrv is not None else 'N/A'
hrv_yesterday = float(hrv_yesterday) if hrv_yesterday is not None else 'N/A'
hrv7 = float(hrv7) if hrv7 is not None else 'N/A'
hr = float(hr) if hr is not None else 'N/A'
hr_yesterday = float(hr_yesterday) if hr_yesterday is not None else 'N/A'
hr7 = float(hr7) if hr7 is not None else 'N/A'
if oura_recommendation == 'Rest':
oura_rationale = f'Readiness score is < {oura_low_threshold}'
elif oura_recommendation == 'Low':
oura_rationale = f'Readiness score is between {oura_low_threshold} and {oura_med_threshold}'
elif oura_recommendation == 'Mod':
oura_rationale = f'Readiness score is between {oura_med_threshold} and {oura_high_threshold}'
elif oura_recommendation == 'High':
oura_rationale = f'Readiness score is {oura_high_threshold} or higher'
else:
oura_recommendation, oura_rationale = 'N/A', 'N/A'
else:
hrv, hrv_yesterday, hrv7, hrv7_change, hr, hr_yesterday, hr7, plan_rationale, plan_recommendation, oura_recommendation, recovery_metric, recovery_metric_label = \
'N/A', 'N/A', 'N/A', 'N/A', 'N/A', 'N/A', 'N/A', 'N/A', 'N/A', 'N/A', 'N/A', 'N/A'
sleep_score, readiness_score, oura_rationale, plan_step, hrv_z_score, hr_z_score = None, None, None, None, None, None
readiness_score = round(readiness_score) if readiness_score else 'N/A'
sleep_score = round(sleep_score) if sleep_score else 'N/A'
if hrv_yesterday != 'N/A':
hrv_yesterday_arrow = 'fa fa-angle-up' if hrv > hrv_yesterday else 'fa fa-angle-down'
hrv_yesterday_color = teal if hrv > hrv_yesterday else orange
else:
hrv_yesterday_arrow = ''
hrv_yesterday_color = ''
if hrv != 'N/A' and hrv7 != 'N/A':
change = hrv - hrv7
hrv_vs_baseline_arrow = 'fa fa-angle-up' if change > 0 else 'fa fa-angle-down'
hrv_vs_baseline_color = teal if change > 0 else orange
else:
hrv_vs_baseline_arrow, hrv_vs_baseline_color = '', ''
if hr_yesterday != 'N/A':
hr_yesterday_arrow = 'fa fa-angle-up' if hr > hr_yesterday else 'fa fa-angle-down'
hr_yesterday_color = teal if hr < hr_yesterday else orange
else:
hr_yesterday_arrow = ''
hr_yesterday_color = ''
if hr != 'N/A' and hr7 != 'N/A':
change = hr - hr7
hr_vs_baseline_arrow = 'fa fa-angle-up' if change > 0 else 'fa fa-angle-down'
hr_vs_baseline_color = teal if change < 0 else orange
else:
hr_vs_baseline_arrow, hr_vs_baseline_color = '', ''
workflow_img = html.Div(className='col-lg-12', children=[
html.Img(src=f'../assets/images/hrv{plan_step}.png', height=200,
width=150) if plan_step is not None else html.Div(),
])
hrv_gauge = html.Div(className='col-lg-12', children=[
dcc.Graph(id='hrv-gauge', className='col-lg-12',
config={'displayModeBar': False},
figure={
'data': [
go.Bar(
x=[-1.5],
y=['test'],
hoverinfo='none',
marker={
'color': [teal]},
orientation='h',
),
go.Bar(
x=[-.75],
y=['test'],
hoverinfo='none',
marker={
'color': [white]},
orientation='h',
),
go.Bar(
x=[-.75],
y=['test'],
hoverinfo='none',
marker={
'color': [orange]},
orientation='h',
),
go.Bar(
x=[1.5],
y=['test'],
hoverinfo='none',
marker={
'color': [teal]},
orientation='h',
),
go.Bar(
x=[.75],
y=['test'],
hoverinfo='none',
marker={
'color': [light_blue]},
orientation='h',
),
go.Bar(
x=[.75],
y=['test'],
hoverinfo='none',
marker={
'color': [orange]},
orientation='h',
),
],
'layout': go.Layout(
barmode='relative',
height=45,
# transition=dict(duration=transition),
font=dict(
size=10,
color=white
),
xaxis=dict(
showticklabels=True,
range=[-3, 3],
tickvals=[hrv_z_score],
ticktext=[hrv],
),
yaxis=dict(
showticklabels=False,
# range=[0, df['met_1min'].max() if df['met_1min'].max() > 7 else 8],
# tickvals=[1, 3, 7],
# ticktext=['Low ', 'Med ', 'High '],
showgrid=False,
),
showlegend=False,
margin={'l': 0, 'b': 15, 't': 0, 'r': 0},
hovermode='x'
)
}
) if hrv_z_score and hr_z_score else None
])
daily_hrv_kpis = html.Div([html.Div(className='row text-center align-items-center', children=[
# html.Div(className='col-lg-12 text-center align-items-center mb-2', children=[
# html.H5(hrv, style={'display': 'inline'}),
]),
html.Div(className='row', children=[
html.Div(className='col-lg-6', children=[
html.H6('Yesterday', className='col-lg-12 mb-0'),
html.Div(className='col-lg-12 text-center align-items-center mb-0', children=[
html.H5('{:.0f}'.format(
hrv_yesterday) if hrv_yesterday != 'N/A' else hrv_yesterday,
style={'display': 'inline'}),
html.I(className=f'{hrv_yesterday_arrow} text-center align-items-center',
style={'fontSize': '1rem',
'display': 'inline',
'paddingLeft': '.25vw',
'color': hrv_yesterday_color}),
]),
]),
html.Div(className='col-lg-6', children=[
html.H6('Baseline', className='col-lg-12 mb-0'),
html.Div(className='col-lg-12 text-center align-items-center mb-0', children=[
html.H5('{:.0f}'.format(hrv7) if hrv7 != 'N/A' else hrv7,
style={'display': 'inline'}),
html.I(className=f'{hrv_vs_baseline_arrow} text-center align-items-center',
style={'fontSize': '1rem',
'display': 'inline',
'paddingLeft': '.25vw',
'color': hrv_vs_baseline_color}),
]),
]),
])
])
daily_hr_kpis = html.Div([html.Div(className='row text-center align-items-center', children=[
# html.Div(className='col-lg-12 text-center align-items-center mb-2', children=[
# html.H5(hrv, style={'display': 'inline'}),
]),
html.Div(className='row', children=[
html.Div(className='col-lg-6', children=[
html.H6('Yesterday', className='col-lg-12 mb-0'),
html.Div(className='col-lg-12 text-center align-items-center mb-0', children=[
html.H5(
'{:.0f}'.format(hr_yesterday) if hr_yesterday != 'N/A' else hr_yesterday,
style={'display': 'inline'}),
html.I(className=f'{hr_yesterday_arrow} text-center align-items-center',
style={'fontSize': '1rem',
'display': 'inline',
'paddingLeft': '.25vw',
'color': hr_yesterday_color}),
]),
]),
html.Div(className='col-lg-6', children=[
html.H6('Baseline', className='col-lg-12 mb-0'),
html.Div(className='col-lg-12 text-center align-items-center mb-0', children=[
html.H5('{:.0f}'.format(hr7) if hr7 != 'N/A' else hr7,
style={'display': 'inline'}),
html.I(className=f'{hr_vs_baseline_arrow} text-center align-items-center',
style={'fontSize': '1rem',
'display': 'inline',
'paddingLeft': '.25vw',
'color': hr_vs_baseline_color}),
]),
]),
])
])
oura_gauge = html.Div(children=[
html.H6(f'Oura Ready: {readiness_score} | Sleep: {sleep_score}', className='col-lg-12'),
# html.H3(oura_recommendation, id='oura-rationale',
# style={'color': recommendation_color(oura_recommendation)}),
# dbc.Tooltip(None if oura_recommendation == 'N/A' else oura_rationale,
# target="oura-rationale"),
html.Div(className='col-lg-12', children=[
dcc.Graph(id='oura-gauge', className='col-lg-12',
config={'displayModeBar': False},
figure={
'data': [
go.Bar(
x=[70, 7, 8, 15],
y=['dummy', 'dummy', 'dummy', 'dummy'],
# text=df['movement_tooltip'],
hoverinfo='none',
marker={
'color': [orange, white, light_blue, teal]},
orientation='h',
),
],
'layout': go.Layout(
height=45,
# transition=dict(duration=transition),
font=dict(
size=10,
color=white
),
xaxis=dict(
showticklabels=True,
range=[50, 100],
tickvals=[readiness_score, sleep_score],
ticktext=['R', 'S'],
),
yaxis=dict(
showticklabels=False,
showgrid=False,
),
showlegend=False,
margin={'l': 0, 'b': 15, 't': 0, 'r': 0},
hovermode='x'
)
}
) if oura_recommendation != 'N/A' else None
]),
])
if recovery_metric == 'N/A':
recommendation_context = html.Div(html.H3('N/A'))
elif recovery_metric in ['hrv', 'hrv_baseline']:
recommendation_context = html.Div([
workflow_img,
oura_gauge,
html.H6("Heart Rate Variability", className='col-lg-12'),
hrv_gauge,
daily_hrv_kpis,
])
elif recovery_metric == 'readiness':
recommendation_context = html.Div([
oura_gauge,
z_recommendation_chart(hrv_z_score, hr_z_score, hrv7_z_score, hr7_z_score, hrv, hr, z_desc),
html.H6("Heart Rate Variability", className='col-lg-12'),
daily_hrv_kpis,
html.H6("Heart Rate", className='col-lg-12'),
daily_hr_kpis
])
elif recovery_metric == 'zscore':
recommendation_context = html.Div([
z_recommendation_chart(hrv_z_score, hr_z_score, hrv7_z_score, hr7_z_score, hrv, hr, z_desc),
oura_gauge,
html.H6("Heart Rate Variability", className='col-lg-12'),
daily_hrv_kpis,
html.H6("Heart Rate", className='col-lg-12'),
daily_hr_kpis
])
return html.Div(id='recommendation', style={'display': 'flex', 'flexDirection': 'column',
'justifyContent': 'space-between'},
children=[
html.Div(children=[
html.H6(f'{recovery_metric_label} Recommendation', id='workflow-recommendation-title',
className='col-lg-12'),
dbc.Tooltip(None if plan_recommendation == 'N/A' else recovery_metric_tooltip,
target="workflow-recommendation-title"),
html.H3(plan_recommendation, className='col-lg-12', id='hrv-rationale',
style={'color': recommendation_color(plan_recommendation)}),
dbc.Tooltip(None if plan_recommendation == 'N/A' else plan_rationale,
target="hrv-rationale", ),
recommendation_context,
#
# ]),
# html.Div(className='row text-center align-items-center', children=[
# html.H6('Baseline HRV', className='col-lg-12'),
# html.Div(className='col-lg-4', children=[
# html.P(f'Baseline {hrv7}')
# ]),
# html.Div(className='col-lg-4', children=[
# html.P(f'Yesterday {hrv7_yesterday}')
# ]),
# html.Div(className='col-lg-4', children=[
# html.P(f'Baseline {hrv7}')
# ]),
# ])
#
]),
])
def create_fitness_kpis(date, ctl, ramp, rr_min_threshold, rr_max_threshold, atl, tsb, hrv7, trend):
# TODO: Remove ramp rate?
if atl is not None and ctl is not None:
ctl = round(ctl, 1)
tsb = round(tsb, 1)
if ctl == 0 or ctl == 'N/A':
atl_ctl_ratio_injury_risk = 'No Fitness'
atl_ctl_ratio = 'N/A'
atl_ctl_ratio_injury_risk_color = white
else:
atl_ctl_ratio = atl / ctl
if atl_ctl_ratio > 1.75:
atl_ctl_ratio_injury_risk = 'High Injury Risk'
atl_ctl_ratio_injury_risk_color = orange
elif 1.3 < atl_ctl_ratio <= 1.75:
atl_ctl_ratio_injury_risk = 'Increased Injury Risk'
atl_ctl_ratio_injury_risk_color = light_blue
elif 0.8 < atl_ctl_ratio <= 1.3:
atl_ctl_ratio_injury_risk = 'Optimal Load'
atl_ctl_ratio_injury_risk_color = teal
elif 0.8 >= atl_ctl_ratio:
atl_ctl_ratio_injury_risk = 'Loss of Fitness'
atl_ctl_ratio_injury_risk_color = orange
else:
atl_ctl_ratio_injury_risk, ctl, atl, atl_ctl_ratio, atl_ctl_ratio_injury_risk_color = 'N/A', 'N/A', 'N/A', 'N/A', white
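# Worked examples of the acute:chronic ratio bands above (illustrative numbers):
#   atl=60, ctl=40 -> ratio 1.5 -> 'Increased Injury Risk'
#   atl=45, ctl=50 -> ratio 0.9 -> 'Optimal Load'
#   atl=30, ctl=50 -> ratio 0.6 -> 'Loss of Fitness'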
# injury_risk = 'High' if ramp >= rr_max_threshold else 'Medium' if ramp >= rr_min_threshold else 'Low'
detected_trend_color = z_color(trend)
return [html.Div(className='row', children=[
### Date KPI ###
html.Div(className='col-lg-2', children=[
html.Div(children=[
html.H6('{}'.format(datetime.strptime(date, '%Y-%m-%d').strftime("%b %d, %Y")),
className='d-inline-block',
style={'fontWeight': 'bold', 'color': 'rgb(220, 220, 220)', 'marginTop': '0',
'marginBottom': '0'}),
]),
]),
### CTL KPI ###
html.Div(id='ctl-kpi', className='col-lg-2', children=[
html.Div(children=[
html.H6('Fitness {}'.format(ctl),
className='d-inline-block',
style={'color': ctl_color, 'marginTop': '0', 'marginBottom': '0'}),
]),
]),
dbc.Tooltip(
'Fitness (CTL) is an exponentially weighted average of your last 42 days of training stress scores (TSS) and reflects the training you have done over the last 6 weeks. Fitness is sport specific.',
target="ctl-kpi"),
### ATL KPI ###
html.Div(id='atl-kpi', className='col-lg-2', children=[
html.Div(children=[
html.H6('Fatigue {}'.format(round(atl, 1) if atl != 'N/A' else 'N/A'),
className='d-inline-block',
style={'color': atl_color, 'marginTop': '0', 'marginBottom': '0'}),
]),
]),
dbc.Tooltip(
'Fatigue (ATL) is an exponentially weighted average of your last 7 days of training stress scores which provides an estimate of your fatigue accounting for the workouts you have done recently. Fatigue is not sport specific.',
target="atl-kpi"),
### TSB KPI ###
html.Div(id='tsb-kpi', className='col-lg-2', children=[
html.Div(children=[
html.H6('{} {}'.format('Form' if isinstance(tsb, str) else training_zone(tsb), tsb),
className='d-inline-block',
style={'color': tsb_color, 'marginTop': '0', 'marginBottom': '0'}),
]),
]),
dbc.Tooltip(
"Training Stress Balance (TSB) or Form represents the balance of training stress. A positive TSB number means that you would have a good chance of performing well during those 'positive' days, and would suggest that you are both fit and fresh.",
target="tsb-kpi", ),
### Injury Risk ###
html.Div(id='injury-risk', className='col-lg-2', children=[
html.Div(children=[
# html.H6('Injury Risk: {}'.format(injury_risk),
html.H6('{}'.format(atl_ctl_ratio_injury_risk),
className='d-inline-block',
style={'color': atl_ctl_ratio_injury_risk_color, 'marginTop': '0', 'marginBottom': '0'})
]),
]),
# dbc.Tooltip('7 day CTL △ = {:.1f}'.format(ramp), target='injury-risk'),
dbc.Tooltip(
'ATL to CTL ratio = {}'.format(round(atl_ctl_ratio, 1) if atl_ctl_ratio != 'N/A' else 'N/A'),
target='injury-risk'),
### Detected Trend ###
html.Div(id='detected-trend-kpi', className='col-lg-2', children=[
html.Div(children=[
html.H6(trend if trend else 'No Trend Detected',
className='d-inline-block',
style={'color': detected_trend_color, 'marginTop': '0', 'marginBottom': '0'})
]),
] if oura_credentials_supplied else []),
dbc.Tooltip(
"Identified training adaption from physiological trends",
target="detected-trend-kpi"
)
# ### HRV 7 Day Average ###
# html.Div(id='hrv7-kpi', className='col-lg-2', children=[
# html.Div(children=[
# html.H6('7 Day HRV {}'.format(hrv7),
# className='d-inline-block',
# style={'color': teal, 'marginTop': '0', 'marginBottom': '0'})
# ]),
# ] if oura_credentials_supplied else []),
# dbc.Tooltip(
# "Rolling 7 Day HRV Average. Falling below the baseline threshold indicates you are not recovered and should hold back on intense training. Staying within the thresholds indicates you should stay on course, and exceeding the thresholds indicates a positive adaptation and workout intensity can be increased.",
# target="hrv7-kpi"
# )
]),
]
def create_activity_table(date=None):
df_summary_table_columns = ['name', 'type', 'time', 'distance', 'tss', 'hrss', 'trimp', 'weighted_average_power',
'relative_intensity', 'efficiency_factor', 'variability_index', 'ftp', 'activity_id']
# Convert date to datetime object if read from clickData
if date is not None:
df_table = pd.read_sql(
sql=app.session.query(stravaSummary.start_day_local, stravaSummary.name, stravaSummary.type,
stravaSummary.elapsed_time,
stravaSummary.distance, stravaSummary.tss, stravaSummary.hrss,
stravaSummary.trimp, stravaSummary.weighted_average_power,
stravaSummary.relative_intensity, stravaSummary.efficiency_factor,
stravaSummary.variability_index, stravaSummary.ftp,
stravaSummary.activity_id)
.filter(stravaSummary.start_day_local == date)
.statement,
con=engine)
else:
df_table = pd.read_sql(
sql=app.session.query(stravaSummary.start_day_local, stravaSummary.name, stravaSummary.type,
stravaSummary.elapsed_time,
stravaSummary.distance, stravaSummary.tss, stravaSummary.hrss,
stravaSummary.trimp, stravaSummary.weighted_average_power,
stravaSummary.relative_intensity, stravaSummary.efficiency_factor,
stravaSummary.variability_index, stravaSummary.ftp,
stravaSummary.activity_id)
.statement, con=engine)
app.session.remove()
df_table['distance'] = df_table['distance'].replace({0: np.nan})
# Filter df to columns we want for the table
# If data was returned for date passed
if len(df_table) > 0:
# Add date column
df_table['date'] = df_table['start_day_local'].apply(lambda x: x.strftime('%a, %b %d, %Y'))
df_table['time'] = df_table['elapsed_time'].apply(lambda x: str(timedelta(seconds=x)))
# Add id column and sort so selecting a row from the dash data table still works when filtering
df_table.sort_values(by='start_day_local', ascending=False, inplace=True)
df_table.reset_index(inplace=True)
df_table['id'] = df_table.index
df_summary_table_columns = ['id', 'date'] + df_summary_table_columns
# Reorder columns
df_table = df_table[df_summary_table_columns]
# Table Rounding
round_0_cols = ['tss', 'hrss', 'trimp', 'weighted_average_power', 'ftp']
df_table[round_0_cols] = df_table[round_0_cols].round(0)
round_2_cols = ['distance', 'relative_intensity', 'efficiency_factor', 'variability_index']
df_table[round_2_cols] = df_table[round_2_cols].round(2)
return df_table[df_summary_table_columns].sort_index(ascending=True).to_dict('records')
else:
return [{}]
# return html.H3('No workouts found for {}'.format(date.strftime("%b %d, %Y")), style={'textAlign': 'center'})
def create_growth_kpis(date, cy, cy_metric, ly, ly_metric, metric):
cy_title, ly_title = f'{cy}: N/A', f'{ly}: N/A'
if cy_metric and metric in ['elapsed_time', 'high_intensity_seconds', 'low_intensity_seconds',
'mod_intensity_seconds']:
cy_title = '{}: {}'.format(cy, timedelta(seconds=cy_metric))
if ly_metric and metric in ['elapsed_time', 'high_intensity_seconds', 'low_intensity_seconds',
'mod_intensity_seconds']:
ly_title = '{}: {}'.format(ly, timedelta(seconds=ly_metric))
if cy_metric and metric == 'distance':
cy_title = '{}: {:.1f} mi.'.format(cy, cy_metric)
if ly_metric and metric == 'distance':
ly_title = '{}: {:.1f} mi.'.format(ly, ly_metric)
if cy_metric and metric in ['hrss', 'trimp', 'tss']:
cy_title = '{}: {:.0f}'.format(cy, cy_metric)
if ly_metric and metric in ['hrss', 'trimp', 'tss']:
ly_title = '{}: {:.0f}'.format(ly, ly_metric)
if cy_metric and ly_metric:
cy_color = orange if cy_metric < ly_metric else teal
else:
cy_color = white
return html.Div(className='row text-center align-items-center', children=[
### Title ###
html.Div(id='yoy-title', className='col-lg-4', children=[
html.Div(children=[
html.H6('YOY Performance', className='mt-0 mb-0 d-inline-block'),
]),
]),
### Current Year ###
html.Div(id='target-change-kpi', className='col-lg-4', children=[
html.Div(children=[
html.H6(cy_title, className='mt-0 mb-0 d-inline-block',
style={'color': cy_color}),
]),
]),
### Last Year ###
html.Div(id='atl-kpi', className='col-lg-4', children=[
html.Div(children=[
html.H6(ly_title, className='mt-0 mb-0 d-inline-block',
style={'color': white}),
]),
])
])
def create_yoy_chart(metric, sport='all'):
'''
:param metric: Allowed values from strava summary table [hrss, tss, trimp, distance, elapsed_time, high_intensity_seconds, mod_intensity_seconds, low_intensity_seconds]
:return:
'''
# weekly_tss_goal = app.session.query(athlete).filter(athlete.athlete_id == 1).first().weekly_tss_goal
athlete_info = app.session.query(athlete).filter(athlete.athlete_id == 1).first()
use_power = True if athlete_info.use_run_power or athlete_info.use_cycle_power else False
if sport != 'all':
df = pd.read_sql(
sql=app.session.query(stravaSummary).filter(
stravaSummary.elapsed_time > athlete_info.min_non_warmup_workout_time,
stravaSummary.type.like(sport),
).statement, con=engine, index_col='start_date_utc').sort_index(ascending=True)
else:
df = pd.read_sql(
sql=app.session.query(stravaSummary).filter(
stravaSummary.elapsed_time > athlete_info.min_non_warmup_workout_time,
# or_(
# extract('year', stravaSummary.start_date_utc) == datetime.utcnow().year,
# extract('year', stravaSummary.start_date_utc) == (datetime.utcnow().year - 1))
).statement, con=engine, index_col='start_date_utc').sort_index(ascending=True)
app.session.remove()
df['year'] = df.index.year
df['day'] = df.index.dayofyear
df = df.pivot_table(index='day', columns='year', values=metric, aggfunc=np.sum).fillna(0)
df = df.set_index(pd.to_datetime(datetime(1970, 1, 1) + pd.to_timedelta(df.index - 1, 'd')))
# If new year and no workouts yet, add column
if datetime.now().year not in df.columns:
df[datetime.now().year] = np.nan
# Resample so every day of year is shown on x axis and for yearly goal
df.at[pd.to_datetime(datetime(1970, 1, 1)), df.columns[0]] = None
df = df.resample('D').sum()
df = df[:-1]
# Remove future days of current year
df[df.columns[-1]] = np.where(df.index.dayofyear > datetime.now().timetuple().tm_yday, np.nan, df[df.columns[-1]])
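# At this point df holds one row per calendar day (indexed on a dummy 1970 date) and one
# column per year, e.g. (illustrative):
#                 2019   2020
#   1970-01-01     0.0   55.0
#   1970-01-02    40.0    0.0
# so the per-column cumsum() calls below produce comparable year-to-date running totals.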
data = []
colors = [teal, white, light_blue, dark_blue, ctl_color, atl_color, tsb_color, orange, 'rgba(250, 47, 76,.7)',
orange]
# Plot latest line first for most recent 10 years
index, current_date, cy_metric, ly_metric, target = 0, None, None, None, None
for year in list(df.columns)[9:-12:-1]:
if metric in ['elapsed_time', 'high_intensity_seconds', 'low_intensity_seconds', 'mod_intensity_seconds']:
text = ['{}: <b>{}'.format(str(year), timedelta(seconds=x)) for x in df[year].cumsum().fillna(0)]
elif metric == 'distance':
text = ['{}: <b>{:.1f} mi'.format(str(year), x) for x in df[year].cumsum().fillna(0)]
elif metric in ['hrss', 'trimp', 'tss']:
text = ['{}: <b>{:.0f}'.format(str(year), x) for x in df[year].cumsum().fillna(0)]
data.append(
go.Scatter(
name=str(year),
x=df.index,
y=df[year].cumsum(),
mode='lines',
text=text,
hoverinfo='x+text',
customdata=[
'{}'.format(f'cy|{metric}|{year}' if index == 0 else f'ly|{metric}|{year}' if index == 1 else None)
for x in df.index],
line={'shape': 'spline', 'color': colors[index]},
# Default to only CY and PY shown
visible=True if index < 2 else 'legendonly'
)
)
# Store current data points for hoverdata kpi initial values
if index == 0:
temp_df = df[~np.isnan(df[year])]
temp_df[year] = temp_df[year].cumsum()
current_date = temp_df.index.max()
cy_metric = temp_df.loc[current_date][year]
cy = year
if index == 1:
temp_df[year] = temp_df[year].cumsum()
ly_metric = temp_df.loc[current_date][year]
ly = year
index += 1
# Multiply by 40 weeks in the year (roughly 3 weeks on, 1 off)
# df['daily_tss_goal'] = (weekly_tss_goal * 52) / 365
# temp_df['daily_tss_goal'] = df['daily_tss_goal'].cumsum()
# target = temp_df.loc[current_date]['daily_tss_goal']
# data.append(
# go.Scatter(
# name='SS Goal',
# x=df.index,
# y=df['daily_tss_goal'].cumsum(),
# mode='lines',
# customdata=['target' for x in df.index],
# hoverinfo='x',
# line={'dash': 'dot',
# 'color': 'rgba(127, 127, 127, .35)',
# 'width': 2},
# # showlegend=False
# )
# )
hoverData = dict(points=[
{'x': current_date, 'y': cy_metric, 'customdata': f'cy|{metric}|{cy}'},
{'x': current_date, 'y': ly_metric, 'customdata': f'ly|{metric}|{ly}'},
# {'x': current_date, 'y': target, 'customdata': 'target'}
])
figure = {
'data': data,
'layout': go.Layout(
# transition=dict(duration=transition),
font=dict(
size=10,
color=white
),
height=416,
xaxis=dict(
showgrid=False,
showticklabels=True,
tickformat='%b %d',
),
yaxis=dict(
showgrid=True,
gridcolor='rgb(73, 73, 73)',
gridwidth=.5,
),
# Set margins to 0, style div sets padding
margin={'l': 40, 'b': 25, 't': 10, 'r': 20},
showlegend=True,
legend=dict(
x=.5,
y=1,
bgcolor='rgba(0,0,0,0)',
xanchor='center',
orientation='h',
),
autosize=True,
hovermode='x'
)
}
return figure, hoverData
def get_workout_types(df_summary, run_status, ride_status, all_status):
df_summary['type'] = df_summary['type'].fillna('REMOVE')
df_summary = df_summary[df_summary['type'] != 'REMOVE']
# Generate list of all workout types for when the 'all' boolean is selected
other_workout_types = [x for x in df_summary['type'].unique() if 'ride' not in x.lower() and 'run' not in x.lower()]
run_workout_types = [x for x in df_summary['type'].unique() if 'run' in x.lower()]
ride_workout_types = [x for x in df_summary['type'].unique() if 'ride' in x.lower()]
# Concat all types into 1 list based of switches selected
workout_types = []
workout_types = workout_types + other_workout_types if all_status else workout_types
workout_types = workout_types + ride_workout_types if ride_status else workout_types
workout_types = workout_types + run_workout_types if run_status else workout_types
return workout_types
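# Example (hedged - actual values depend on the Strava types present in df_summary):
#   get_workout_types(df_summary, run_status=True, ride_status=False, all_status=False)
#   might return ['Run', 'VirtualRun'], while all_status=True would also append
#   non-run/non-ride types such as 'Yoga' or 'WeightTraining'.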
def create_fitness_chart(run_status, ride_status, all_status, power_status, hr_status, atl_status):
df_summary = pd.read_sql(sql=app.session.query(stravaSummary).statement, con=engine,
index_col='start_date_local').sort_index(ascending=True)
athlete_info = app.session.query(athlete).filter(athlete.athlete_id == 1).first()
rr_max_threshold = athlete_info.rr_max_goal
rr_min_threshold = athlete_info.rr_min_goal
use_power = True if athlete_info.use_run_power or athlete_info.use_cycle_power else False
## Readiness score now exists in get_hrv_df()
# df_readiness = pd.read_sql(
# sql=app.session.query(ouraReadinessSummary.report_date, ouraReadinessSummary.score).statement,
# con=engine,
# index_col='report_date').sort_index(ascending=True)
df_sleep = pd.read_sql(
sql=app.session.query(ouraSleepSummary.report_date, ouraSleepSummary.score).statement,
con=engine,
index_col='report_date').sort_index(ascending=True)
df_sleep = df_sleep.rename(columns={'score': 'sleep_score'})
df_plan = pd.read_sql(
sql=app.session.query(workoutStepLog.date, workoutStepLog.workout_step,
workoutStepLog.workout_step_desc, workoutStepLog.rationale).statement,
con=engine,
index_col='date').sort_index(ascending=True)
df_annotations = pd.read_sql(
sql=app.session.query(annotations.athlete_id, annotations.date, annotations.annotation).filter(
athlete.athlete_id == 1).statement,
con=engine,
index_col='date').sort_index(ascending=False)
app.session.remove()
chart_annotations = [go.layout.Annotation(
x=pd.to_datetime(x),
y=0,
xref="x",
yref="y",
text=y,
arrowcolor=white,
showarrow=True,
arrowhead=3,
# ax=0,
# ay=-100
) for (x, y) in zip(df_annotations.index, df_annotations.annotation)
]
if oura_credentials_supplied:
hrv_df = get_hrv_df()
# Create a flag to color TSS bars on FTP-test days - use a number so the column survives the resample
df_new_run_ftp = df_summary[df_summary['type'].str.lower().str.contains('run')]
df_new_run_ftp['new_run_ftp_flag'] = 0
if len(df_new_run_ftp) > 0:
df_new_run_ftp['previous_ftp'] = df_new_run_ftp['ftp'].shift(1)
df_new_run_ftp = df_new_run_ftp[~np.isnan(df_new_run_ftp['previous_ftp'])]
df_new_run_ftp.loc[df_new_run_ftp['previous_ftp'] > df_new_run_ftp['ftp'], 'new_run_ftp_flag'] = -1
df_new_run_ftp.loc[df_new_run_ftp['previous_ftp'] < df_new_run_ftp['ftp'], 'new_run_ftp_flag'] = 1
# Highlight the workout which caused the new FTP to be set
df_new_run_ftp['new_run_ftp_flag'] = df_new_run_ftp['new_run_ftp_flag'].shift(-1)
df_new_ride_ftp = df_summary[df_summary['type'].str.lower().str.contains('ride')]
df_new_ride_ftp['new_ride_ftp_flag'] = 0
if len(df_new_ride_ftp) > 0:
df_new_ride_ftp['previous_ftp'] = df_new_ride_ftp['ftp'].shift(1)
df_new_ride_ftp = df_new_ride_ftp[~np.isnan(df_new_ride_ftp['previous_ftp'])]
df_new_ride_ftp.loc[df_new_ride_ftp['previous_ftp'] > df_new_ride_ftp['ftp'], 'new_ride_ftp_flag'] = -1
df_new_ride_ftp.loc[df_new_ride_ftp['previous_ftp'] < df_new_ride_ftp['ftp'], 'new_ride_ftp_flag'] = 1
# Highlight the workout which caused the new FTP to be set
df_new_ride_ftp['new_ride_ftp_flag'] = df_new_ride_ftp['new_ride_ftp_flag'].shift(-1)
# Add flags back to main df
df_summary = df_summary.merge(df_new_run_ftp['new_run_ftp_flag'].to_frame(), how='left', left_index=True,
right_index=True)
df_summary = df_summary.merge(df_new_ride_ftp['new_ride_ftp_flag'].to_frame(), how='left', left_index=True,
right_index=True)
df_summary.loc[df_summary['new_run_ftp_flag'] == 1, 'tss_flag'] = 1
df_summary.loc[df_summary['new_run_ftp_flag'] == -1, 'tss_flag'] = -1
df_summary.loc[df_summary['new_ride_ftp_flag'] == 1, 'tss_flag'] = 1
df_summary.loc[df_summary['new_ride_ftp_flag'] == -1, 'tss_flag'] = -1
# Create df of ftp tests to plot
forecast_days = 13
atl_days = 7
initial_atl = 0
atl_exp = np.exp(-1 / atl_days)
ctl_days = 42
initial_ctl = 0
ctl_exp = np.exp(-1 / ctl_days)
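# These constants implement the usual exponentially weighted training-load recursion,
# e.g. for fitness (42-day time constant):
#   CTL_today = stress_today * (1 - exp(-1/42)) + CTL_yesterday * exp(-1/42)
# Worked step (illustrative numbers): CTL_yesterday = 50, stress_today = 100
#   -> CTL_today ~= 100 * 0.0235 + 50 * 0.9765 ~= 51.2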
# Insert dummy row with current date+forecast_days to ensure resample gets all dates
df_summary.loc[utc_to_local(datetime.utcnow()) + timedelta(days=forecast_days)] = None
if power_status and hr_status:
# If tss not available, use hrss
df_summary['stress_score'] = df_summary.apply(lambda row: row['hrss'] if np.isnan(row['tss']) else row['tss'],
axis=1).fillna(0)
elif power_status:
df_summary['stress_score'] = df_summary['tss']
elif hr_status:
df_summary['stress_score'] = df_summary['hrss']
else:
df_summary['stress_score'] = 0
# Calculate Metrics
# Fitness and Form will change based on the booleans that are selected
# ATL should always be based on ALL sports, so the toggle defaults to true
# However, if the user wants to see ATL for only the toggled sports, they can disable the toggle
workout_types = get_workout_types(df_summary, run_status, ride_status, all_status)
# Sample to daily level and sum stress scores to aggregate multiple workouts per day
if not atl_status:
atl_df = df_summary
atl_df.loc[~atl_df['type'].isin(workout_types), 'stress_score'] = 0
atl_df.loc[~atl_df['type'].isin(workout_types), 'tss'] = 0
atl_df.loc[~atl_df['type'].isin(workout_types), 'hrss'] = 0
atl_df = atl_df[['stress_score', 'tss', 'hrss']].resample('D').sum()
else:
atl_df = df_summary[['stress_score', 'tss', 'hrss']].resample('D').sum()
atl_df['ATL'] = np.nan
atl_df['ATL'].iloc[0] = (atl_df['stress_score'].iloc[0] * (1 - atl_exp)) + (initial_atl * atl_exp)
for i in range(1, len(atl_df)):
atl_df['ATL'].iloc[i] = (atl_df['stress_score'].iloc[i] * (1 - atl_exp)) + (atl_df['ATL'].iloc[i - 1] * atl_exp)
atl_df['atl_tooltip'] = ['Fatigue: <b>{:.1f} ({}{:.1f})</b>'.format(x, '+' if x - y > 0 else '', x - y) for (x, y)
in zip(atl_df['ATL'], atl_df['ATL'].shift(1))]
atl_df = atl_df.drop(columns=['stress_score', 'tss', 'hrss'])
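# Note: the loop above is an exponential moving average; a near-equivalent vectorized form is
#   atl_df['ATL'] = atl_df['stress_score'].ewm(alpha=1 - atl_exp, adjust=False).mean()
# It differs only in how the first value is seeded (raw score vs. initial_atl), a difference
# that decays away; the explicit loop keeps the initial value configurable.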
# Sample to daily level and sum stress scores to aggregate multiple workouts per day
pmd = df_summary[df_summary['type'].isin(workout_types)]
# Make sure df goes to same max date as ATL df
pmd.at[atl_df.index.max(), 'name'] = None
pmd = pmd[
['stress_score', 'tss', 'hrss', 'low_intensity_seconds', 'mod_intensity_seconds', 'high_intensity_seconds',
'tss_flag']].resample('D').sum()
pmd['CTL'] = np.nan
pmd['CTL'].iloc[0] = (pmd['stress_score'].iloc[0] * (1 - ctl_exp)) + (initial_ctl * ctl_exp)
for i in range(1, len(pmd)):
pmd['CTL'].iloc[i] = (pmd['stress_score'].iloc[i] * (1 - ctl_exp)) + (pmd['CTL'].iloc[i - 1] * ctl_exp)
# Merge pmd into ATL df
pmd = pmd.merge(atl_df, how='right', right_index=True, left_index=True)
pmd['l90d_low_intensity'] = pmd['low_intensity_seconds'].rolling(90).sum()
pmd['l90d_high_intensity'] = (pmd['mod_intensity_seconds'] + pmd['high_intensity_seconds']).rolling(90).sum()
pmd['l90d_percent_high_intensity'] = pmd['l90d_high_intensity'] / (
pmd['l90d_high_intensity'] + pmd['l90d_low_intensity'])
pmd['TSB'] = pmd['CTL'].shift(1) - pmd['ATL'].shift(1)
pmd['Ramp_Rate'] = pmd['CTL'] - pmd['CTL'].shift(7)
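# e.g. (illustrative): yesterday's CTL = 52.0 and ATL = 60.5 give today's TSB (form) of -8.5,
# which training_zone() labels 'Maintenance'; Ramp_Rate is simply the change in CTL over the
# last 7 days.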
# Tooltips
pmd['ctl_tooltip'] = ['Fitness: <b>{:.1f} ({}{:.1f})</b>'.format(x, '+' if x - y > 0 else '', x - y) for (x, y)
in
zip(pmd['CTL'], pmd['CTL'].shift(1))]
pmd['tsb_tooltip'] = ['Form: <b>{} {:.1f} ({}{:.1f})</b>'.format(x, y, '+' if y - z > 0 else '', y - z) for
(x, y, z) in
zip(pmd['TSB'].map(training_zone), pmd['TSB'], pmd['TSB'].shift(1))]
if not use_power:
pmd['stress_tooltip'] = ['TRIMP: <b>{:.1f}</b>'.format(x) for x in pmd['stress_score']]
else:
pmd['stress_tooltip'] = [
'Stress: <b>{:.1f}</b><br><br>PSS: <b>{:.1f}</b><br>HRSS: <b>{:.1f}</b>'.format(x, y, z)
for
(x, y, z) in zip(pmd['stress_score'], pmd['tss'], pmd['hrss'])]
# Split actuals and forecasts into separate dataframes to plot lines
actual = pmd[:len(pmd) - forecast_days]
forecast = pmd[-forecast_days:]
# Start chart at first point where CTL exists (Start+42 days)
pmd = pmd[42:]
actual = actual[42:]
if oura_credentials_supplied:
# Merge hrv data into actual df
actual = actual.merge(hrv_df, how='left', left_index=True, right_index=True)
# Merge hrv plan recommendation
actual = actual.merge(df_plan, how='left', left_index=True, right_index=True)
# Merge readiness and sleep for kpis
# actual = actual.merge(df_readiness, how='left', left_index=True, right_index=True)
actual = actual.merge(df_sleep, how='left', left_index=True, right_index=True)
actual['workout_plan'] = 'rec_' + actual['workout_step'].astype('str') + '|' + \
actual['workout_step_desc'] + '|' + \
actual['rationale'] + '|' + \
actual['score'].fillna(0).apply(readiness_score_recommendation) + '|' + \
actual['score'].fillna(0).astype('int').astype('str') + '|' + \
actual['sleep_score'].fillna(0).astype('int').astype('str') + '|' + \
actual['hrv_z_score'].astype('str') + '|' + \
actual['hr_z_score'].astype('str') + '|' + \
actual['hrv7_z_score'].astype('str') + '|' + \
actual['hr7_z_score'].astype('str') + '|' + \
actual['z_desc'].astype('str') + '|' + \
actual['rmssd'].astype('str') + '|' + \
actual['rmssd_yesterday'].astype('str') + '|' + \
actual['rmssd_7'].astype('str') + '|' + \
actual['hr_average'].astype('str') + '|' + \
actual['hr_average_yesterday'].astype('str') + '|' + \
actual['hr_average_7'].astype('str') + '|'
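# Everything is packed into one pipe-delimited string per day so it can ride along in the
# chart's hoverData (see hover_rec below) and be decoded by create_daily_recommendations().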
hover_rec = actual['workout_plan'].tail(1).values[0]
stress_bar_colors = []
for i in actual.index:
if use_power:
stress_bar_colors.append('green' if actual.at[i, 'tss_flag'] == 1 else 'red' if actual.at[
i, 'tss_flag'] == -1 else 'rgba(127, 127, 127, 1)')
else:
stress_bar_colors.append('rgba(127, 127, 127, 1)')
latest = actual.loc[actual.index.max()]
yesterday = actual.loc[actual.index.max() - timedelta(days=1)]
### Start Graph ###
hoverData = {'points': [{'x': actual.index.max().date(),
'y': latest['CTL'].max(),
'text': 'Fitness'},
{'y': latest['Ramp_Rate'].max(), 'text': 'Ramp'},
{'y': rr_max_threshold, 'text': 'RR High'},
{'y': rr_min_threshold, 'text': 'RR Low'},
{'y': latest['ATL'].max(), 'text': 'Fatigue'},
{'y': latest['TSB'].max(), 'text': 'Form'},
]
}
if oura_credentials_supplied:
hoverData['points'].extend([{'text': 'HRV: <b>{:.0f} ({}{:.0f})'.format(latest['rmssd'].max(),
'+' if latest['rmssd'].max() -
yesterday[
'rmssd'].max() > 0 else '',
latest['rmssd'].max() - yesterday[
'rmssd'].max())},
{'text': '7 Day HRV Avg: <b>{:.2f} ({}{:.2f})'.format(latest['rmssd_7'].max(),
'+' if latest[
'rmssd_7'].max() -
yesterday[
'rmssd_7'].max() > 0 else '',
latest['rmssd_7'].max() -
yesterday[
'rmssd_7'].max())},
{'y': latest['detected_trend'],
'text': f'Detected Trend: <b>{latest["detected_trend"]}'}])
figure = {
'data': [
go.Scatter(
name='Fitness (CTL)',
x=actual.index,
y=round(actual['CTL'], 1),
mode='lines',
text=actual['ctl_tooltip'],
hoverinfo='text',
opacity=0.7,
line={'shape': 'spline', 'color': ctl_color},
),
go.Scatter(
name='Fitness (CTL) Forecast',
x=forecast.index,
y=round(forecast['CTL'], 1),
mode='lines',
text=forecast['ctl_tooltip'],
hoverinfo='text',
opacity=0.7,
line={'shape': 'spline', 'color': ctl_color, 'dash': 'dot'},
showlegend=False,
),
go.Scatter(
name='Fatigue (ATL)',
x=actual.index,
y=round(actual['ATL'], 1),
mode='lines',
text=actual['atl_tooltip'],
hoverinfo='text',
line={'color': atl_color},
),
go.Scatter(
name='Fatigue (ATL) Forecast',
x=forecast.index,
y=round(forecast['ATL'], 1),
mode='lines',
text=forecast['atl_tooltip'],
hoverinfo='text',
line={'color': atl_color, 'dash': 'dot'},
showlegend=False,
),
go.Scatter(
name='Form (TSB)',
x=actual.index,
y=round(actual['TSB'], 1),
mode='lines',
text=actual['tsb_tooltip'],
hoverinfo='text',
opacity=0.7,
line={'color': tsb_color},
fill='tozeroy',
fillcolor=tsb_fill_color,
),
go.Scatter(
name='Form (TSB) Forecast',
x=forecast.index,
y=round(forecast['TSB'], 1),
mode='lines',
text=forecast['tsb_tooltip'],
hoverinfo='text',
opacity=0.7,
line={'color': tsb_color, 'dash': 'dot'},
showlegend=False,
),
go.Bar(
name='Stress',
x=actual.index,
y=actual['stress_score'],
# mode='markers',
yaxis='y2',
text=actual['stress_tooltip'],
hoverinfo='text',
marker={
'color': stress_bar_colors}
),
go.Scatter(
name='High Intensity',
x=actual.index,
y=actual['l90d_percent_high_intensity'],
mode='markers',
yaxis='y4',
text=['L90D % High Intensity:<b> {:.0f}%'.format(x * 100) for x in
actual['l90d_percent_high_intensity']],
hoverinfo='text',
marker=dict(
color=['rgba(250, 47, 76,.7)' if actual.at[
i, 'l90d_percent_high_intensity'] > .2 else light_blue
for i in actual.index],
)
),
go.Scatter(
name='80/20 Threshold',
text=['80/20 Threshold' if x == pmd.index.max() else '' for x in
pmd.index],
textposition='top left',
x=pmd.index,
y=[.2 for x in pmd.index],
yaxis='y4',
mode='lines+text',
hoverinfo='none',
line={'dash': 'dashdot',
'color': 'rgba(250, 47, 76,.5)'},
showlegend=False,
),
go.Scatter(
name='Ramp Rate',
x=pmd.index,
y=pmd['Ramp_Rate'],
text=['Ramp Rate: {:.1f}'.format(x) for x in pmd['Ramp_Rate']],
mode='lines',
hoverinfo='none',
line={'color': 'rgba(220,220,220,0)'},
# visible='legendonly',
),
go.Scatter(
name='Ramp Rate (High)',
x=pmd.index,
y=[rr_max_threshold for x in pmd.index],
text=['RR High' for x in pmd['Ramp_Rate']],
mode='lines',
hoverinfo='none',
line={'color': 'rgba(220,220,220,0)'},
# visible='legendonly',
),
go.Scatter(
name='Ramp Rate (Low)',
x=pmd.index,
y=[rr_min_threshold for x in pmd.index],
text=['RR Low' for x in pmd['Ramp_Rate']],
mode='lines',
hoverinfo='none',
line={'color': 'rgba(220,220,220,0)'},
# visible='legendonly',
),
go.Scatter(
name='No Fitness',
text=['No Fitness' if x == pmd.index.max() else '' for x in
pmd.index],
textposition='top left',
x=pmd.index,
y=[25 for x in pmd.index],
mode='lines+text',
hoverinfo='none',
line={'dash': 'dashdot',
'color': 'rgba(127, 127, 127, .35)'},
showlegend=False,
),
go.Scatter(
name='Performance',
text=['Performance' if x == pmd.index.max() else '' for x in
pmd.index],
textposition='top left',
x=pmd.index,
y=[5 for x in pmd.index],
mode='lines+text',
hoverinfo='none',
line={'dash': 'dashdot',
'color': 'rgba(127, 127, 127, .35)'},
showlegend=False,
),
go.Scatter(
name='Maintenance',
text=['Maintenance' if x == pmd.index.max() else '' for x in
pmd.index],
textposition='top left',
x=pmd.index,
y=[-10 for x in pmd.index],
mode='lines+text',
hoverinfo='none',
line={'dash': 'dashdot',
'color': 'rgba(127, 127, 127, .35)'},
showlegend=False,
),
go.Scatter(
name='Productive',
text=['Productive' if x == pmd.index.max() else '' for x in
pmd.index],
textposition='top left',
x=pmd.index,
y=[-25 for x in pmd.index],
mode='lines+text',
hoverinfo='none',
line={'dash': 'dashdot',
'color': 'rgba(127, 127, 127, .35)'},
showlegend=False,
),
go.Scatter(
name='Cautionary',
text=['Cautionary' if x == pmd.index.max() else '' for x in
pmd.index],
textposition='top left',
x=pmd.index,
y=[-40 for x in pmd.index],
mode='lines+text',
hoverinfo='none',
line={'dash': 'dashdot',
'color': 'rgba(127, 127, 127, .35)'},
showlegend=False,
),
go.Scatter(
name='Overreaching',
text=['Overreaching' if x == pmd.index.max() else '' for x in
pmd.index],
textposition='top left',
x=pmd.index,
y=[-45 for x in pmd.index],
mode='lines+text',
hoverinfo='none',
line={'dash': 'dashdot',
'color': 'rgba(127, 127, 127, .35)'},
showlegend=False,
),
],
'layout': go.Layout(
# transition=dict(duration=transition),
font=dict(
size=10,
color=white
),
annotations=chart_annotations,
xaxis=dict(
showgrid=False,
showticklabels=True,
tickformat='%b %d',
# Specify range to get rid of auto x-axis padding when using scatter markers
range=[pmd.index.max() - timedelta(days=89 + forecast_days),
pmd.index.max()],
# default L6W
rangeselector=dict(
bgcolor='rgb(66, 66, 66)',
bordercolor='#d4d4d4',
borderwidth=.5,
buttons=list([
# Specify row count to get rid of auto x-axis padding when using scatter markers
dict(count=(len(pmd) + 1),
label='ALL',
step='day',
stepmode='backward'),
# Forecast goes into next year, so in December end of year, using 'year' shows the next year
# Count the number of days in year instead
dict(count=actual.index.max().timetuple().tm_yday + forecast_days,
label='YTD',
step='day',
stepmode='backward'),
dict(count=89 + forecast_days,
label='L90D',
step='day',
stepmode='backward'),
dict(count=41 + forecast_days,
label='L6W',
step='day',
stepmode='backward'),
dict(count=29 + forecast_days,
label='L30D',
step='day',
stepmode='backward'),
]),
xanchor='center',
font=dict(
size=10,
),
x=.5,
y=1,
),
),
yaxis=dict(
# domain=[0, .85],
showticklabels=False,
range=[actual['TSB'].min() * 1.05, actual['ATL'].max() * 1.25],
showgrid=True,
gridcolor='rgb(73, 73, 73)',
gridwidth=.5,
),
yaxis2=dict(
# domain=[0, .85],
showticklabels=False,
range=[0, pmd['stress_score'].max() * 4],
showgrid=False,
type='linear',
side='right',
anchor='x',
overlaying='y',
# layer='above traces'
),
yaxis4=dict(
# domain=[.85, 1],
range=[0, 3],
showgrid=False,
showticklabels=False,
anchor='x',
side='right',
overlaying='y',
),
margin={'l': 0, 'b': 25, 't': 0, 'r': 0},
showlegend=False,
autosize=True,
bargap=.75,
)
}
# If Oura data supplied, incorporate data into performance management chart
if oura_credentials_supplied:
hoverData['points'].extend([{'text': hover_rec},
{'y': hrv_df.tail(1)['rmssd_7'].values[0], 'text': '7 Day'}])
figure['data'].extend([
go.Scatter(
name='SWC Threshold',
x=actual.index.append(actual.index[::-1]),
y=pd.concat([actual['swc_baseline_upper'], actual['swc_baseline_lower'][::-1]]),
text='swc lower',
yaxis='y3',
mode='lines',
hoverinfo='none',
fill='tonexty',
line={'color': dark_blue},
),
go.Scatter(
name='HRV',
x=actual.index,
y=actual['ln_rmssd'],
yaxis='y3',
mode='lines',
text=['HRV: <b>{:.0f} ({}{:.0f})'.format(x, '+' if x - y > 0 else '', x - y)
for (x, y) in zip(actual['rmssd'], actual['rmssd'].shift(1))],
hoverinfo='text',
line={'color': 'rgba(220,220,220,.20)'},
),
go.Scatter(
name='HRV 7 Day Avg',
x=actual.index,
y=actual['ln_rmssd_7'],
yaxis='y3',
mode='lines',
text=['7 Day HRV Avg: <b>{:.2f} ({}{:.2f})'.format(x, '+' if x - y > 0 else '', x - y)
for (x, y) in zip(actual['rmssd_7'], actual['rmssd_7'].shift(1))],
hoverinfo='text',
line={'color': teal, 'shape': 'spline'},
),
# Dummy scatter to store hrv plan recommendation so hovering data can be stored in hoverdata
go.Scatter(
name='Workout Plan Recommendation',
x=actual.index,
y=[0 for x in actual.index],
text=actual['workout_plan'],
hoverinfo='none',
marker={'color': 'rgba(0, 0, 0, 0)'}
),
])
# Only show workflow hrv thresholds if recovery metric is hrv based
if athlete_info.recovery_metric in ['hrv_baseline', 'hrv']:
figure['data'].extend([
go.Scatter(
name='HRV SWC Flowchart (Lower)',
x=actual.index,
y=actual[
'swc_flowchart_lower' if athlete_info.recovery_metric == 'hrv_baseline' else 'swc_daily_lower'],
yaxis='y3',
mode='lines',
hoverinfo='none',
line={
'color': 'rgba(100, 217, 236,.5)' if athlete_info.recovery_metric == 'hrv_baseline' else 'rgba(220,220,220,.20)',
'shape': 'spline', 'dash': 'dot'},
),
go.Scatter(
name='HRV SWC Flowchart (Upper)',
x=actual.index,
y=actual[
'swc_flowchart_upper' if athlete_info.recovery_metric == 'hrv_baseline' else 'swc_daily_upper'],
yaxis='y3',
mode='lines',
hoverinfo='none',
line={
'color': 'rgba(100, 217, 236,.5)' if athlete_info.recovery_metric == 'hrv_baseline' else 'rgba(220,220,220,.20)',
'shape': 'spline', 'dash': 'dot'},
)
])
# ### Trends ###
#
# # Automated trend detection: https://www.hrv4training.com/blog/interpreting-hrv-trends
# actual['ln_rmssd_7'] = actual['ln_rmssd'].rolling(7).mean()
# # HR baseline
# actual['hr_average_7'] = actual['hr_average'].rolling(7).mean()
# # Coefficient of Variation baseline
# actual['cv_rmssd_7'] = (actual['ln_rmssd'].rolling(7).std() / actual['ln_rmssd'].rolling(7).mean()) * 100
# # HRV Normalized baseline
# actual['ln_rmssd_normalized_7'] = actual['ln_rmssd_7'] / actual['AVNN'].rolling(7).mean()
#
# # Calculate 2 Week Slopes
# actual['ln_rmssd_7_slope'] = actual['ln_rmssd_7'].rolling(14).apply(
# lambda x: scipy.stats.linregress(range(14), x).slope)
# actual['hr_average_7_slope'] = actual['hr_average_7'].rolling(14).apply(
# lambda x: scipy.stats.linregress(range(14), x).slope)
# actual['cv_rmssd_7_slope'] = actual['cv_rmssd_7'].rolling(14).apply(
# lambda x: scipy.stats.linregress(range(14), x).slope)
# actual['ln_rmssd_normalized_7_slope'] = actual['ln_rmssd_normalized_7'].rolling(14).apply(
# lambda x: scipy.stats.linregress(range(14), x).slope)
#
# # Get Stdev and mean for last 60 days worth of slopes
#
# # Remove trivial changes
#
# # actual.loc[(
# # (actual['ln_rmssd_7'] > (
# # actual['ln_rmssd'].rolling(60, min_periods=0).mean() + actual['ln_rmssd'].rolling(60,
# # min_periods=0).std())
# # ) |
# # actual['ln_rmssd_7'] < (
# # actual['ln_rmssd'].rolling(60, min_periods=0).mean() - actual['ln_rmssd'].rolling(60,
# # min_periods=0).std())
# # ),
# # 'ln_rmssd_7_slope_trivial'] = actual['ln_rmssd_7_slope']
#
# actual.loc[
# (
# (actual['ln_rmssd_7_slope'] >
# (actual['ln_rmssd_7_slope'].rolling(60).mean() + actual['ln_rmssd_7_slope'].rolling(60).std())) |
# (actual['ln_rmssd_7_slope'] <
# (actual['ln_rmssd_7_slope'].rolling(60).mean() - actual['ln_rmssd_7_slope'].rolling(60).std()))
# ), 'ln_rmssd_7_slope_trivial'] = actual['ln_rmssd_7_slope']
# actual.loc[
# (
# (actual['hr_average_7_slope'] >
# (actual['hr_average_7_slope'].rolling(60).mean() + actual['hr_average_7_slope'].rolling(
# 60).std())) |
# (actual['hr_average_7_slope'] <
# (actual['hr_average_7_slope'].rolling(60).mean() - actual['hr_average_7_slope'].rolling(60).std()))
# ), 'hr_average_7_slope_trivial'] = actual['hr_average_7_slope']
#
# actual.loc[
# (
# (actual['cv_rmssd_7_slope'] >
# (actual['cv_rmssd_7_slope'].rolling(60).mean() + actual['cv_rmssd_7_slope'].rolling(60).std())) |
# (actual['cv_rmssd_7_slope'] <
# (actual['cv_rmssd_7_slope'].rolling(60).mean() - actual['cv_rmssd_7_slope'].rolling(60).std()))
# ), 'cv_rmssd_7_slope_trivial'] = actual['cv_rmssd_7_slope']
#
# actual.loc[
# (
# (actual['ln_rmssd_normalized_7_slope'] >
# (actual['ln_rmssd_normalized_7_slope'].rolling(60).mean() + actual[
# 'ln_rmssd_normalized_7_slope'].rolling(60).std())) |
# (actual['ln_rmssd_normalized_7_slope'] <
# (actual['ln_rmssd_normalized_7_slope'].rolling(60).mean() - actual[
# 'ln_rmssd_normalized_7_slope'].rolling(60).std()))
# ), 'ln_rmssd_normalized_7_slope_trivial'] = actual['ln_rmssd_normalized_7_slope']
#
# # E.O Customization
# # ATL Normalized baseline
# actual['atl_7'] = actual['ATL'].rolling(7).mean().fillna(0)
# actual['atl_7_slope'] = actual['atl_7'].rolling(14).apply(
# lambda x: scipy.stats.linregress(range(14), x).slope)
#
# actual.loc[
# (
# (actual['atl_7_slope'] >
# (actual['atl_7_slope'].rolling(60).mean() + actual['atl_7_slope'].rolling(60).std())) |
# (actual['atl_7_slope'] <
# (actual['atl_7_slope'].rolling(60).mean() - actual['atl_7_slope'].rolling(60).std()))
# ), 'atl_7_slope_trivial'] = actual['atl_7_slope']
#
# # Fill slopes with 0 when non trivial for trend detection
# for col in actual.columns:
# if 'trivial' in col:
# actual[col] = actual[col].fillna(0)
#
# # Check for trend
# actual["detected_trend"] = actual[
# ["ln_rmssd_7_slope_trivial", "hr_average_7_slope_trivial", "cv_rmssd_7_slope_trivial",
# "ln_rmssd_normalized_7_slope_trivial", "atl_7_slope_trivial"]].apply(lambda x: detect_trend(*x), axis=1)
#
# ### Depricated: This overwrites any other trends identified within the rolling 14 days
# # Highlight the 14 days that the trend is actually calculated on
# # For every trend that has been detected, highlight the 14 days prior to that day with the trend
# # for i in actual.index:
# # for d in range(0, 14):
# # if actual.loc[i]['detected_trend'] != 'No Relevant Trends':
# # actual.at[i - timedelta(days=d + 1), 'detected_trend'] = actual.loc[i]['detected_trend']
#
# # Debugging
# # actual[
# # ["ln_rmssd_7_slope_trivial", "hr_average_7_slope_trivial", "cv_rmssd_7_slope_trivial",
# # "ln_rmssd_normalized_7_slope_trivial"]].to_csv('actual.csv', sep=',')
#
# Plot training adaptation on the hrv 7 day average line
for trend in actual['detected_trend'].unique():
if trend not in ['No Trend Detected']:
actual.loc[actual['detected_trend'] == trend, trend] = actual['ln_rmssd_7']
color = z_color(trend)
### Detected Trends ###
figure['data'].append(
go.Scatter(
name=trend,
x=actual.index,
y=actual[trend],
text=[f'Detected Trend: <b>{trend}' for x in actual['detected_trend']],
yaxis='y3',
mode='markers',
hoverinfo='none',
marker=dict(
color=color,
line=dict(
color='rgba(66,66,66,.75)',
width=1
)
),
)
)
figure['layout']['yaxis3'] = dict(
# domain=[.85, 1],
range=[actual['ln_rmssd_7'].min() * .3, actual['ln_rmssd_7'].max() * 1.05],
showgrid=False,
showticklabels=False,
anchor='x',
side='right',
overlaying='y',
)
return figure, hoverData
def workout_distribution(sport='Run', days=90, intensity='all'):
min_non_warmup_workout_time = app.session.query(athlete).filter(
athlete.athlete_id == 1).first().min_non_warmup_workout_time
df_summary = pd.read_sql(
sql=app.session.query(stravaSummary).filter(
stravaSummary.start_date_utc >= datetime.utcnow() - timedelta(days=days),
stravaSummary.type.like(sport),
stravaSummary.elapsed_time > min_non_warmup_workout_time,
or_(stravaSummary.low_intensity_seconds > 0, stravaSummary.mod_intensity_seconds > 0,
stravaSummary.high_intensity_seconds > 0)
).statement,
con=engine, index_col='start_date_utc')
if intensity != 'all':
df_summary = df_summary[df_summary['workout_intensity'] == intensity]
athlete_bookmarks = json.loads(app.session.query(athlete.peloton_auto_bookmark_ids).filter(
athlete.athlete_id == 1).first().peloton_auto_bookmark_ids)
app.session.remove()
# Clean up workout names for the training distribution table (% of total)
df_summary['workout'] = 'Other'
class_names = []
for x in athlete_bookmarks.keys():
for y in athlete_bookmarks[x].keys():
for class_type in json.loads(athlete_bookmarks[x][y]):
try:
new_class = re.findall(r'min\s(.*)\s', class_type)[0]
except:
new_class = None
if new_class and new_class not in class_names:
class_names.append(new_class)
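# The regex above pulls the class name out of a Peloton-style bookmark title, e.g. (illustrative):
#   re.findall(r'min\s(.*)\s', '30 min Power Zone Endurance Ride')[0] -> 'Power Zone Endurance'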
# class_names = ['Power Zone Max', 'Power Zone Endurance', 'Power Zone', 'Endurance', 'Recovery', 'Speed',
# 'Intervals', 'HIIT',
# 'Progression', 'Race Prep', 'Tabata', 'Hills', 'Long', 'Fun', 'Tempo'] # , '5k', '10k', 'Marathon']
for name in class_names:
for i in df_summary.index:
if name.lower() in df_summary.loc[i]['name'].lower():
df_summary.at[i, 'workout'] = name
continue
# Categorize 5k, 10k half and marathon as 'Race'
df_summary['workout'] = df_summary['workout'].replace({'5k': 'Race', '10k': 'Race', 'Marathon': 'Race'})
# Shorten 'Power Zone' to 'PZ'
df_summary['workout'] = df_summary['workout'].str.replace('Power Zone', 'PZ')
# Categorize Free runs at zone pace
df_summary.loc[df_summary['name'].str.lower().str.contains('zone') & df_summary['name'].str.lower().str.contains(
'pace'), 'workout'] = 'Z Pace'
# Categorize all yoga as 'Yoga'
df_summary.loc[df_summary['type'] == 'Yoga', 'workout'] = 'Yoga'
# Categorize all WeightTraining as 'Weights'
df_summary.loc[df_summary['type'] == 'WeightTraining', 'workout'] = 'Weights'
# Categorize all hikes as 'Hike'
df_summary.loc[df_summary['type'] == 'Hike', 'workout'] = 'Hike'
# # Split into intensity subsets workout as low/med/high
# df_summary['high_intensity_seconds'] = df_summary['high_intensity_seconds'] + df_summary['mod_intensity_seconds']
# df_summary['intensity'] = df_summary[
# ['low_intensity_seconds', 'mod_intensity_seconds', 'high_intensity_seconds']].idxmax(axis=1)
# df_summary['total_intensity_seconds'] = df_summary['high_intensity_seconds'].fillna(0) + df_summary[
# 'mod_intensity_seconds'].fillna(0) + \
# df_summary['low_intensity_seconds'].fillna(0)
df_summary['total_intensity_seconds'] = df_summary['moving_time'] # Replacing different intensity groups for now
df_summary['intensity'] = 'total_intensity_seconds'
# Set up columns for table
col_names = ['Activity', '%'] # , 'Time']
# for intensity in ['high_intensity_seconds', 'low_intensity_seconds']:
df_temp = df_summary[df_summary['intensity'] == 'total_intensity_seconds']
df_temp = df_temp.groupby('workout')[['total_intensity_seconds', 'elapsed_time']].sum()
df_temp['workout'] = df_temp.index
# Format time (seconds) as time intervals
df_temp['time'] = df_temp['total_intensity_seconds'].apply(
lambda x: '{}'.format(timedelta(seconds=int(x))))
# df_temp['elapsed_time'] = df_temp['elapsed_time'].apply(
# lambda x: '{}'.format(timedelta(seconds=int(x))))
df_temp['Percent of Total'] = (df_temp['total_intensity_seconds'] / df_temp[
'total_intensity_seconds'].sum()) * 100
df_temp['Percent of Total'] = df_temp['Percent of Total'].apply(lambda x: '{:.0f}%'.format(x))
return df_temp.sort_values(ascending=False, by=['total_intensity_seconds']).to_dict('records')
def workout_summary_kpi(df_samples):
athlete_info = app.session.query(athlete).filter(athlete.athlete_id == 1).first()
use_power = True if athlete_info.use_run_power or athlete_info.use_cycle_power else False
app.session.remove()
height = '25%' if use_power else '33%'
data = [
html.Div(className='align-items-center text-center', style={'height': height}, children=[
html.H5('Power', className=' mb-0'),
html.P('Max: {:.0f}'.format(df_samples['watts'].max()), className='mb-0'),
html.P('Avg: {:.0f}'.format(df_samples['watts'].mean()), className='mb-0'),
html.P('Min: {:.0f}'.format(df_samples['watts'].min()), className='mb-0')
]),
html.Div(className='align-items-center text-center', style={'height': height}, children=[
html.H5('Heartrate', className='mb-0'),
html.P('Max: {:.0f}'.format(df_samples['heartrate'].max()), className='mb-0'),
html.P('Avg: {:.0f}'.format(df_samples['heartrate'].mean()), className=' mb-0'),
html.P('Min: {:.0f}'.format(df_samples['heartrate'].min()), className=' mb-0')
]),
html.Div(className='align-items-center text-center', style={'height': height}, children=[
html.H5('Speed', className=' mb-0'),
html.P('Max: {:.1f}'.format(df_samples['velocity_smooth'].max()), className=' mb-0'),
html.P('Avg: {:.1f}'.format(df_samples['velocity_smooth'].mean()), className=' mb-0'),
html.P('Min: {:.1f}'.format(df_samples['velocity_smooth'].min()), className='mb-0')
]),
html.Div(className='align-items-center text-center', style={'height': height}, children=[
html.H5('Cadence', className=' mb-0'),
html.P('Max: {:.0f}'.format(df_samples['cadence'].max()), className=' mb-0'),
html.P('Avg: {:.0f}'.format(df_samples['cadence'].mean()), className=' mb-0'),
html.P('Min: {:.0f}'.format(df_samples['cadence'].min()), className=' mb-0')
])
]
if not use_power:
data = data[1:]
return data
def workout_details(df_samples, start_seconds=None, end_seconds=None):
    '''
    :param df_samples: sample-level DataFrame filtered to a single activity
    :param start_seconds: optional start (in seconds) of the interval to highlight
    :param end_seconds: optional end (in seconds) of the interval to highlight
    :return: metric trend charts
    '''
athlete_info = app.session.query(athlete).filter(athlete.athlete_id == 1).first()
use_power = True if athlete_info.use_run_power or athlete_info.use_cycle_power else False
app.session.remove()
df_samples['watts'] = df_samples['watts'].fillna(0)
df_samples['heartrate'] = df_samples['heartrate'].fillna(0)
df_samples['velocity_smooth'] = df_samples['velocity_smooth'].fillna(0)
df_samples['cadence'] = df_samples['cadence'].fillna(0)
# Create df of records to highlight if clickData present from callback
if start_seconds is not None and end_seconds is not None:
highlight_df = df_samples[(df_samples['time'] >= int(start_seconds)) & (df_samples['time'] <= int(end_seconds))]
else:
highlight_df = df_samples[df_samples['activity_id'] == 0] # Dummy
    # Blank out the highlighted rows in the main df_samples so the two traces do not overlap or show duplicate hoverinfo
    df_samples.loc[highlight_df.index, ['velocity_smooth', 'cadence', 'heartrate', 'watts']] = np.nan
data = [
go.Scatter(
name='Speed',
x=df_samples['time_interval'],
y=round(df_samples['velocity_smooth'], 1),
# hoverinfo='x+y',
yaxis='y2',
mode='lines',
line={'color': teal}
),
go.Scatter(
name='Speed',
x=highlight_df['time_interval'],
y=round(highlight_df['velocity_smooth'], 1),
# hoverinfo='x+y',
yaxis='y2',
mode='lines',
line={'color': orange}
),
go.Scatter(
name='Cadence',
x=df_samples['time_interval'],
y=round(df_samples['cadence']),
# hoverinfo='x+y',
yaxis='y',
mode='lines',
line={'color': teal}
),
go.Scatter(
name='Cadence',
x=highlight_df['time_interval'],
y=round(highlight_df['cadence']),
# hoverinfo='x+y',
yaxis='y',
mode='lines',
line={'color': orange}
),
go.Scatter(
name='Heart Rate',
x=df_samples['time_interval'],
y=round(df_samples['heartrate']),
# hoverinfo='x+y',
yaxis='y3',
mode='lines',
line={'color': teal}
),
go.Scatter(
name='Heart Rate',
x=highlight_df['time_interval'],
y=round(highlight_df['heartrate']),
# hoverinfo='x+y',
yaxis='y3',
mode='lines',
line={'color': orange}
),
]
if use_power:
data.extend([
go.Scatter(
name='Power',
x=df_samples['time_interval'],
y=round(df_samples['watts']),
# hoverinfo='x+y',
yaxis='y4',
mode='lines',
line={'color': teal}
),
go.Scatter(
name='Power',
x=highlight_df['time_interval'],
y=round(highlight_df['watts']),
# hoverinfo='x+y',
yaxis='y4',
mode='lines',
line={'color': orange}
)
])
return html.Div([
dcc.Graph(
id='trends', style={'height': '100%'},
config={
'displayModeBar': False,
},
# figure= fig
figure={
'data': data,
'layout': go.Layout(
# transition=dict(duration=transition),
font=dict(
size=10,
color=white
),
# TODO: Subplot unified tooltip not yet supported https://github.com/plotly/plotly.js/issues/4755
# hovermode='x unified',
hovermode='x',
paper_bgcolor='rgb(66,66,66)',
plot_bgcolor='rgba(0,0,0,0)',
margin={'l': 40, 'b': 25, 't': 5, 'r': 40},
showlegend=False,
# legend={'x': .5, 'y': 1.05, 'xanchor': 'center', 'orientation': 'h',
# 'traceorder': 'normal', 'bgcolor': 'rgba(127, 127, 127, 0)'},
xaxis=dict(
showticklabels=True,
showgrid=False,
showline=True,
tickformat="%Mm",
hoverformat="%H:%M:%S",
# spikemode='across',
# showspikes=True,
# spikesnap='cursor',
zeroline=False,
# tickvals=[1, 2, 5, 10, 30, 60, 120, 5 * 60, 10 * 60, 20 * 60, 60 * 60, 60 * 90],
# ticktext=['1s', '2s', '5s', '10s', '30s', '1m',
# '2m', '5m', '10m', '20m', '60m', '90m'],
),
yaxis=dict(
color=white,
showticklabels=True,
tickvals=[df_samples['cadence'].min(),
# round(df_samples['cadence'].mean()),
df_samples['cadence'].max()],
zeroline=False,
domain=[0, 0.24] if use_power else [0, 0.32],
anchor='x'
),
yaxis2=dict(
color=white,
showticklabels=True,
tickvals=[round(df_samples['velocity_smooth'].min()),
# round(df_samples['velocity_smooth'].mean()),
round(df_samples['velocity_smooth'].max())],
zeroline=False,
domain=[0.26, 0.49] if use_power else [.34, 0.66],
anchor='x'
),
yaxis3=dict(
color=white,
showticklabels=True,
tickvals=[df_samples['heartrate'].min(),
# round(df_samples['heartrate'].mean()),
df_samples['heartrate'].max()],
zeroline=False,
domain=[0.51, 0.74] if use_power else [0.68, 1],
anchor='x'
),
yaxis4=dict(
color=white,
showticklabels=True,
tickvals=[df_samples['watts'].min(),
# round(df_samples['watts'].mean()),
df_samples['watts'].max()],
zeroline=False,
domain=[0.76, 1],
anchor='x'
) if use_power else None
)
}
)])
def calculate_splits(df_samples):
if np.isnan(df_samples['distance'].max()):
return None
else:
df_samples['miles'] = df_samples['distance'] * 0.000189394
df_samples['mile_marker'] = df_samples['miles'].apply(np.floor)
df_samples['mile_marker_previous'] = df_samples['mile_marker'].shift(1)
df_samples = df_samples[(df_samples['mile_marker'] != df_samples['mile_marker_previous']) |
(df_samples.index == df_samples.index.max())]
df_samples = df_samples.iloc[1:]
df_samples['time_prev'] = df_samples['time'].shift(1).fillna(0)
df_samples['time'] = df_samples['time'] - df_samples['time_prev']
        # If the activity ends on a partial mile, store the remaining fraction as the final mile_marker
        # and scale its time up to a full-mile pace so the last split is accurate
max_index = df_samples.index.max()
if df_samples.at[max_index, 'mile_marker'] == df_samples.at[max_index, 'mile_marker_previous']:
df_samples.at[max_index, 'mile_marker'] = df_samples.at[max_index, 'miles'] % 1
df_samples.at[max_index, 'time'] = df_samples.at[max_index, 'time'] / df_samples.at[
max_index, 'mile_marker']
# Format as 2 decimal places after calculation is done for pace so table looks nice
df_samples.at[max_index, 'mile_marker'] = round(df_samples.at[max_index, 'miles'] % 1, 2)
df_samples['time_str'] = ['{:02.0f}:{:02.0f} /mi'.format(x // 60, (x % 60)) for x in df_samples['time']]
df_samples_table_columns = ['mile_marker', 'time_str']
col_names = ['Mile', 'Pace']
return html.Div(className='table', style={'height': '100%'}, children=[
dash_table.DataTable(
columns=[{"name": x, "id": y} for (x, y) in
zip(col_names, df_samples[df_samples_table_columns].columns)],
data=df_samples[df_samples_table_columns].sort_index(ascending=True).to_dict('records'),
style_as_list_view=True,
fixed_rows={'headers': True, 'data': 0},
style_table={'height': '100%'},
style_header={'backgroundColor': 'rgba(0, 0, 0, 0)',
# 'borderBottom': '1px solid rgb(220, 220, 220)',
'textAlign': 'center',
'borderTop': '0px',
# 'fontWeight': 'bold',
'fontFamily': '"Open Sans", "HelveticaNeue", "Helvetica Neue", Helvetica, Arial, sans-serif',
},
style_cell={
'backgroundColor': 'rgba(0, 0, 0, 0)',
'textAlign': 'center',
# 'borderBottom': '1px solid rgb(73, 73, 73)',
'maxWidth': 100,
'fontFamily': '"Open Sans", "HelveticaNeue", "Helvetica Neue", Helvetica, Arial, sans-serif',
},
style_cell_conditional=[
{
'if': {'column_id': c},
'display': 'none'
} for c in ['activity_id']
],
filter_action="none",
page_action="none",
# page_current=0,
# page_size=10,
)
])
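# A minimal sketch (plain numbers, not from any real activity) of the partial-mile pace
# normalization that calculate_splits applies to the final split above.
def _demo_partial_mile_pace():
    final_partial_mile = 0.4          # the ride ended 0.4 mi into its last mile
    seconds_in_final_segment = 150.0  # time spent covering that 0.4 mi
    pace_seconds = seconds_in_final_segment / final_partial_mile  # scale up to a full-mile pace
    return '{:02.0f}:{:02.0f} /mi'.format(pace_seconds // 60, pace_seconds % 60)  # '06:15 /mi'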
def create_annotation_table():
df_annotations = pd.read_sql(
sql=app.session.query(annotations.athlete_id, annotations.date, annotations.annotation).filter(
athlete.athlete_id == 1).statement,
con=engine).sort_index(ascending=False)
app.session.remove()
return dash_table.DataTable(id='annotation-table',
columns=[{"name": x, "id": y} for (x, y) in
zip(['Date', 'Annotation'], ['date', 'annotation'])],
data=df_annotations[['date', 'annotation']].sort_index(ascending=False).to_dict(
'records'),
style_as_list_view=True,
fixed_rows={'headers': True, 'data': 0},
style_table={'height': '100%'},
style_header={'backgroundColor': 'rgba(0,0,0,0)',
'borderBottom': '1px solid rgb(220, 220, 220)',
'borderTop': '0px',
'textAlign': 'left',
'fontWeight': 'bold',
'fontFamily': '"Open Sans", "HelveticaNeue", "Helvetica Neue", Helvetica, Arial, sans-serif',
},
style_cell={
'backgroundColor': 'rgba(0,0,0,0)',
# 'color': 'rgb(220, 220, 220)',
'borderBottom': '1px solid rgb(73, 73, 73)',
'textAlign': 'center',
# 'maxWidth': 175,
'fontFamily': '"Open Sans", "HelveticaNeue", "Helvetica Neue", Helvetica, Arial, sans-serif',
},
style_cell_conditional=[
{
'if': {'column_id': 'activity_id'},
'display': 'none'
}
],
filter_action="none",
editable=True,
row_deletable=True,
page_action="none",
)
# PMC KPIs
@app.callback(
[Output('daily-recommendations', 'children'),
Output('pmd-kpi', 'children')],
[Input('pm-chart', 'hoverData')])
def update_fitness_kpis(hoverData):
    date, fitness, ramp, fatigue, form, hrv, hrv_change, hrv7, hrv7_change, plan_rec, trend = None, None, None, None, None, None, None, None, None, None, None
    # Initialize the RR thresholds too so the return below cannot raise an UnboundLocalError when they are absent from hoverData
    rr_max_threshold, rr_min_threshold = None, None
if hoverData is not None:
if len(hoverData['points']) > 3:
date = hoverData['points'][0]['x']
for point in hoverData['points']:
try:
if 'Fitness' in point['text']:
fitness = point['y']
if 'Ramp' in point['text']:
ramp = point['y']
if 'RR High' in point['text']:
rr_max_threshold = point['y']
if 'RR Low' in point['text']:
rr_min_threshold = point['y']
if 'Fatigue' in point['text']:
fatigue = point['y']
if 'Form' in point['text']:
form = point['y']
if '7 Day' in point['text']:
hrv7 = float(re.findall(r'(?<=\>)(.*?)(?=\s)', point['text'])[0])
if 'rec_' in point['text']:
plan_rec = point['text']
if 'Trend:' in point['text']:
trend = point['text'].replace("Detected Trend: <b>", '')
except:
continue
return create_daily_recommendations(plan_rec) if oura_credentials_supplied else [], \
create_fitness_kpis(date, fitness, ramp, rr_max_threshold, rr_min_threshold, fatigue, form, hrv7,
trend)
# PMD Boolean Switches
@app.callback(
[Output('pm-chart', 'figure'),
Output('pm-chart', 'hoverData')],
[Input('ride-pmc-switch', 'on'),
Input('run-pmc-switch', 'on'),
Input('all-pmc-switch', 'on'),
Input('power-pmc-switch', 'on'),
Input('hr-pmc-switch', 'on'),
Input('atl-pmc-switch', 'on')],
[State('ride-pmc-switch', 'on'),
State('run-pmc-switch', 'on'),
State('all-pmc-switch', 'on'),
State('power-pmc-switch', 'on'),
State('hr-pmc-switch', 'on'),
State('atl-pmc-switch', 'on')
]
)
def refresh_fitness_chart(ride_switch, run_switch, all_switch, power_switch, hr_switch, atl_pmc_switch, ride_status,
run_status, all_status, power_status, hr_status, atl_status):
pmc_switch_settings = {'ride_status': ride_status, 'run_status': run_status, 'all_status': all_status,
'power_status': power_status, 'hr_status': hr_status, 'atl_status': atl_status}
### Save Switch settings in DB ###
app.session.query(athlete).filter(athlete.athlete_id == 1).update(
{athlete.pmc_switch_settings: json.dumps(pmc_switch_settings)})
app.session.commit()
app.session.remove()
pmc_figure, hoverData = create_fitness_chart(ride_status=ride_status, run_status=run_status,
all_status=all_status, power_status=power_status, hr_status=hr_status,
atl_status=atl_status)
return pmc_figure, hoverData
# Zone and distribution callback for sport/date filters. Also update date label/card header with callback here
@app.callback(
[Output('trend-chart', 'figure'),
Output('trend-controls', 'children'), ],
[Input('average-watts-trend-button', 'n_clicks_timestamp'),
Input('average-heartrate-trend-button', 'n_clicks_timestamp'),
Input('tss-trend-button', 'n_clicks_timestamp'),
Input('distance-trend-button', 'n_clicks_timestamp'),
Input('elapsed-time-trend-button', 'n_clicks_timestamp'),
Input('average-speed-trend-button', 'n_clicks_timestamp'),
Input('average-ground-time-trend-button', 'n_clicks_timestamp'),
Input('average-oscillation-trend-button', 'n_clicks_timestamp'),
Input('average-leg-spring-trend-button', 'n_clicks_timestamp'),
Input('performance-activity-type-toggle', 'value'),
Input('performance-time-selector-all', 'n_clicks_timestamp'),
Input('performance-time-selector-ytd', 'n_clicks_timestamp'),
Input('performance-time-selector-l90d', 'n_clicks_timestamp'),
Input('performance-time-selector-l6w', 'n_clicks_timestamp'),
Input('performance-time-selector-l30d', 'n_clicks_timestamp'),
Input('performance-intensity-selector-all', 'n_clicks_timestamp'),
Input('performance-intensity-selector-high', 'n_clicks_timestamp'),
Input('performance-intensity-selector-mod', 'n_clicks_timestamp'),
Input('performance-intensity-selector-low', 'n_clicks_timestamp')
],
[State('average-watts-trend-button', 'n_clicks_timestamp'),
State('average-heartrate-trend-button', 'n_clicks_timestamp'),
State('tss-trend-button', 'n_clicks_timestamp'),
State('distance-trend-button', 'n_clicks_timestamp'),
State('elapsed-time-trend-button', 'n_clicks_timestamp'),
State('average-speed-trend-button', 'n_clicks_timestamp'),
State('average-ground-time-trend-button', 'n_clicks_timestamp'),
State('average-oscillation-trend-button', 'n_clicks_timestamp'),
State('average-leg-spring-trend-button', 'n_clicks_timestamp'),
State('performance-activity-type-toggle', 'value'),
State('performance-time-selector-all', 'n_clicks_timestamp'),
State('performance-time-selector-ytd', 'n_clicks_timestamp'),
State('performance-time-selector-l90d', 'n_clicks_timestamp'),
State('performance-time-selector-l6w', 'n_clicks_timestamp'),
State('performance-time-selector-l30d', 'n_clicks_timestamp'),
State('performance-intensity-selector-all', 'n_clicks_timestamp'),
State('performance-intensity-selector-high', 'n_clicks_timestamp'),
State('performance-intensity-selector-mod', 'n_clicks_timestamp'),
State('performance-intensity-selector-low', 'n_clicks_timestamp')
]
)
def update_trend_chart(*args):
athlete_info = app.session.query(athlete).filter(athlete.athlete_id == 1).first()
use_run_power = True if athlete_info.use_run_power else False
use_cycle_power = True if athlete_info.use_cycle_power else False
use_power = True if use_run_power or use_cycle_power else False
app.session.remove()
ctx = dash.callback_context
sport = 'run' if ctx.states['performance-activity-type-toggle.value'] == False else 'ride'
# Since the sport/date toggle can be the last trigger, we need to look at timestamp of date buttons and value of sport toggle to determine which date/sport to be using
states = ctx.states
# Create dict of just date buttons
date_buttons = states.copy()
[date_buttons.pop(x) for x in list(date_buttons.keys()) if 'performance-time-selector' not in x]
date_days = {'all': 99999, 'ytd': int(datetime.now().strftime('%j')), 'l90d': 90, 'l6w': 42, 'l30d': 30}
days = date_days[max(date_buttons.items(), key=operator.itemgetter(1))[0].split('.')[0].replace(
'performance-time-selector-', '')]
# Create dict of just intensity buttons
state_buttons = states.copy()
[state_buttons.pop(x) for x in list(state_buttons.keys()) if 'performance-intensity-selector' not in x]
last_intensity_click = max(state_buttons.items(), key=operator.itemgetter(1))[0].split('.')[0].replace(
'performance-intensity-selector-', '')
if ctx.triggered:
# Pop date buttons from main dict
[ctx.states.pop(x) for x in list(ctx.states.keys()) if
'performance-time-selector' in x or 'performance-intensity-selector' in x]
# Remove sport toggle from dict, then get max of all timestamps
ctx.states.pop('performance-activity-type-toggle.value')
metric = max(ctx.states.items(), key=operator.itemgetter(1))[0].split(".")[0].replace('-trend-button',
'').replace('-', '_')
else:
metric = 'average_heartrate' if not use_power else 'average_watts'
figure = get_trend_chart(metric=metric, sport=sport, days=days, intensity=last_intensity_click)
return figure, get_trend_controls(sport=sport, selected=metric)
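# Minimal sketch (hypothetical timestamps, not real callback state) of the "most recent
# click wins" pattern used in the callbacks above: the key with the largest
# n_clicks_timestamp identifies the last button the user pressed.
def _demo_last_clicked():
    import operator
    timestamps = {
        'performance-time-selector-l90d.n_clicks_timestamp': 1597000000000,
        'performance-time-selector-ytd.n_clicks_timestamp': 1597000005000,
        'performance-time-selector-l30d.n_clicks_timestamp': 0,
    }
    last = max(timestamps.items(), key=operator.itemgetter(1))[0]
    return last.split('.')[0].replace('performance-time-selector-', '')  # 'ytd'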
@app.callback(
[Output('performance-time-selector', 'label'),
Output('performance-intensity-selector', 'label'),
Output('performance-title', 'children'),
Output('performance-trend-running-icon', 'style'),
Output('performance-trend-bicycle-icon', 'style'),
Output('performance-trend-zones', 'children'),
Output('workout-type-distributions', 'data'),
Output('performance-power-curve', 'figure'),
Output('performance-power-curve-container', 'style')],
[Input('performance-activity-type-toggle', 'value'),
Input('performance-time-selector-all', 'n_clicks_timestamp'),
Input('performance-time-selector-ytd', 'n_clicks_timestamp'),
Input('performance-time-selector-l90d', 'n_clicks_timestamp'),
Input('performance-time-selector-l6w', 'n_clicks_timestamp'),
Input('performance-time-selector-l30d', 'n_clicks_timestamp'),
Input('performance-intensity-selector-all', 'n_clicks_timestamp'),
Input('performance-intensity-selector-high', 'n_clicks_timestamp'),
Input('performance-intensity-selector-mod', 'n_clicks_timestamp'),
Input('performance-intensity-selector-low', 'n_clicks_timestamp')]
)
def update_icon(*args):
athlete_info = app.session.query(athlete).filter(athlete.athlete_id == 1).first()
use_power = True if athlete_info.use_run_power or athlete_info.use_cycle_power else False
app.session.remove()
inputs = dash.callback_context.inputs
sport = 'run' if not inputs['performance-activity-type-toggle.value'] else 'ride'
if sport == 'run':
run_style = {'fontSize': '1.5rem', 'display': 'inline-block', 'vertical-align': 'middle', 'color': teal}
ride_style = {'fontSize': '1.5rem', 'display': 'inline-block', 'vertical-align': 'middle'}
else:
run_style = {'fontSize': '1.5rem', 'display': 'inline-block', 'vertical-align': 'middle'}
ride_style = {'fontSize': '1.5rem', 'display': 'inline-block', 'vertical-align': 'middle', 'color': teal}
# Create dict of just date buttons
time_inputs = inputs.copy()
[time_inputs.pop(x) for x in list(time_inputs.keys()) if 'performance-time-selector' not in x]
date_days = {'all': 99999, 'ytd': int(datetime.now().strftime('%j')), 'l90d': 90, 'l6w': 42, 'l30d': 30}
last_time_click = max(time_inputs.items(), key=operator.itemgetter(1))[0].split('.')[0].replace(
'performance-time-selector-', '')
days = date_days[last_time_click]
time_label = last_time_click.upper()
# Create dict of just intensity buttons
intensity_inputs = inputs.copy()
[intensity_inputs.pop(x) for x in list(intensity_inputs.keys()) if 'performance-intensity-selector' not in x]
last_intensity_click = max(intensity_inputs.items(), key=operator.itemgetter(1))[0].split('.')[0].replace(
'performance-intensity-selector-', '')
intensity_label = last_intensity_click.title() + ' Intensity'
return time_label, intensity_label, html.H6(time_label + ' Performance', className='mb-0'), run_style, ride_style, \
zone_chart(days=days, sport=sport, height=200, intensity=last_intensity_click), workout_distribution(
sport=sport, days=days, intensity=last_intensity_click), power_curve(
activity_type=sport, height=200, time_comparison=days, intensity=last_intensity_click) if use_power else {}, {
'display': 'normal'} if use_power else {'display': 'none'}
# Trend chart callback for sport/date filters
# Create YOY Chart
@app.callback(
[Output('growth-chart', 'figure'),
Output('growth-chart', 'hoverData'),
Output('growth-chart-metric-select', 'label')],
[Input('run|distance', 'n_clicks'),
Input('run|elapsed_time', 'n_clicks'),
Input('run|hrss', 'n_clicks'),
Input('run|trimp', 'n_clicks'),
Input('run|tss', 'n_clicks'),
Input('ride|distance', 'n_clicks'),
Input('ride|elapsed_time', 'n_clicks'),
Input('ride|hrss', 'n_clicks'),
Input('ride|trimp', 'n_clicks'),
Input('ride|tss', 'n_clicks')]
)
def update_yoy_chart(*args):
ctx = dash.callback_context
if not ctx.triggered:
sport = "run"
metric = 'distance'
else:
select = ctx.triggered[0]["prop_id"].split(".")[0].split('|')
sport = select[0]
metric = select[1]
label = (sport + ' ' + metric.replace('elapsed_time', 'duration')).title().replace('_', ' ').replace('Cycling',
'Ride')
figure, hoverData = create_yoy_chart(sport=sport, metric=metric)
return figure, hoverData, label
# Growth Chart KPIs
@app.callback(
Output('growth-header', 'children'),
[Input('growth-chart', 'hoverData')])
def update_growth_kpis(hoverData):
cy, cy_metric, ly, ly_metric, cy_date, metric = None, None, None, None, None, None
if hoverData is not None:
for point in hoverData['points']:
if 'cy' in point['customdata']:
metric = point['customdata'].split('|')[1]
cy = point['customdata'].split('|')[2]
cy_metric = point['y']
cy_date = point['x']
elif 'ly' in point['customdata']:
metric = point['customdata'].split('|')[1]
ly = point['customdata'].split('|')[2]
ly_metric = point['y']
return create_growth_kpis(date=hoverData['points'][0]['x'], cy=cy, cy_metric=cy_metric, ly=ly,
ly_metric=ly_metric, metric=metric)
@app.callback(
Output('activity-table', 'data'),
[Input('pm-chart', 'clickData')]
)
def update_fitness_table(clickData):
if clickData:
if len(clickData['points']) >= 3:
date = clickData['points'][0]['x']
return create_activity_table(date)
else:
return create_activity_table()
# Activity Modal Toggle - store activity id clicked from table into div for other callbacks to use for generating charts in modal
@app.callback(
[Output("activity-modal", "is_open"),
Output("activity-modal-header", "children"),
Output("modal-activity-id-type-metric", 'children')],
[Input('activity-table', 'active_cell'),
Input("close-activity-modal-button", "n_clicks")],
[State('activity-table', 'data'),
State("activity-modal", "is_open")]
)
def toggle_activity_modal(active_cell, n2, data, is_open):
if active_cell or n2:
try:
activity_id = data[active_cell['row_id']]['activity_id']
except:
activity_id = None
if activity_id:
# if open, populate charts
if not is_open:
activity = app.session.query(stravaSummary).filter(stravaSummary.activity_id == activity_id).first()
app.session.remove()
# return activity_id
return not is_open, html.H5(
'{} - {}'.format(datetime.strftime(activity.start_day_local, '%A %b %d, %Y'),
activity.name)), '{}|{}|{}'.format(activity_id,
'ride' if 'ride' in activity.type else 'run' if 'run' in activity.type else activity.type,
'power_zone' if activity.max_watts and activity.ftp else 'hr_zone')
else:
return not is_open, None, None
return is_open, None, None
# Activity modal power curve callback
@app.callback(
[Output("modal-power-curve-chart", "figure"),
Output("modal-power-curve-card", "style")],
[Input("modal-activity-id-type-metric", "children")],
[State("activity-modal", "is_open")]
)
def modal_power_curve(activity, is_open):
if activity and is_open:
activity_id = activity.split('|')[0]
activity_type = activity.split('|')[1]
metric = activity.split('|')[2]
# Only show power zone chart if power data exists
if metric == 'power_zone':
figure = power_curve(last_id=activity_id, activity_type=activity_type)
return figure, {'height': '100%'}
else:
return {}, {'display': 'None'}
else:
return {}, {'display': 'None'}
# Activity modal power zone callback
@app.callback(
[Output("modal-zones", "children"),
Output("modal-zone-title", "children")],
[Input("modal-activity-id-type-metric", "children")],
[State("activity-modal", "is_open")]
)
def modal_power_zone(activity, is_open):
if activity and is_open:
activity_id = activity.split('|')[0]
return zone_chart(activity_id=activity_id, chart_id='modal-zone-chart'), html.H4('Training Zones')
else:
return None, None
# Activity modal workout details callback
@app.callback(
[Output("modal-workout-summary", "children"),
Output("modal-workout-trends", "children"),
Output("modal-workout-stats", "children")],
[Input("modal-activity-id-type-metric", "children")],
[State("activity-modal", "is_open")]
)
def modal_workout_trends(activity, is_open):
if activity and is_open:
activity_id = activity.split('|')[0]
df_samples = pd.read_sql(
sql=app.session.query(stravaSamples).filter(stravaSamples.activity_id == activity_id).statement,
con=engine,
index_col=['timestamp_local'])
app.session.remove()
return workout_summary_kpi(df_samples), workout_details(df_samples), calculate_splits(df_samples)
else:
return None, None, None
# Annotation Modal Toggle
@app.callback(
Output("annotation-modal", "is_open"),
[Input('open-annotation-modal-button', 'n_clicks')],
[State("annotation-modal", "is_open")],
)
def toggle_annotation_modal(n1, is_open):
if n1:
return not is_open
return is_open
# Annotation Load table Toggle
@app.callback(
Output("annotation-table-container", "children"),
[Input("annotation-modal", "is_open")],
)
def annotation_table(is_open):
if is_open:
return create_annotation_table()
# Annotation Table Add Row
@app.callback(
Output('annotation-table', 'data'),
[Input('annotation-add-rows-button', 'n_clicks')],
[State('annotation-table', 'data'),
State('annotation-table', 'columns')])
def add_row(n_clicks, rows, columns):
if n_clicks > 0:
rows.append({c['id']: '' for c in columns})
return rows
# Annotation Save & Close table Toggle
@app.callback(
Output("annotation-save-status", "children"),
[Input("save-close-annotation-modal-button", "n_clicks")],
[State("annotation-password", "value"),
State('annotation-table', 'data')]
)
def annotation_table_save(n_clicks, password, data):
if n_clicks > 0 and password == config.get('settings', 'password'):
try:
df = | pd.DataFrame(data) | pandas.DataFrame |
"""
Simple audio clustering
1. Get the embeddings - at an interval of 0.5s each
2. Get the VAD - variable interval
3. Get embeddings for a VAD interval -> Take average of the embeddings
4. Get the ground truth for embedding for each speaker - marked 0.5s interval
5. L2 Normalize the embeddings before taking a distance measure
6. Clustering - Speaker Verification Task
1. Fix the ground truth embedding as the centroid for each speaker
2. Cluster all the points to the closest centroid
3. Verify the output
"""
import os
import argparse
import json
import yaml
import pickle
import numpy as np
import pandas as pd
import utils
import isat_diarization as isat_d
import constants
def dist_emb(emb_1, emb_2, dist_type="euclid"):
"""
Distance between two embeddings
"""
dist = None
if dist_type == "euclid":
# Euclidean distance
dist = np.linalg.norm(emb_1 - emb_2)
elif dist_type == "cosine":
# Cosine similarity
dist = np.dot(emb_1, emb_2) / (np.linalg.norm(emb_1) * np.linalg.norm(emb_2))
return dist
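# Quick sketch of dist_emb on two toy vectors (the values are illustrative assumptions).
# Note that the "cosine" branch returns a similarity (larger = more alike), while the
# "euclid" branch returns a distance (smaller = more alike), so they are not
# interchangeable inside a min-distance loop such as cluster_gt below.
def _demo_dist_emb():
    a = np.array([1.0, 0.0, 0.0])
    b = np.array([0.0, 1.0, 0.0])
    print(dist_emb(a, b, dist_type="euclid"))  # ~1.414 (sqrt of 2)
    print(dist_emb(a, b, dist_type="cosine"))  # 0.0 (orthogonal vectors)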
def cluster_gt(embeddings, vad, dict_gt_speakers):
dict_clusters = {
val: {
"embedding_id": key,
"embedding_val": embeddings[key],
} for key, val in dict_gt_speakers.items()
}
list_emb = [(dict_gt_speakers[key], embeddings[key]) for key, val in dict_gt_speakers.items()]
labels = []
for emb_index, emb_actual in enumerate(embeddings):
min_dist = np.inf
label = "NoSpeaker"
for speaker, emb_ref in list_emb:
dist = dist_emb(emb_ref, emb_actual)
if min_dist > dist:
min_dist = dist
label = speaker
labels.append(label)
return labels
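# Tiny end-to-end sketch of cluster_gt with made-up 2-D "embeddings"; in the real
# pipeline the embeddings come from get_embeddings() and the ground-truth indices from
# the YAML file, so everything below is an illustrative assumption.
def _demo_cluster_gt():
    toy_embeddings = np.array([
        [0.0, 1.0],   # index 0 -> ground-truth segment for speaker "A"
        [0.1, 0.9],
        [1.0, 0.0],   # index 2 -> ground-truth segment for speaker "B"
        [0.9, 0.2],
    ])
    toy_gt = {0: "A", 2: "B"}
    return cluster_gt(toy_embeddings, vad=None, dict_gt_speakers=toy_gt)  # ['A', 'A', 'B', 'B']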
def normalize_embeddings(embeddings):
    """
    L2-normalize each embedding vector (step 5 of the pipeline) so that distance
    comparisons are not dominated by embedding magnitude.
    https://numpy.org/doc/stable/reference/generated/numpy.linalg.norm.html
    """
    embeddings = np.asarray(embeddings)
    l2_norm = np.linalg.norm(embeddings, ord=2, axis=-1, keepdims=True)
    l2_norm[l2_norm == 0] = 1.0  # guard against division by zero for all-zero vectors
    return embeddings / l2_norm
def get_embeddings(audio_path, dir_target, src="gen"):
"""
:param src: "gen" for generate, "file" for read from file
"""
embeddings = None
if src == "gen":
print(f"Generating embeddings")
embeddings = isat_d.gen_embeddings(audio_path, dir_target)
elif src == "file":
embeddings_path = os.path.join(dir_target, "embeddings.pkl")
with open(embeddings_path, "rb") as fh:
embeddings = pickle.load(fh)
print(f"Loaded embeddings from: {embeddings_path}")
print(f"embeddings: type: {type(embeddings)}")
embeddings_data = embeddings.data
return embeddings_data
def get_vad(vad_path):
with open(vad_path, "rb") as fh:
vad = json.load(fh)
print(f"Loaded vad from: {vad_path}")
print(f"vad: type: {type(vad)}")
return vad
def get_gt_emb():
dict_gt = {
0: "A",
20: "B",
30: "C",
}
return dict_gt
def yml_dump():
import yaml
dict_gt = {
0: {
"audio_path": "x.wav",
"output_path": "../outputs",
"num_speakers": 2,
"ground_truths": [
{
"start": 2.1,
"end": 3.1,
"id": 123,
"name": "Krishna"
},
{
"start": 4.4,
"end": 7.1,
"id": 500,
"name": "Gauranga"
}
]
},
1: {
"audio_path": "y.wav",
"output_path": "../outputs",
"num_speakers": 2,
"ground_truths": [
{
"start": 2.1,
"end": 3.1,
"id": 123,
"name": "Krishna"
},
{
"start": 4.4,
"end": 7.1,
"id": 500,
"name": "Gauranga"
}
]
}
}
with open("../data/spkr_diarization_gt_temp.yml", "w") as fh:
yaml.dump(dict_gt, fh)
def round_off_embedding(start_time, float_embed_width=0.5):
    """Map a start time (in seconds) to the index of the nearest embedding window.

    Embeddings are generated every `float_embed_width` seconds, so the index is the
    start time rounded to the nearest window boundary:
    round_off_embedding(1.3)        -> 3
    round_off_embedding(2.6)        -> 5
    round_off_embedding(3.0)        -> 6
    round_off_embedding(4.1)        -> 8
    round_off_embedding(4.1, 0.25)  -> 16
    """
    reciprocal = int(1 / float_embed_width)
    embed_id = round(start_time * reciprocal)
    return embed_id
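# Sketch of how ground-truth start times (in seconds) map to embedding indices when
# embeddings are produced every 0.5 s; the start times below are illustrative assumptions.
def _demo_round_off_embedding():
    starts = [0.0, 1.3, 10.2]
    return [round_off_embedding(s) for s in starts]  # [0, 3, 20]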
def get_embed_from_start_end(dict_all_gt):
dict_all_embed_gt = {}
for file_index, dict_gt in dict_all_gt.items():
dict_embed_gt = {
"ground_truths": [],
"audio_path": dict_gt["audio_path"],
"output_path": dict_gt["output_path"],
"num_speakers": dict_gt["num_speakers"]
}
list_ground_truths = []
for spkr_index, dict_spkr in enumerate(dict_gt["ground_truths"]):
start = dict_spkr["start"]
# end = dict_spkr["end"]
# id = dict_spkr["id"]
# name = dict_spkr["name"]
embed_start_id = round_off_embedding(start)
dict_gt = {
"embed_start_id": embed_start_id,
"id": dict_spkr["id"],
"name": dict_spkr["name"]
}
list_ground_truths.append(dict_gt)
dict_embed_gt["ground_truths"] = list_ground_truths
dict_all_embed_gt[file_index] = dict_embed_gt
return dict_all_embed_gt
def cluster_all(gt_yml_fp):
dict_all_embed_gt = read_ground_truths(gt_yml_fp)
status = "Done"
for file_index, dict_gt in dict_all_embed_gt.items():
list_ground_truths = dict_gt["ground_truths"]
audio_path = dict_gt["audio_path"]
output_path = dict_gt["output_path"]
dict_emb_gt = {dict_spkr["embed_start_id"]: dict_spkr["name"] for dict_spkr in list_ground_truths}
# for spkr_index, dict_spkr in enumerate(list_ground_truths):
# dict_emb_gt[dict_spkr["embed_start_id"]] = dict_spkr["name"]
if not os.path.exists(output_path):
os.makedirs(output_path)
run_clustering(audio_path, output_path, dict_emb_gt)
return status
def read_ground_truths(gt_yml_fp):
with open(gt_yml_fp, "r") as fh:
        dict_all_gt = yaml.safe_load(fh)
print(dict_all_gt)
dict_all_embed_gt = get_embed_from_start_end(dict_all_gt)
print(dict_all_embed_gt)
return dict_all_embed_gt
def run_clustering(audio_path, output_path, dict_gt):
embeddings = get_embeddings(audio_path, output_path)
# vad_path = os.path.join(output_path, "vad.json")
# vad = get_vad(vad_path)
vad = None
labels = cluster_gt(embeddings, vad, dict_gt)
print(utils.print_list(labels, "Clustered Embeddings"))
df = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import numpy as np
class IPA:
    def __init__(self, url_file_1, url_file_2):
        self.url_file_1 = url_file_1
        self.url_file_2 = url_file_2
def filtering_column(file_path):
print("run filtering column...")
result = pd.read_excel(file_path).drop(
columns=['NIM'])
return result
    def validity_test(file_path, a, b, c, d, e):
        """Validity test for the 5 categories:
        tangible, reliability, responsiveness, assurance, empathy
        params:
        file_path: path to the Excel file
        a, b, c, d, e: the first column of each category
        Return: dict of item-total correlations for each category
        """
data_kuesioner = IPA.filtering_column(file_path)
tangiable = data_kuesioner.loc[0:, a:'Total_A']
corr_matrix1 = tangiable.corr().round(3).loc['Total_A'].to_dict()
realibility = data_kuesioner.loc[0:, b:'Total_B']
corr_matrix2 = realibility.corr().round(3).loc['Total_B'].to_dict()
responsiveness = data_kuesioner.loc[0:, c:'Total_C']
corr_matrix3 = responsiveness.corr().round(3).loc['Total_C'].to_dict()
assurance = data_kuesioner.loc[0:, d:'Total_D']
corr_matrix4 = assurance.corr().round(3).loc['Total_D'].to_dict()
empathy = data_kuesioner.loc[0:, e:'Total_E']
corr_matrix5 = empathy.corr().round(3).loc['Total_E'].to_dict()
hasil = {'A': corr_matrix1, 'B': corr_matrix2,
'C': corr_matrix3, 'D': corr_matrix4, 'E': corr_matrix5}
return hasil
def uji_realibilitas(file_path, a, b, c, d, e):
data_kuesioner = pd.read_excel(file_path)
tangiable = data_kuesioner.loc[0:, a:'Total_A']
realibility = data_kuesioner.loc[0:, b:'Total_B']
responsiveness = data_kuesioner.loc[0:, c:'Total_C']
assurance = data_kuesioner.loc[0:, d:'Total_D']
empathy = data_kuesioner.loc[0:, e:'Total_E']
return {'A': IPA.cronbatch_alpha(tangiable).round(3), 'B': IPA.cronbatch_alpha(realibility).round(3),
'C': IPA.cronbatch_alpha(responsiveness).round(3), 'D': IPA.cronbatch_alpha(assurance).round(3),
'E': IPA.cronbatch_alpha(empathy).round(3)}
def cronbatch_alpha(file_path):
itemscores = np.asarray(file_path.iloc[:, :-1])
itemvars = itemscores.var(axis=0, ddof=0)
tscores = itemscores.sum(axis=0)
nitems = itemscores.shape[1]
nitems_row = itemscores.shape[0]
# result
total = itemscores.sum(axis=1)**2
total_kuadrat_baris = total.sum()
total_varians = (total_kuadrat_baris -
((itemscores.sum()**2) / nitems_row)) / nitems_row
# kedua = (itemvars.sum() / 1)
result = (nitems / (nitems-1)) * (1 - (itemvars.sum() / total_varians))
return result
def ipa_test(file_path_1, file_path_2):
harapan = IPA.filtering_column(file_path_1).sum()
presepsi = IPA.filtering_column(file_path_2).sum()
result = pd.DataFrame(presepsi/harapan*100, columns=['IPA'])
return result.round(3)
def SE(file_path):
harapan = IPA.filtering_column(file_path).mean()
result = pd.DataFrame(harapan, columns=['SE']).round(3)
return result
def SP(file_path):
presepsi = IPA.filtering_column(file_path).mean()
result = pd.DataFrame(presepsi, columns=['SP']).round(3)
return result
def GAP(file_path_1, file_path_2):
SE = IPA.SE(file_path_1)
SP = IPA.SP(file_path_2)
        # find the X- and Y-axis values (overall means of expectation and perception)
        SE_mean = SE.mean()
        SP_mean = SP.mean()
        # GAP = SP - SE (perception minus expectation)
frameGAP = pd.concat([SE, SP], axis=1)
frameGAP['GAP'] = frameGAP['SP'] - frameGAP['SE']
data = frameGAP['GAP']
return data.round(3)
    def group(file_path_1, file_path_2):
        """Combine the IPA, SE, SP and GAP results into a single DataFrame.
        Keyword arguments:
        file_path_1 -- expectation (harapan) data file
        file_path_2 -- perception (persepsi) data file
        Return: DataFrame with IPA, SE, SP and GAP columns
        """
data_IPA = IPA.ipa_test(file_path_1, file_path_2)
data_SE = IPA.SE(file_path_1)
data_SP = IPA.SP(file_path_2)
data_GAP = IPA.GAP(file_path_1, file_path_2)
results = pd.concat([data_IPA, data_SE, data_SP, data_GAP], axis=1)
return results
def kuadran(data_harapan, data_presepsi):
SE_Harapan = IPA.SE(data_harapan)
SP_Presepsi = IPA.SP(data_presepsi)
        # axis values (means of perception and expectation)
        convert_SP = SP_Presepsi.mean()
        convert_SE = SE_Harapan.mean()
        # X and Y axes
X = | pd.DataFrame(convert_SP, columns=['X']) | pandas.DataFrame |
import pandas as pd
import numpy as np
import csv
import os
import matplotlib.pyplot as plt
## Written by <NAME>
def topspin_to_pd(input_filename):
###row_dict was written by <NAME> ###
Rows = dict()
with open(input_filename) as p:
reader = csv.reader(p, delimiter=" ")
for row in reader:
row = [x for x in row if x]
if "#" in row or not row:
continue
else:
try:
Rows[row[0]] = [row[3],row[4]]
except:
pass
HSQC_Data_df = | pd.DataFrame.from_dict(Rows, orient='index',columns = ['1H','13C']) | pandas.DataFrame.from_dict |
#Cleaning data
import pandas as pd
import numpy as np
def clean():
#Reading in features/echonest frames and tracks df's to merge on track_id
features = pd.read_csv('features.csv',skiprows=[2,3])
features.rename(columns={'feature':'track_id'}, inplace=True)
columns = np.array(features.columns)
descs = np.array(features.iloc[0,:])
for i,k in enumerate(columns):
columns[i] = k + '_' + descs[i]
features.columns = columns
features.drop(features.index[[0,1]],inplace=True)
features.reset_index(inplace=True)
features.drop('index',inplace=True,axis=1)
features.rename(columns={'track_id_statistics':'track_id'}, inplace=True)
cols = features.columns
features = | pd.read_csv('features.csv',skiprows=[0,1,2,3],header=None,names=cols) | pandas.read_csv |
"""Live and historical flood monitoring data from the Environment Agency API"""
import requests
import pandas as pd
import flood_tool.geo as geo
import flood_tool.tool as tool
import numpy as np
import folium
__all__ = []
LIVE_URL = "http://environment.data.gov.uk/flood-monitoring/id/stations"
ARCHIVE_URL = "http://environment.data.gov.uk/flood-monitoring/archive/"
class get_map(object):
'''
    class to retrieve rainfall data from the Environment Agency API, used to build the alert system and to look up historical readings for a specific date
'''
def __init__(self, date='2019-10-08'):
self.station_url = 'http://environment.data.gov.uk/flood-monitoring/id/stations.csv?parameter=rainfall'
self.value_url = 'http://environment.data.gov.uk/flood-monitoring/id/measures.csv?parameter=rainfall'
        self.spe_date_url = 'http://environment.data.gov.uk/flood-monitoring/data/readings.csv?parameter=rainfall&parameter=rainfall&_view=full&date=' + str(date)
# for requirement 3
# read the latest values from website
# read the corresponding values of stations
# and it is updated once the new file is uploaded
self.date = date
DF_value = pd.read_csv(self.value_url)
self.value = DF_value.loc[:, ['stationReference', 'value', 'dateTime']]
# read the correspongding latitude and longitude of stations
DF_location = pd.read_csv(self.station_url)
self.location = DF_location.loc[:, ['stationReference', 'lat', 'long']]
# merge the two DataFrame to be used later.
# the purpose is to combine the location with value
self.DF3 = self.value.merge(self.location, left_on='stationReference', right_on='stationReference')
DF_readings = pd.read_csv(self.spe_date_url)
readings = DF_readings.loc[:, ['stationReference', 'value', 'dateTime']]
DF4 = self.location.merge(readings, left_on='stationReference', right_on='stationReference')
# turns all elements in value column into floats
DF4['value'] = | pd.to_numeric(DF4['value'], errors='coerce') | pandas.to_numeric |
import pandas as pd
import matplotlib.pyplot as plt
import data
import testing_data
import statistics
import numpy as np
pd.set_option('display.max_columns', None)
def findWaitingTime(arrival_time, processes, total_processes, burst_time, waiting_time, quantum):
rem_bt = [0] * total_processes
for i in range(total_processes):
rem_bt[i] = burst_time[i]
t = 0
while (1):
done = True
for i in range(total_processes):
var = True
if (rem_bt[i] > 0):
done = False
if (arrival_time[i] <= t):
if (rem_bt[i] > quantum):
t += quantum
rem_bt[i] -= quantum
else:
t = t + rem_bt[i]
waiting_time[i] = t - burst_time[i] - arrival_time[i]
rem_bt[i] = 0
else:
t += 1
var = False
if var == False:
break
if (done == True):
break
def fairnessFunc(waiting_time):
    # Fairness score: the largest jump in waiting time between consecutive processes
    largest_diff = 0
    for i in range(len(waiting_time) - 1):
        diff = abs(waiting_time[i] - waiting_time[i + 1])
        if (diff > largest_diff):
            largest_diff = diff
    return largest_diff
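# Small sketch of the fairness measure on an assumed waiting-time list: the score is the
# largest jump between consecutive processes' waiting times (0 would be perfectly even).
def _demo_fairness():
    return fairnessFunc([4, 10, 11, 30])  # 19 (the 11 -> 30 jump)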
def findTurnAroundTime(arrival_time, processes, total_processes, burst_time, waiting_time, turnaroundtime):
for i in range(total_processes):
turnaroundtime[i] = burst_time[i] + waiting_time[i]
def findavgTime(arrival_time, processes, total_processes, burst_time, quantum):
waiting_time = [0] * total_processes
turnaroundtime = [0] * total_processes
findWaitingTime(arrival_time, processes, total_processes, burst_time, waiting_time, quantum)
findTurnAroundTime(arrival_time, processes, total_processes, burst_time, waiting_time, turnaroundtime)
total_waitingtime = []
total_turnaroundtime = []
total_wt = 0
total_tat = 0
for i in range(total_processes):
total_wt = total_wt + waiting_time[i]
total_tat = total_tat + turnaroundtime[i]
total_waitingtime.append(total_wt)
total_turnaroundtime.append(total_tat)
avg_wt = total_wt / total_processes
avg_tat = total_tat / total_processes
process_df = pd.DataFrame()
# process_df['process_id'] = processes
process_df['burst_time'] = burst_time
process_df['arrival_time'] = arrival_time
process_df['total_waitingtime'] = total_waitingtime
process_df['total_turnarounftime'] = total_turnaroundtime
#####
diff_list = []
# largest_diff_list = []
for i in range(len(total_waitingtime)-1):
diff = abs(total_waitingtime[i] - total_waitingtime[i + 1])
diff_list.append(diff)
# process_df['diff_waiting_time'] = diff_list
largest_diff = max(diff_list)
# largest_diff_list.append(largest_diff)
# print(largest_diff_list)
return process_df, avg_tat, avg_wt, largest_diff
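# Illustrative run of the round-robin simulation on a tiny assumed workload
# (3 processes arriving at t=0, quantum of 2); the numbers are not from the training sets.
def _demo_round_robin():
    processes = [1, 2, 3]
    arrival_time = [0, 0, 0]
    burst_time = [4, 3, 2]
    df, avg_tat, avg_wt, largest_diff = findavgTime(arrival_time, processes, len(processes), burst_time, quantum=2)
    return avg_wt, avg_tat, largest_diff  # avg_wt ~ 4.67 for this workload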
def plotGraphs(quantum_df, i):
quantum_df = quantum_df.sort_values('quantum')
plt.plot('quantum', 'fairness_score', data=quantum_df, color='magenta', label = 'fair_wt')
plt.plot('quantum', 'average_waitingtime', data=quantum_df, color='blue', label ="avg_wt")
plt.legend()
plt.title('train_set_' + str(i))
plt.grid()
plt.xlabel('quantum value')
plt.ylabel('time')
plt.tight_layout()
plt.savefig('train_set_'+ str(i) +'.png')
plt.show()
# quantum_df.plot.scatter(x = 'quantum', y = 'fair_waitingtime')
# plt.show()
quantum_assignment_df = pd.DataFrame()
quantum_df = | pd.DataFrame() | pandas.DataFrame |