"""Tests for the sdv.constraints.tabular module."""
import uuid
from datetime import datetime
from unittest.mock import Mock
import numpy as np
import pandas as pd
import pytest
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.constraints.tabular import (
Between, ColumnFormula, CustomConstraint, GreaterThan, Negative, OneHotEncoding, Positive,
Rounding, Unique, UniqueCombinations)
def dummy_transform_table(table_data):
return table_data
def dummy_reverse_transform_table(table_data):
return table_data
def dummy_is_valid_table(table_data):
return [True] * len(table_data)
def dummy_transform_table_column(table_data, column):
return table_data
def dummy_reverse_transform_table_column(table_data, column):
return table_data
def dummy_is_valid_table_column(table_data, column):
return [True] * len(table_data[column])
def dummy_transform_column(column_data):
return column_data
def dummy_reverse_transform_column(column_data):
return column_data
def dummy_is_valid_column(column_data):
return [True] * len(column_data)
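# Illustrative sketch (not part of the original test suite): how the dummy
# functions above are meant to be wired into a ``CustomConstraint``. The helper
# name ``_example_custom_constraint`` is ours; only ``CustomConstraint`` and the
# dummy functions come from the code under test.
def _example_custom_constraint():
    """Build a CustomConstraint from the table-based dummy functions."""
    return CustomConstraint(
        transform=dummy_transform_table,
        reverse_transform=dummy_reverse_transform_table,
        is_valid=dummy_is_valid_table,
    )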
class TestCustomConstraint():
def test___init__(self):
"""Test the ``CustomConstraint.__init__`` method.
The ``transform``, ``reverse_transform`` and ``is_valid`` methods
should be replaced by the given ones, importing them if necessary.
Setup:
- Create dummy functions (created above this class).
Input:
- dummy ``transform`` and ``reverse_transform`` functions plus the ``is_valid`` FQN
Output:
- Instance with all the methods replaced by the dummy versions.
"""
is_valid_fqn = __name__ + '.dummy_is_valid_table'
# Run
instance = CustomConstraint(
transform=dummy_transform_table,
reverse_transform=dummy_reverse_transform_table,
is_valid=is_valid_fqn
)
# Assert
assert instance._transform == dummy_transform_table
assert instance._reverse_transform == dummy_reverse_transform_table
assert instance._is_valid == dummy_is_valid_table
def test__run_transform_table(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy transform function with ``table_data`` argument.
Side Effects:
- Run transform function once with ``table_data`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_table,
return_value=table_data)
# Run
instance = CustomConstraint(transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args
dummy_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_table(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy reverse transform function with ``table_data`` argument.
Side Effects:
- Run reverse transform function once with ``table_data`` as input.
Output:
- applied identity transformation "table_data = reverse_transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_table,
return_value=table_data)
# Run
instance = CustomConstraint(reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args
dummy_reverse_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_table(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy is valid function with ``table_data`` argument.
Side Effects:
- Run is valid function once with ``table_data`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_table)
# Run
instance = CustomConstraint(is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args
dummy_is_valid_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
np.testing.assert_array_equal(is_valid, expected_out)
def test__run_transform_table_column(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy transform function with ``table_data`` and ``column`` arguments.
Side Effects:
- Run transform function once with ``table_data`` and ``column`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_table_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args
assert called[0][1] == 'a'
dummy_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_table_column(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy reverse transform function with ``table_data`` and ``column`` arguments.
Side Effects:
- Run reverse transform function once with ``table_data`` and ``column`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_table_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args
assert called[0][1] == 'a'
dummy_reverse_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_table_column(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy is valid function with ``table_data`` and ``column`` argument.
Side Effects:
- Run is valid function once with ``table_data`` and ``column`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_table_column)
# Run
instance = CustomConstraint(columns='a', is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args
assert called[0][1] == 'a'
dummy_is_valid_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
np.testing.assert_array_equal(is_valid, expected_out)
def test__run_transform_column(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy transform function with ``column_data`` argument.
Side Effects:
- Run the transform function twice: first attempting ``table_data`` and
``column`` as arguments, then with ``column_data`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_column(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy reverse transform function with ``column_data`` argument.
Side Effects:
- Run the reverse transform function twice: first attempting ``table_data`` and
``column`` as arguments, then with ``column_data`` as input.
Output:
- Applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_column(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy is valid function with ``column_data`` argument.
Side Effects:
- Run the is valid function twice: first attempting ``table_data`` and
``column`` as arguments, then with ``column_data`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_column)
# Run
instance = CustomConstraint(columns='a', is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
np.testing.assert_array_equal(is_valid, expected_out)
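# Rough sketch (an assumption drawn from the tests above, not the actual SDV
# implementation): the fallback behaviour exercised by the "column" based tests
# amounts to calling the user function with ``(table_data, column)`` first and,
# on ``TypeError``, retrying with just the column data.
def _example_column_dispatch(function, table_data, column):
    """Call ``function`` table-and-column style, falling back to column data."""
    try:
        return function(table_data, column)
    except TypeError:
        return function(table_data[column])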
class TestUniqueCombinations():
def test___init__(self):
"""Test the ``UniqueCombinations.__init__`` method.
It is expected to create a new Constraint instance and receive the names of
the columns that need to produce unique combinations.
Side effects:
- instance._columns == columns
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns)
# Assert
assert instance._columns == columns
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``UniqueCombinations.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns, handling_strategy='transform')
# Assert
assert instance.rebuild_columns == tuple(columns)
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``UniqueCombinations.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns, handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test___init__with_one_column(self):
"""Test the ``UniqueCombinations.__init__`` method with only one constraint column.
Expect a ``ValueError`` because UniqueCombinations requires at least two
constraint columns.
Side effects:
- A ValueError is raised
"""
# Setup
columns = ['c']
# Run and assert
with pytest.raises(ValueError):
UniqueCombinations(columns=columns)
def test_fit(self):
"""Test the ``UniqueCombinations.fit`` method.
The ``UniqueCombinations.fit`` method is expected to:
- Call ``UniqueCombinations._valid_separator``.
- Find a valid separator for the data and generate the joint column name.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
instance.fit(table_data)
# Asserts
expected_combinations = pd.DataFrame({
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
assert instance._separator == '#'
assert instance._joint_column == 'b#c'
pd.testing.assert_frame_equal(instance._combinations, expected_combinations)
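# Note (editorial, inferred from the assertions above): ``fit`` picks a
# separator that does not appear in the data ('#' here) and joins the
# constraint column names with it, e.g. ['b', 'c'] -> 'b#c'.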
def test_is_valid_true(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_false(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['D', 'E', 'F'],
'c': ['g', 'h', 'i']
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_true(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_false(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [6, 7, 8],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``UniqueCombinations.transform`` method.
It is expected to return the table data with the constraint columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns concatenated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c'].items()]
except ValueError:
assert False
def test_transform_non_string(self):
"""Test the ``UniqueCombinations.transform`` method with non strings.
It is expected to return the table data with the constraint columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns as UUIDs.
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c#d'].items()]
except ValueError:
assert False
def test_transform_not_all_columns_provided(self):
"""Test the ``UniqueCombinations.transform`` method.
If some of the columns needed for the transform are missing, and
``fit_columns_model`` is False, it will raise a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns, fit_columns_model=False)
instance.fit(table_data)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test_reverse_transform(self):
"""Test the ``UniqueCombinations.reverse_transform`` method.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_non_string(self):
"""Test the ``UniqueCombinations.reverse_transform`` method with a non string column.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
pd.testing.assert_frame_equal(expected_out, out)
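# Minimal sketch (assumption, not the SDV implementation) of the mapping that
# the ``UniqueCombinations`` transform/reverse_transform tests above rely on:
# each observed combination of the constraint columns is replaced by a UUID,
# and the reverse transform looks the combination back up from that UUID.
def _example_combinations_roundtrip(table_data, columns):
    """Map each unique combination to a UUID string and invert the mapping."""
    combinations = table_data[columns].drop_duplicates()
    combinations_to_uuids = {
        tuple(row): str(uuid.uuid4()) for row in combinations.itertuples(index=False)
    }
    uuids_to_combinations = {value: key for key, value in combinations_to_uuids.items()}
    encoded = table_data[columns].apply(
        lambda row: combinations_to_uuids[tuple(row)], axis=1)
    decoded = encoded.map(uuids_to_combinations)
    return encoded, decoded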
class TestGreaterThan():
def test__validate_scalar(self):
"""Test the ``_validate_scalar`` method.
This method validates the inputs and transforms them into
the correct format.
Input:
- scalar_column = 0
- column_names = 'b'
Output:
- column_names == ['b']
"""
# Setup
scalar_column = 0
column_names = 'b'
scalar = 'high'
# Run
out = GreaterThan._validate_scalar(scalar_column, column_names, scalar)
# Assert
assert out == ['b']
def test__validate_scalar_list(self):
"""Test the ``_validate_scalar`` method.
This method validates the inputs and transforms them into
the correct format.
Input:
- scalar_column = 0
- column_names = ['b']
Output:
- column_names == ['b']
"""
# Setup
scalar_column = 0
column_names = ['b']
scalar = 'low'
# Run
out = GreaterThan._validate_scalar(scalar_column, column_names, scalar)
# Assert
assert out == ['b']
def test__validate_scalar_error(self):
"""Test the ``_validate_scalar`` method.
This method raises an error when the scalar column is a list.
Input:
- scalar_column = [0]
- column_names = 'b'
Side effect:
- Raise error since the scalar is a list
"""
# Setup
scalar_column = [0]
column_names = 'b'
scalar = 'high'
# Run / Assert
with pytest.raises(TypeError):
GreaterThan._validate_scalar(scalar_column, column_names, scalar)
def test__validate_inputs_high_is_scalar(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 3
- scalar = 'high'
Output:
- low == ['a']
- high == 3
- constraint_columns == ('a',)
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=3, scalar='high', drop=None)
# Assert
assert low == ['a']
assert high == 3
assert constraint_columns == ('a',)
def test__validate_inputs_low_is_scalar(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 3
- high = 'b'
- scalar = 'low'
- drop = None
Output:
- low == 3
- high == ['b']
- constraint_columns == ('b',)
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low=3, high='b', scalar='low', drop=None)
# Assert
assert low == 3
assert high == ['b']
assert constraint_columns == ('b',)
def test__validate_inputs_scalar_none(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 3 # where 3 is a column name
- scalar = None
- drop = None
Output:
- low == ['a']
- high == [3]
- constraint_columns = ('a', 3)
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=3, scalar=None, drop=None)
# Assert
assert low == ['a']
assert high == [3]
assert constraint_columns == ('a', 3)
def test__validate_inputs_scalar_none_lists(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = ['a']
- high = ['b', 'c']
- scalar = None
- drop = None
Output:
- low == ['a']
- high == ['b', 'c']
- constraint_columns = ('a', 'b', 'c')
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low=['a'], high=['b', 'c'], scalar=None, drop=None)
# Assert
assert low == ['a']
assert high == ['b', 'c']
assert constraint_columns == ('a', 'b', 'c')
def test__validate_inputs_scalar_none_two_lists(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = ['a', 0]
- high = ['b', 'c']
- scalar = None
- drop = None
Side effect:
- Raise error because both high and low are more than one column
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low=['a', 0], high=['b', 'c'], scalar=None, drop=None)
def test__validate_inputs_scalar_unknown(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 'b'
- scalar = 'unknown'
- drop = None
Side effect:
- Raise error because scalar is unknown
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low='a', high='b', scalar='unknown', drop=None)
def test__validate_inputs_drop_error_low(self):
"""Test the ``_validate_inputs`` method.
Make sure the method raises an error if ``drop``==``scalar``
when ``scalar`` is not ``None``.
Input:
- low = 2
- high = 'b'
- scalar = 'low'
- drop = 'low'
Side effect:
- Raise error because ``drop`` is the same as ``scalar``
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low=2, high='b', scalar='low', drop='low')
def test__validate_inputs_drop_error_high(self):
"""Test the ``_validate_inputs`` method.
Make sure the method raises an error if ``drop``==``scalar``
when ``scalar`` is not ``None``.
Input:
- low = 'a'
- high = 3
- scalar = 'high'
- drop = 'high'
Side effect:
- Raise error because ``drop`` is the same as ``scalar``
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low='a', high=3, scalar='high', drop='high')
def test__validate_inputs_drop_success(self):
"""Test the ``_validate_inputs`` method.
Make sure the method succeeds when ``drop`` is different from ``scalar``.
Input:
- low = 'a'
- high = 0
- scalar = 'high'
- drop = 'low'
Output:
- low = ['a']
- high = 0
- constraint_columns == ('a',)
"""
# Run / Assert
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=0, scalar='high', drop='low')
assert low == ['a']
assert high == 0
assert constraint_columns == ('a',)
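# Summary of the ``_validate_inputs`` normalization exercised above
# (an editorial reading of the expected outputs, not extra assertions):
#   _validate_inputs(low='a', high=3, scalar='high', drop=None)
#       -> (['a'], 3, ('a',))
#   _validate_inputs(low=['a'], high=['b', 'c'], scalar=None, drop=None)
#       -> (['a'], ['b', 'c'], ('a', 'b', 'c'))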
def test___init___(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
Side effects:
- instance._low == 'a'
- instance._high == 'b'
- instance._strict == False
"""
# Run
instance = GreaterThan(low='a', high='b')
# Asserts
assert instance._low == ['a']
assert instance._high == ['b']
assert instance._strict is False
assert instance._scalar is None
assert instance._drop is None
assert instance.constraint_columns == ('a', 'b')
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``GreaterThan.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Run
instance = GreaterThan(low='a', high='b', handling_strategy='transform')
# Assert
assert instance.rebuild_columns == ['b']
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``GreaterThan.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Run
instance = GreaterThan(low='a', high='b', handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test___init___high_is_scalar(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes. Make sure ``scalar``
is set to ``'high'``.
Input:
- low = 'a'
- high = 0
- strict = True
- drop = 'low'
- scalar = 'high'
Side effects:
- instance._low == 'a'
- instance._high == 0
- instance._strict == True
- instance._drop = 'low'
- instance._scalar == 'high'
"""
# Run
instance = GreaterThan(low='a', high=0, strict=True, drop='low', scalar='high')
# Asserts
assert instance._low == ['a']
assert instance._high == 0
assert instance._strict is True
assert instance._scalar == 'high'
assert instance._drop == 'low'
assert instance.constraint_columns == ('a',)
def test___init___low_is_scalar(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes. Make sure ``scalar``
is set to ``'low'``.
Input:
- low = 0
- high = 'a'
- strict = True
- drop = 'high'
- scalar = 'low'
Side effects:
- instance._low == 0
- instance._high == 'a'
- instance._strict == True
- instance._drop = 'high'
- instance._scalar == 'low'
"""
# Run
instance = GreaterThan(low=0, high='a', strict=True, drop='high', scalar='low')
# Asserts
assert instance._low == 0
assert instance._high == ['a']
assert instance._strict is True
assert instance._scalar == 'low'
assert instance._drop == 'high'
assert instance.constraint_columns == ('a',)
def test___init___strict_is_false(self):
"""Test the ``GreaterThan.__init__`` method.
Ensure that ``operator`` is set to ``np.greater_equal``
when ``strict`` is set to ``False``.
Input:
- low = 'a'
- high = 'b'
- strict = False
"""
# Run
instance = GreaterThan(low='a', high='b', strict=False)
# Assert
assert instance.operator == np.greater_equal
def test___init___strict_is_true(self):
"""Test the ``GreaterThan.__init__`` method.
Ensure that ``operator`` is set to ``np.greater``
when ``strict`` is set to ``True``.
Input:
- low = 'a'
- high = 'b'
- strict = True
"""
# Run
instance = GreaterThan(low='a', high='b', strict=True)
# Assert
assert instance.operator == np.greater
def test__init__get_columns_to_reconstruct_default(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
Side effects:
- self._columns_to_reconstruct == ['b']
"""
# Setup
instance = GreaterThan(low='a', high='b')
assert instance._columns_to_reconstruct == ['b']
def test__init__get_columns_to_reconstruct_drop_high(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
- drop = 'high'
Side effects:
- self._columns_to_reconstruct == ['b']
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='high')
assert instance._columns_to_reconstruct == ['b']
def test__init__get_columns_to_reconstruct_drop_low(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
- drop = 'low'
Side effects:
- self._columns_to_reconstruct == ['a']
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='low')
assert instance._columns_to_reconstruct == ['a']
def test__init__get_columns_to_reconstruct_scalar_high(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 0
- scalar = 'high'
Side effects:
- self._columns_to_reconstruct == ['a']
"""
# Setup
instance = GreaterThan(low='a', high=0, scalar='high')
assert instance._columns_to_reconstruct == ['a']
def test__get_value_column_list(self):
"""Test the ``GreaterThan._get_value`` method.
This method returns a scalar or a ndarray of values
depending on the type of the ``field``.
Input:
- Table with given data.
- field = 'low'
"""
# Setup
instance = GreaterThan(low='a', high='b')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
out = instance._get_value(table_data, 'low')
# Assert
expected = table_data[['a']].values
np.testing.assert_array_equal(out, expected)
def test__get_value_scalar(self):
"""Test the ``GreaterThan._get_value`` method.
This method returns a scalar or a ndarray of values
depending on the type of the ``field``.
Input:
- Table with given data.
- field = 'low'
- scalar = 'low'
"""
# Setup
instance = GreaterThan(low=3, high='b', scalar='low')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
out = instance._get_value(table_data, 'low')
# Assert
expected = 3
assert out == expected
def test__get_diff_columns_name_low_is_scalar(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should be the given column names with the token '#' appended.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=0, high=['a', 'b#'], scalar='low')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b#': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a#', 'b##']
assert out == expected
def test__get_diff_columns_name_high_is_scalar(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should be the given column names with the token '#' appended.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=0, scalar='high')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a#', 'b#']
assert out == expected
def test__get_diff_columns_name_scalar_is_none(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned name should be the two column names joined by a token.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='b#', scalar=None)
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b#': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['b##a']
assert out == expected
def test__get_diff_columns_name_scalar_is_none_multi_column_low(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
Each returned name should be a pair of the compared column names joined by a token.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=['a#', 'c'], high='b', scalar=None)
table_data = pd.DataFrame({
'a#': [1, 2, 4],
'b': [4, 5, 6],
'c#': [7, 8, 9]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a##b', 'c#b']
assert out == expected
def test__get_diff_columns_name_scalar_is_none_multi_column_high(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
Each returned name should be a pair of the compared column names joined by a token.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=0, high=['b', 'c'], scalar=None)
table_data = pd.DataFrame({
0: [1, 2, 4],
'b': [4, 5, 6],
'c#': [7, 8, 9]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['b#0', 'c#0']
assert out == expected
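# Naming rule exercised by the ``_get_diff_columns_name`` tests above
# (editorial summary): when comparing against a scalar, each constraint column
# gets a '#' token appended ('a' -> 'a#', 'b#' -> 'b##'); when both sides are
# columns, the diff column joins them with the token, e.g. 'b' and 'a' -> 'b#a'.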
def test__check_columns_exist_success(self):
"""Test the ``GreaterThan._check_columns_exist`` method.
This method raises an error if the specified columns in
``low`` or ``high`` do not exist.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run / Assert
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
instance._check_columns_exist(table_data, 'low')
instance._check_columns_exist(table_data, 'high')
def test__check_columns_exist_error(self):
"""Test the ``GreaterThan._check_columns_exist`` method.
This method raises an error if the specified columns in
``low`` or ``high`` do not exist.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='c')
# Run / Assert
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
instance._check_columns_exist(table_data, 'low')
with pytest.raises(KeyError):
instance._check_columns_exist(table_data, 'high')
def test__fit_only_one_datetime_arg(self):
"""Test the ``Between._fit`` method by passing in only one arg as datetime.
If only one of the high / low args is a datetime type, expect a ValueError.
Input:
- low is an int column
- high is a datetime
Output:
- n/a
Side Effects:
- ValueError
"""
# Setup
instance = GreaterThan(low='a', high=pd.to_datetime('2021-01-01'), scalar='high')
# Run and assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(ValueError):
instance._fit(table_data)
def test__fit__low_is_not_found_and_scalar_is_none(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``low`` is set to a value not seen in ``table_data``.
Input:
- Table without ``low`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low=3, high='b')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__high_is_not_found_and_scalar_is_none(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``high`` is set to a value not seen in ``table_data``.
Input:
- Table without ``high`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low='a', high=3)
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__low_is_not_found_scalar_is_high(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``low`` is set to a value not seen in ``table_data``.
Input:
- Table without ``low`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low='c', high=3, scalar='high')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__high_is_not_found_scalar_is_high(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``high`` is set to a value not seen in ``table_data``.
Input:
- Table without ``high`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low=3, high='c', scalar='low')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__columns_to_reconstruct_drop_high(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to ``instance._high`` if ``instance._drop`` is ``'high'``.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
def test__fit__columns_to_reconstruct_drop_low(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to ``instance._low`` if ``instance._drop`` is ``'low'``.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['a']
def test__fit__columns_to_reconstruct_default(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `high` by default.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
def test__fit__columns_to_reconstruct_high_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `low` if ``instance._scalar`` is ``'high'``.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', scalar='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['a']
def test__fit__columns_to_reconstruct_low_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `high` if ``instance._scalar`` is ``'low'``.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b', scalar='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
def test__fit__diff_columns_one_column(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_diff_columns``
to the one column in ``instance.constraint_columns`` plus a
token if there is only one column in that set.
Input:
- Table with one column.
Side Effect:
- ``_diff_columns`` == ['a#']
"""
# Setup
instance = GreaterThan(low='a', high=3, scalar='high')
# Run
table_data = pd.DataFrame({'a': [1, 2, 3]})
instance._fit(table_data)
# Asserts
assert instance._diff_columns == ['a#']
def test__fit__diff_columns_multiple_columns(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_diff_columns``
to the two columns in ``instance.constraint_columns`` separated
by a token if both columns are in that set.
Input:
- Table with two columns.
Side Effect:
- ``_diff_columns`` == ['b#a']
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._diff_columns == ['b#a']
def test__fit_int(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_scalar`` is ``None``.
Input:
- Table that contains two constrained columns with the high one
being made of integers.
Side Effect:
- The _dtype attribute gets `int` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'i' for dtype in instance._dtype])
def test__fit_float(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_scalar`` is ``None``.
Input:
- Table that contains two constrained columns with the high one
being made of float values.
Side Effect:
- The _dtype attribute gets `float` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'f' for dtype in instance._dtype])
def test__fit_datetime(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_scalar`` is ``None``.
Input:
- Table that contains two constrained columns of datetimes.
Side Effect:
- The _dtype attribute gets `datetime` as the value.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01']),
'b': pd.to_datetime(['2020-01-02'])
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'M' for dtype in instance._dtype])
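# The ``dtype.kind`` codes asserted in the ``_fit`` dtype tests are the
# standard numpy kind characters: 'i' for signed integers, 'f' for floats
# and 'M' for datetime64 values.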
def test__fit_type__high_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``low`` column as the ``_dtype`` attribute
if ``_scalar`` is ``'high'``.
Input:
- Table that contains two constrained columns with the low one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low='a', high=3, scalar='high')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'f' for dtype in instance._dtype])
def test__fit_type__low_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_scalar`` is ``'low'``.
Input:
- Table that contains two constrained columns with the high one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low=3, high='b', scalar='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'f' for dtype in instance._dtype])
def test__fit_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute.
Input:
- Table that contains two constrained columns with different dtype.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=0, scalar='high')
dtype_int = pd.Series([1]).dtype
dtype_float = np.dtype('float')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4., 5., 6.]
})
instance._fit(table_data)
# Assert
expected_diff_columns = ['a#', 'b#']
expected_dtype = pd.Series([dtype_int, dtype_float], index=table_data.columns)
assert instance._diff_columns == expected_diff_columns
pd.testing.assert_series_equal(instance._dtype, expected_dtype)
def test__fit_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute.
Input:
- Table that contains two constrained columns with different dtype.
"""
# Setup
instance = GreaterThan(low=0, high=['a', 'b'], scalar='low')
dtype_int = pd.Series([1]).dtype
dtype_float = np.dtype('float')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4., 5., 6.]
})
instance._fit(table_data)
# Assert
expected_diff_columns = ['a#', 'b#']
expected_dtype = pd.Series([dtype_int, dtype_float], index=table_data.columns)
assert instance._diff_columns == expected_diff_columns
pd.testing.assert_series_equal(instance._dtype, expected_dtype)
def test_is_valid_strict_false(self):
"""Test the ``GreaterThan.is_valid`` method with strict False.
If strict is False, equal values should count as valid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- False should be returned for the strictly invalid row and True
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=False)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_strict_true(self):
"""Test the ``GreaterThan.is_valid`` method with strict True.
If strict is True, equal values should count as invalid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- True should be returned for the strictly valid row and False
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, False, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_low_is_scalar_high_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a scalar, and high is a column name, then
the values in that column should all be higher than
``instance._low``.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low=3, high='b', strict=False, scalar='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, False, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_high_is_scalar_low_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a scalar, and low is a column name, then
the values in that column should all be lower than
``instance._high``.
Input:
- Table with values above and below high.
Output:
- True should be returned for the rows where the low
column is below high.
"""
# Setup
instance = GreaterThan(low='a', high=2, strict=False, scalar='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a scalar, and low is multi column, then
the values in that column should all be lower than
``instance._high``.
Input:
- Table with values above and below high.
Output:
- True should be returned for the rows where the low
column is below high.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=2, strict=False, scalar='high')
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [False, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a scalar, and high is multi column, then
the values in that column should all be higher than
``instance._low``.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low=2, high=['a', 'b'], strict=False, scalar='low')
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [False, True, True]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_scalar_is_none_multi_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If scalar is none, and high is multi column, then
the values in that column should all be higher than
in the low column.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low='b', high=['a', 'c'], strict=False)
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = [False, True, True]
np.testing.assert_array_equal(expected_out, out)
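# In the multi-column cases above every listed column has to satisfy the
# comparison for a row to be valid, e.g. with low=['a', 'b'] and high=2 a row
# is valid only when both 'a' and 'b' are less than or equal to 2.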
def test_is_valid_high_is_datetime(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a datetime and low is a column,
the values in that column should all be lower than
``instance._high``.
Input:
- Table with values above and below `high`.
Output:
- True should be returned for the rows where the low
column is below `high`.
"""
# Setup
high_dt = pd.to_datetime('8/31/2021')
instance = GreaterThan(low='a', high=high_dt, strict=False, scalar='high')
table_data = pd.DataFrame({
'a': [datetime(2020, 5, 17), datetime(2020, 2, 1), datetime(2021, 9, 1)],
'b': [4, 2, 2],
})
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_low_is_datetime(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a datetime and high is a column,
the values in that column should all be higher than
``instance._low``.
Input:
- Table with values above and below `low`.
Output:
- True should be returned for the rows where the high
column is above `low`.
"""
# Setup
low_dt = pd.to_datetime('8/31/2021')
instance = GreaterThan(low=low_dt, high='a', strict=False, scalar='low')
table_data = pd.DataFrame({
'a': [datetime(2021, 9, 17), datetime(2021, 7, 1), datetime(2021, 9, 1)],
'b': [4, 2, 2],
})
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = [True, False, True]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_two_cols_with_nans(self):
"""Test the ``GreaterThan.is_valid`` method with nan values.
If there is a NaN row, expect that `is_valid` returns True.
Input:
- Table with a NaN row
Output:
- True should be returned for the NaN row.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, None, 3],
'b': [4, None, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_two_cols_with_one_nan(self):
"""Test the ``GreaterThan.is_valid`` method with nan values.
If there is a row in which we compare one NaN value with one
non-NaN value, expect that `is_valid` returns True.
Input:
- Table with a row that contains only one NaN value.
Output:
- True should be returned for the row with the NaN value.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, None, 3],
'b': [4, 5, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test__transform_int_drop_none(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type int.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
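# Worked example for the expected value above: every row has b - a == 3,
# so the diff column stores np.log(3 + 1) == np.log(4) (~1.3863).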
def test__transform_int_drop_high(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type int.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the high column.
Setup:
- ``_drop`` is set to ``high``.
Input:
        - Table with two constrained columns at a constant distance of
          exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the high column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_int_drop_low(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type int.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the low column.
Setup:
- ``_drop`` is set to ``low``.
Input:
        - Table with two constrained columns at a constant distance of
          exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the low column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_float_drop_none(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type float.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_datetime_drop_none(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type datetime.
If the columns are of type datetime, ``_transform`` is expected
to convert the timedelta distance into numeric before applying
the +1 and logarithm.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with values at a distance of exactly 1 second.
Output:
- Same table with a diff column of the logarithms
          of the distance in nanoseconds + 1, which is np.log(1_000_000_001).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_columns = ['a#b']
instance._is_datetime = True
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
})
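        # The rows are exactly one second apart; one second is 1_000_000_000
        # nanoseconds, so the expected diff is np.log(1_000_000_000 + 1).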
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_not_all_columns_provided(self):
"""Test the ``GreaterThan.transform`` method.
If some of the columns needed for the transform are missing, it will raise
a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, fit_columns_model=False)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test__transform_high_is_scalar(self):
"""Test the ``GreaterThan._transform`` method with high as scalar.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high scalar value and the low column and create a diff column
with the logarithm of the distance + 1.
Setup:
- ``_high`` is set to 5 and ``_scalar`` is ``'high'``.
Input:
- Table with one low column and two dummy columns.
Output:
        - Same table with a diff column of the logarithms of the distances + 1,
          which are np.log(5), np.log(4) and np.log(3).
"""
# Setup
instance = GreaterThan(low='a', high=5, strict=True, scalar='high')
instance._diff_columns = ['a#b']
instance.constraint_columns = ['a']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
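        # With high=5, the distances 5 - a are [4, 3, 2], so the diff column is
        # expected to hold np.log(5), np.log(4) and np.log(3).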
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(5), np.log(4), np.log(3)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_low_is_scalar(self):
"""Test the ``GreaterThan._transform`` method with high as scalar.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high scalar value and the low column and create a diff column
with the logarithm of the distance + 1.
Setup:
- ``_high`` is set to 5 and ``_scalar`` is ``'low'``.
Input:
- Table with one low column and two dummy columns.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low=2, high='b', strict=True, scalar='low')
instance._diff_columns = ['a#b']
instance.constraint_columns = ['b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
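        # With low=2, the distances b - 2 are [2, 3, 4], so the diff column is
        # expected to hold np.log(3), np.log(4) and np.log(5).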
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(3), np.log(4), np.log(5)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan._transform`` method.
        The ``GreaterThan._transform`` method is expected to compute the distance
        between the scalar ``high`` value and each of the given ``low`` columns,
        and create diff columns with the logarithm of the distance + 1.
        Input:
        - Table with given data.
        Output:
        - Same table with additional diff columns of the logarithms of the
          distances + 1.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=3, strict=True, scalar='high')
instance._diff_columns = ['a#', 'b#']
instance.constraint_columns = ['a', 'b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
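        # With high=3, the distances 3 - b are non-positive, so the expected 'b#'
        # column contains np.log(0) (-inf) and np.log of negative values (NaN);
        # assert_frame_equal treats NaNs in matching positions as equal.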
out = instance._transform(table_data)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(3), np.log(2), np.log(1)],
'b#': [np.log(0), np.log(-1), np.log(-2)],
})
pd.testing.assert_frame_equal(out, expected)
def test__transform_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan._transform`` method.
        The ``GreaterThan._transform`` method is expected to compute the distance
        between each of the given ``high`` columns and the scalar ``low`` value,
        and create diff columns with the logarithm of the distance + 1.
        Input:
        - Table with given data.
        Output:
        - Same table with additional diff columns of the logarithms of the
          distances + 1.
"""
# Setup
instance = GreaterThan(low=3, high=['a', 'b'], strict=True, scalar='low')
instance._diff_columns = ['a#', 'b#']
instance.constraint_columns = ['a', 'b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(-1), np.log(0), np.log(1)],
'b#': [np.log(2), np.log(3), np.log(4)],
})
pd.testing.assert_frame_equal(out, expected)
def test__transform_scalar_is_none_multi_column(self):
"""Test the ``GreaterThan._transform`` method.
        The ``GreaterThan._transform`` method is expected to compute the distance
        between the ``high`` column and each of the ``low`` columns, and create
        diff columns with the logarithm of the distance + 1.
        Input:
        - Table with given data.
        Output:
        - Same table with additional diff columns of the logarithms of the
          distances + 1.
"""
# Setup
instance = GreaterThan(low=['a', 'c'], high='b', strict=True)
instance._diff_columns = ['a#', 'c#']
instance.constraint_columns = ['a', 'c']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(4)] * 3,
'c#': [np.log(-2)] * 3,
})
pd.testing.assert_frame_equal(out, expected)
def test_reverse_transform_int_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
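        # np.exp(np.log(4)) - 1 == 3, so 'b' should be rebuilt as a + 3 and cast
        # back to the stored integer dtype.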
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'b': [4, 5, 6],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_float_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype float.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to float values
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as float values
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = [np.dtype('float')]
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'b': [4.1, 5.2, 6.3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- add the low column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
- Same table with the high column replaced by the low one + one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = [np.dtype('<M8[ns]')]
instance._diff_columns = ['a#b']
instance._is_datetime = True
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
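        # np.exp(np.log(1_000_000_001)) - 1 is 1_000_000_000 nanoseconds (one
        # second), so 'b' should be rebuilt as a + 1 second.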
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01'])
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_int_drop_low(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the low column replaced by the high one - 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['a']
# Run
transformed = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a': [1, 2, 3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_low(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- subtract from the high column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
- Same table with the low column replaced by the high one - one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._dtype = [np.dtype('<M8[ns]')]
instance._diff_columns = ['a#b']
instance._is_datetime = True
instance._columns_to_reconstruct = ['a']
# Run
transformed = pd.DataFrame({
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00'])
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_int_drop_none(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low column is
higher than the high column.
Output:
- Same table with the high column replaced by the low one + 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 1, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
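        # Only the middle row is invalid (a=2 > b=1); its 'b' value should be
        # rebuilt as a + 3 = 5, while the valid rows keep their original 'b'.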
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_none(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- add the low column when the row is invalid
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``None``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
The table should have one invalid row where the low column is
higher than the high column.
Output:
- Same table with the high column replaced by the low one + one second
for all invalid rows, and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._dtype = [np.dtype('<M8[ns]')]
instance._diff_columns = ['a#b']
instance._is_datetime = True
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-01T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2]
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_low_is_scalar(self):
"""Test the ``GreaterThan.reverse_transform`` method with low as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_low`` is set to an int and ``_scalar`` is ``'low'``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low value is
higher than the high column.
Output:
- Same table with the high column replaced by the low value + 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=3, high='b', strict=True, scalar='low')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 1, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 6, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_high_is_scalar(self):
"""Test the ``GreaterThan.reverse_transform`` method with high as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_high`` is set to an int and ``_scalar`` is ``'high'``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low column is
higher than the high value.
Output:
- Same table with the low column replaced by the high one - 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high=3, strict=True, scalar='high')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['a']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 0],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan.reverse_transform`` method with high as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_high`` is set to an int and ``_scalar`` is ``'high'``.
- ``_low`` is set to multiple columns.
Input:
        - Table with diff columns that contain the constants np.log(4) and np.log(5).
          The table should have invalid rows where one of the low columns is
          higher than the high value.
        Output:
        - Same table with the low columns replaced by the high value - 3/-4 for all
          invalid rows, as int and the diff columns dropped.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=3, strict=True, scalar='high')
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'b#']
instance._columns_to_reconstruct = ['a', 'b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [0, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(4)] * 3,
'b#': [np.log(5)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 0, 0],
'b': [0, -1, -1],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan.reverse_transform`` method with low as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_low`` is set to an int and ``_scalar`` is ``'low'``.
- ``_high`` is set to multiple columns.
Input:
        - Table with diff columns that contain the constants np.log(4) and np.log(5).
          The table should have invalid rows where the low value is
          higher than one of the high columns.
        Output:
        - Same table with the high columns replaced by the low value +3/+4 for all
          invalid rows, as int and the diff columns dropped.
"""
# Setup
instance = GreaterThan(low=3, high=['a', 'b'], strict=True, scalar='low')
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'b#']
instance._columns_to_reconstruct = ['a', 'b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(4)] * 3,
'b#': [np.log(5)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [6, 6, 4],
'b': [7, 7, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_scalar_is_none_multi_column(self):
"""Test the ``GreaterThan.reverse_transform`` method with low as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low value when the row is invalid
- convert the output to integers
Setup:
- ``_low`` = ['a', 'c'].
- ``_high`` = ['b'].
Input:
- Table with a diff column that contains the constant np.log(4)/np.log(-2).
The table should have one invalid row where the low value is
higher than the high column.
Output:
- Same table with the high column replaced by the low value +3/-4 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=['a', 'c'], high=['b'], strict=True)
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'c#']
instance._columns_to_reconstruct = ['a', 'c']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(1)] * 3,
'c#': [np.log(1)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_multi_column_positive(self):
"""Test the ``GreaterThan.reverse_transform`` method for positive constraint.
        The ``GreaterThan.reverse_transform`` method is expected to:
        - apply an exponential to the input
        - subtract 1
        - add the result to the low value when the row is invalid
        - convert the output to integers
        Input:
        - Table with given data.
        Output:
        - Same table with replaced rows and the diff columns dropped.
"""
# Setup
instance = GreaterThan(low=0, high=['a', 'b'], strict=True, scalar='low')
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'b#']
instance._columns_to_reconstruct = ['a', 'b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, -1],
'c': [7, 8, 9],
'a#': [np.log(2), np.log(3), np.log(4)],
'b#': [np.log(5), np.log(6), np.log(0)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 0],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_multi_column_negative(self):
"""Test the ``GreaterThan.reverse_transform`` method for negative constraint.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Input:
- Table with given data.
Output:
        - Same table with replaced rows and the diff columns dropped.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=0, strict=True, scalar='high')
dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._dtype = [dtype, dtype]
instance._diff_columns = ['a#', 'b#']
instance._columns_to_reconstruct = ['a', 'b']
# Run
transformed = pd.DataFrame({
'a': [-1, -2, 1],
'b': [-4, -5, -1],
'c': [7, 8, 9],
'a#': [np.log(2), np.log(3), np.log(0)],
'b#': [np.log(5), np.log(6), np.log(2)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [-1, -2, 0],
'b': [-4, -5, -1],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
class TestPositive():
def test__init__(self):
"""
Test the ``Positive.__init__`` method.
The method is expected to set the ``_low`` instance variable
to 0, the ``_scalar`` variable to ``'low'``. The rest of the
parameters should be passed. Check that ``_drop`` is set to
``None`` when ``drop`` is ``False``.
        Input:
        - strict = True
        - columns = 'a'
        - drop = False
        Side effects:
        - instance._low == 0
        - instance._high == ['a']
        - instance._strict == True
        - instance._scalar == 'low'
        - instance._drop is None
"""
# Run
instance = Positive(columns='a', strict=True, drop=False)
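        # ``Positive`` is expected to pin the low side to the scalar 0 and move
        # the given column name into the ``_high`` list, as asserted below.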
# Asserts
assert instance._low == 0
assert instance._high == ['a']
assert instance._strict is True
assert instance._scalar == 'low'
assert instance._drop is None
def test__init__drop_true(self):
"""
Test the ``Positive.__init__`` method with drop is ``True``.
Check that ``_drop`` is set to 'high' when ``drop`` is ``True``.
        Input:
        - strict = True
        - columns = 'a'
        - drop = True
        Side effects:
        - instance._low == 0
        - instance._high == ['a']
        - instance._strict == True
        - instance._scalar == 'low'
        - instance._drop == 'high'
"""
# Run
instance = Positive(columns='a', strict=True, drop=True)
# Asserts
assert instance._low == 0
assert instance._high == ['a']
assert instance._strict is True
assert instance._scalar == 'low'
assert instance._drop == 'high'
class TestNegative():
def test__init__(self):
"""
Test the ``Negative.__init__`` method.
The method is expected to set the ``_high`` instance variable
to 0, the ``_scalar`` variable to ``'high'``. The rest of the
parameters should be passed. Check that ``_drop`` is set to
``None`` when ``drop`` is ``False``.
        Input:
        - strict = True
        - columns = 'a'
        - drop = False
        Side effects:
        - instance._low == ['a']
        - instance._high == 0
        - instance._strict == True
        - instance._scalar == 'high'
        - instance._drop is None
"""
# Run
instance = Negative(columns='a', strict=True, drop=False)
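        # ``Negative`` mirrors ``Positive``: the high side is pinned to the
        # scalar 0 and the given column name goes into the ``_low`` list.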
# Asserts
assert instance._low == ['a']
assert instance._high == 0
assert instance._strict is True
assert instance._scalar == 'high'
assert instance._drop is None
def test__init__drop_true(self):
"""
Test the ``Negative.__init__`` method with drop is ``True``.
Check that ``_drop`` is set to 'low' when ``drop`` is ``True``.
        Input:
        - strict = True
        - columns = 'a'
        - drop = True
        Side effects:
        - instance._low == ['a']
        - instance._high == 0
        - instance._strict == True
        - instance._scalar == 'high'
        - instance._drop == 'low'
"""
# Run
instance = Negative(columns='a', strict=True, drop=True)
# Asserts
assert instance._low == ['a']
assert instance._high == 0
assert instance._strict is True
assert instance._scalar == 'high'
assert instance._drop == 'low'
def new_column(data):
"""Formula to be used for the ``TestColumnFormula`` class."""
if data['a'] is None or data['b'] is None:
return None
return data['a'] + data['b']
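# Note: in these tests the formula receives a full DataFrame, so ``data['a']`` is
# a pandas Series and is never ``None``; the guard above therefore never triggers
# and NaN values simply propagate through the addition.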
class TestColumnFormula():
def test___init__(self):
"""Test the ``ColumnFormula.__init__`` method.
It is expected to create a new Constraint instance,
import the formula to use for the computation, and
set the specified constraint column.
Input:
- column = 'col'
- formula = new_column
"""
# Setup
column = 'col'
# Run
instance = ColumnFormula(column=column, formula=new_column)
# Assert
assert instance._column == column
assert instance._formula == new_column
assert instance.constraint_columns == ('col', )
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``ColumnFormula.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Setup
column = 'col'
# Run
instance = ColumnFormula(column=column, formula=new_column, handling_strategy='transform')
# Assert
assert instance.rebuild_columns == (column,)
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``ColumnFormula.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Setup
column = 'col'
# Run
instance = ColumnFormula(column=column, formula=new_column,
handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test_is_valid_valid(self):
"""Test the ``ColumnFormula.is_valid`` method for a valid data.
If the data fulfills the formula, result is a series of ``True`` values.
Input:
- Table data fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``True`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_valid(self):
"""Test the ``ColumnFormula.is_valid`` method for a non-valid data.
If the data does not fulfill the formula, result is a series of ``False`` values.
Input:
- Table data not fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``False`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [1, 2, 3]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([False, False, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_with_nans(self):
"""Test the ``ColumnFormula.is_valid`` method for with a formula that produces nans.
If the data fulfills the formula, result is a series of ``True`` values.
Input:
- Table data fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``True`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, None],
'c': [5, 7, None]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test__transform(self):
"""Test the ``ColumnFormula._transform`` method.
It is expected to drop the indicated column from the table.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data without the indicated column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_without_dropping_column(self):
"""Test the ``ColumnFormula._transform`` method without dropping the column.
        If ``drop_column`` is ``False``, expect the constraint column not to be dropped.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with the indicated column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column, drop_column=False)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_missing_column(self):
"""Test the ``ColumnFormula._transform`` method when the constraint column is missing.
When ``_transform`` is called with data that does not contain the constraint column,
expect to return the data as-is.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data, unchanged (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'd': [5, 7, 9]
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'd': [5, 7, 9]
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform(self):
"""Test the ``ColumnFormula.reverse_transform`` method.
It is expected to compute the indicated column by applying the given formula.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [1, 1, 1]
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
pd.testing.assert_frame_equal(expected_out, out)
class TestRounding():
def test___init__(self):
"""Test the ``Rounding.__init__`` method.
It is expected to create a new Constraint instance
and set the rounding args.
Input:
- columns = ['b', 'c']
- digits = 2
"""
# Setup
columns = ['b', 'c']
digits = 2
# Run
instance = Rounding(columns=columns, digits=digits)
# Assert
assert instance._columns == columns
assert instance._digits == digits
def test___init__invalid_digits(self):
"""Test the ``Rounding.__init__`` method with an invalid argument.
Pass in an invalid ``digits`` argument, and expect a ValueError.
Input:
- columns = ['b', 'c']
- digits = 20
"""
# Setup
columns = ['b', 'c']
digits = 20
# Run
with pytest.raises(ValueError):
Rounding(columns=columns, digits=digits)
def test___init__invalid_tolerance(self):
"""Test the ``Rounding.__init__`` method with an invalid argument.
Pass in an invalid ``tolerance`` argument, and expect a ValueError.
Input:
- columns = ['b', 'c']
- digits = 2
- tolerance = 0.1
"""
# Setup
columns = ['b', 'c']
digits = 2
tolerance = 0.1
# Run
with pytest.raises(ValueError):
Rounding(columns=columns, digits=digits, tolerance=tolerance)
def test_is_valid_positive_digits(self):
"""Test the ``Rounding.is_valid`` method for a positive digits argument.
        Input:
        - Table data with some values within the tolerance of the desired
          decimal places and some not (pandas.DataFrame)
        Output:
        - Series with ``True`` for the valid rows and ``False`` for the rest
          (pandas.Series)
"""
# Setup
columns = ['b', 'c']
digits = 2
tolerance = 1e-3
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.12, 5.51, None, 6.941, 1.129],
'c': [5.315, 7.12, 1.12, 9.131, 12.329],
'd': ['a', 'b', 'd', 'e', None],
'e': [123.31598, -1.12001, 1.12453, 8.12129, 1.32923]
})
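        # Judging by the expected output below: values that differ from their
        # 2-digit rounding by no more than the tolerance (1e-3) are valid, and a
        # NaN in a checked column makes the row invalid.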
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([False, True, False, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_negative_digits(self):
"""Test the ``Rounding.is_valid`` method for a negative digits argument.
        Input:
        - Table data with some values within the tolerance of the desired
          rounding and some not (pandas.DataFrame)
        Output:
        - Series with ``True`` for the valid rows and ``False`` for the rest
          (pandas.Series)
"""
# Setup
columns = ['b']
digits = -2
tolerance = 1
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [401, 500, 6921, 799, None],
'c': [5.3134, 7.1212, 9.1209, 101.1234, None],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False, True, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_zero_digits(self):
"""Test the ``Rounding.is_valid`` method for a zero digits argument.
        Input:
        - Table data with some values within the tolerance of the desired
          decimal places and some not (pandas.DataFrame)
        Output:
        - Series with ``True`` for the valid rows and ``False`` for the rest
          (pandas.Series)
"""
# Setup
columns = ['b', 'c']
digits = 0
tolerance = 1e-4
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, None, 3, 4],
'b': [4, 5.5, 1.2, 6.0001, 5.99999],
'c': [5, 7.12, 1.31, 9.00001, 4.9999],
'd': ['a', 'b', None, 'd', 'e'],
'e': [2.1254, 17.12123, 124.12, 123.0112, -9.129434]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_reverse_transform_positive_digits(self):
"""Test the ``Rounding.reverse_transform`` method with positive digits.
        Expect that the columns are rounded according to the specified ``digits`` value.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
digits = 3
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, None, 4],
'b': [4.12345, None, 5.100, 6.0001, 1.7999],
'c': [1.1, 1.234, 9.13459, 4.3248, 6.1312],
'd': ['a', 'b', 'd', 'e', None]
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, None, 4],
'b': [4.123, None, 5.100, 6.000, 1.800],
'c': [1.100, 1.234, 9.135, 4.325, 6.131],
'd': ['a', 'b', 'd', 'e', None]
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_negative_digits(self):
"""Test the ``Rounding.reverse_transform`` method with negative digits.
        Expect that the columns are rounded according to the specified ``digits`` value.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
columns = ['b']
digits = -3
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [41234.5, None, 5000, 6001, 5928],
'c': [1.1, 1.23423, 9.13459, 12.12125, 18.12152],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [41000.0, None, 5000.0, 6000.0, 6000.0],
'c': [1.1, 1.23423, 9.13459, 12.12125, 18.12152],
'd': ['a', 'b', 'd', 'e', 'f']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_zero_digits(self):
"""Test the ``Rounding.reverse_transform`` method with zero digits.
        Expect that the columns are rounded according to the specified ``digits`` value.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
digits = 0
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.12345, None, 5.0, 6.01, 7.9],
'c': [1.1, 1.0, 9.13459, None, 8.89],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.0, None, 5.0, 6.0, 8.0],
'c': [1.0, 1.0, 9.0, None, 9.0],
'd': ['a', 'b', 'd', 'e', 'f']
})
pd.testing.assert_frame_equal(expected_out, out)
def transform(data, low, high):
"""Transform to be used for the TestBetween class."""
data = (data - low) / (high - low) * 0.95 + 0.025
return np.log(data / (1.0 - data))
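# The helper mirrors the transformation the tests expect from ``Between``: rescale
# the column from [low, high] into the interval (0.025, 0.975), then apply a logit.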
class TestBetween():
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``Between.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Setup
column = 'col'
# Run
instance = Between(column=column, low=10, high=20, handling_strategy='transform')
# Assert
assert instance.rebuild_columns == (column,)
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``Between.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Setup
column = 'col'
# Run
instance = Between(column=column, low=10, high=20, handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test_fit_only_one_datetime_arg(self):
"""Test the ``Between.fit`` method by passing in only one arg as datetime.
If only one of the bound parameters is a datetime type, expect a ValueError.
Input:
        - low is a numeric scalar
- high is a datetime
Output:
- n/a
Side Effects:
- ValueError
"""
# Setup
column = 'a'
low = 0.0
high = pd.to_datetime('2021-01-01')
instance = Between(column=column, low=low, high=high)
# Run and assert
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [4, 5, 6],
})
with pytest.raises(ValueError):
instance.fit(table_data)
def test_transform_scalar_scalar(self):
"""Test the ``Between.transform`` method by passing ``low`` and ``high`` as scalars.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [4, 5, 6],
})
instance.fit(table_data)
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'a#0.0#1.0': transform(table_data[column], low, high)
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_scalar_column(self):
"""Test the ``Between._transform`` method with ``low`` as scalar and ``high`` as a column.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0.5, 1, 6],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0.5, 1, 6],
'a#0.0#b': transform(table_data[column], low, table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_column_scalar(self):
"""Test the ``Between._transform`` method with ``low`` as a column and ``high`` as scalar.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0, -1, 0.5],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0, -1, 0.5],
'a#b#1.0': transform(table_data[column], table_data[low], high)
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_column_column(self):
"""Test the ``Between._transform`` method by passing ``low`` and ``high`` as columns.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0, -1, 0.5],
'c': [0.5, 1, 6]
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0, -1, 0.5],
'c': [0.5, 1, 6],
'a#b#c': transform(table_data[column], table_data[low], table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_datetime_datetime(self):
"""Test the ``Between._transform`` method by passing ``low`` and ``high`` as datetimes.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
- High and Low as datetimes
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = pd.to_datetime('1900-01-01')
high = pd.to_datetime('2021-01-01')
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
'b': [4, 5, 6],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'a#1900-01-01T00:00:00.000000000#2021-01-01T00:00:00.000000000': transform(
table_data[column], low, high)
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_datetime_column(self):
"""Test the ``Between._transform`` method with ``low`` as datetime and ``high`` as a column.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = pd.to_datetime('1900-01-01')
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
'b': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
'a#1900-01-01T00:00:00.000000000#b': transform(
table_data[column], low, table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_column_datetime(self):
"""Test the ``Between._transform`` method with ``low`` as a column and ``high`` as datetime.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = pd.to_datetime('2021-01-01')
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
'a#b#2021-01-01T00:00:00.000000000': transform(
table_data[column], table_data[low], high)
})
pd.testing.assert_frame_equal(expected_out, out)
def test__transform_column_column_datetime(self):
"""Test the ``Between._transform`` method with ``low`` and ``high`` as datetime columns.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
# Run
table_data = pd.DataFrame({
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
'c': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
]
})
instance.fit(table_data)
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
'c': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
'a#b#c': transform(table_data[column], table_data[low], table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_scalar_scalar(self):
"""Test ``Between.reverse_transform`` with ``low`` and ``high`` as scalars.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
        - Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
table_data = pd.DataFrame({
'b': [4, 5, 6],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [4, 5, 6],
'a#0.0#1.0': transform(table_data[column], low, high)
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_scalar_column(self):
"""Test ``Between.reverse_transform`` with ``low`` as scalar and ``high`` as a column.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
        - Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
table_data = pd.DataFrame({
'b': [0.5, 1, 6],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [0.5, 1, 6],
'a#0.0#b': transform(table_data[column], low, table_data[high])
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_column_scalar(self):
"""Test ``Between.reverse_transform`` with ``low`` as a column and ``high`` as scalar.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
        - Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
table_data = pd.DataFrame({
'b': [0, -1, 0.5],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [0, -1, 0.5],
'a#b#1.0': transform(table_data[column], table_data[low], high)
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_column_column(self):
"""Test ``Between.reverse_transform`` with ``low`` and ``high`` as columns.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
        - Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
table_data = pd.DataFrame({
'b': [0, -1, 0.5],
'c': [0.5, 1, 6],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [0, -1, 0.5],
'c': [0.5, 1, 6],
'a#b#c': transform(table_data[column], table_data[low], table_data[high])
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_datetime_datetime(self):
"""Test ``Between.reverse_transform`` with ``low`` and ``high`` as datetime.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
- High and low as datetimes
Output:
        - Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = pd.to_datetime('1900-01-01')
high = pd.to_datetime('2021-01-01')
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
table_data = pd.DataFrame({
'b': [4, 5, 6],
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [4, 5, 6],
'a#1900-01-01T00:00:00.000000000#2021-01-01T00:00:00.000000000': transform(
table_data[column], low, high)
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_series_equal(expected_out['b'], out['b'])
pd.testing.assert_series_equal(expected_out['a'], out['a'].astype('datetime64[ms]'))
def test_reverse_transform_datetime_column(self):
"""Test ``Between.reverse_transform`` with ``low`` as datetime and ``high`` as a column.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
        - Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = pd.to_datetime('1900-01-01')
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
table_data = pd.DataFrame({
'b': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-02'),
pd.to_datetime('2020-08-03'),
]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
'a#1900-01-01T00:00:00.000000000#b': transform(
table_data[column], low, table_data[high])
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_column_datetime(self):
"""Test ``Between.reverse_transform`` with ``low`` as a column and ``high`` as datetime.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
- Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = pd.to_datetime('2021-01-01')
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
table_data = pd.DataFrame({
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-03'),
pd.to_datetime('2020-08-04'),
],
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
'a#b#2021-01-01T00:00:00.000000000': transform(
table_data[column], table_data[low], high)
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_series_equal(expected_out['b'], out['b'])
pd.testing.assert_series_equal(expected_out['a'], out['a'].astype('datetime64[ms]'))
def test_reverse_transform_column_column_datetime(self):
"""Test ``Between.reverse_transform`` with ``low`` and ``high`` as datetime columns.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
- Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
table_data = pd.DataFrame({
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
'c': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
'a': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-01'),
pd.to_datetime('2020-08-03'),
],
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [
pd.to_datetime('2020-01-03'),
pd.to_datetime('2020-02-01'),
pd.to_datetime('2020-02-03'),
],
'c': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-03'),
],
'a#b#c': transform(table_data[column], table_data[low], table_data[high])
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_is_valid_strict_true(self):
"""Test the ``Between.is_valid`` method with strict True.
If strict is True, equal values should count as invalid.
Input:
- Table with a valid row, a strictly invalid row and an
invalid row. (pandas.DataFrame)
Output:
- True should be returned for the valid row and False
for the other two. (pandas.Series)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, strict=True, high_is_scalar=True,
low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 1, 3],
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False])
pd.testing.assert_series_equal(expected_out, out, check_names=False)
def test_is_valid_strict_false(self):
"""Test the ``Between.is_valid`` method with strict False.
If strict is False, equal values should count as valid.
Input:
- Table with a valid row, a strictly invalid row and an
invalid row. (pandas.DataFrame)
Output:
- True should be returned for the first two rows, and False
for the last one (pandas.Series)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, strict=False, high_is_scalar=True,
low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 1, 3],
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False])
pd.testing.assert_series_equal(expected_out, out, check_names=False)
def test_is_valid_scalar_column(self):
"""Test the ``Between.is_valid`` method with ``low`` as scalar and ``high`` as a column.
It is expected to return whether the constraint ``column`` is between the
``low`` and ``high`` values.
Input:
- Table data where the last value is greater than ``high``. (pandas.DataFrame)
Output:
- True should be returned for the two first rows, False
for the last one. (pandas.Series)
"""
# Setup
column = 'a'
low = 0.0
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0.5, 1, 0.6],
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_column_scalar(self):
"""Test the ``Between.is_valid`` method with ``low`` as a column and ``high`` as scalar.
It is expected to return whether the constraint ``column`` is between the
``low`` and ``high`` values.
Input:
- Table data where the second value is smaller than ``low`` and
last value is greater than ``high``. (pandas.DataFrame)
Output:
- True should be returned for the first row, False
for the last two. (pandas.Series)
"""
# Setup
column = 'a'
low = 'b'
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 1.9],
'b': [-0.5, 1, 0.6],
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_column_column(self):
"""Test the ``Between.is_valid`` method with ``low`` and ``high`` as columns.
It is expected to return whether the constraint ``column`` is between the
``low`` and ``high`` values.
Input:
- Table data where the last value is greater than ``high``. (pandas.DataFrame)
Output:
- True should be returned for the two first rows, False
for the last one. (pandas.Series)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0, -1, 0.5],
'c': [0.5, 1, 0.6]
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False])
pd.testing.assert_series_equal(expected_out, out)
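# NOTE (editor's sketch, an assumption about the implementation): consistent with the
# expected outputs above, ``is_valid`` presumably reduces to an element-wise comparison:
#
#   strict=True:  (low < data[column]) & (data[column] < high)
#   strict=False: (low <= data[column]) & (data[column] <= high)
#
# where ``low`` and ``high`` are either the given scalars or the corresponding columns.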
def test_is_valid_low_high_nans(self):
"""Test the ``Between.is_valid`` method with nan values in low and high columns.
If one of `low` or `high` is NaN, expect it to be ignored in the comparison.
If both are NaN or the constraint column is NaN, return True.
Input:
- Table with a NaN row
Output:
- True should be returned for the NaN row.
"""
# Setup
instance = Between(column='a', low='b', high='c')
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9, 1.0],
'b': [0, None, None, 0.4],
'c': [0.5, None, 0.6, None]
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_column_nans(self):
"""Test the ``Between.is_valid`` method with nan values in constraint column.
If the constraint column is NaN, expect that `is_valid` returns True.
Input:
- Table with a NaN row
Output:
- True should be returned for the NaN row.
"""
# Setup
instance = Between(column='a', low='b', high='c')
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, None],
'b': [0, 0.1, 0.5],
'c': [0.5, 1.5, 0.6]
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_high_scalar_low_nans(self):
"""Test the ``Between.is_valid`` method with ``high`` as scalar and ``low`` containing NaNs.
The NaNs in ``low`` should be ignored.
Input:
- Table with a NaN row
Output:
- The NaN values should be ignored when making comparisons.
"""
# Setup
column = 'a'
low = 'b'
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 1.9],
'b': [-0.5, None, None],
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False])
pd.testing.assert_series_equal(expected_out, out)
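# NOTE (editor's sketch, an assumption about the implementation): the NaN expectations in
# these tests are consistent with building the mask through negated comparisons, e.g.
# ``~(data[column] < low) & ~(data[column] > high)``. Because comparisons against NaN
# evaluate to False in pandas, a missing bound or a missing constraint value drops out of
# the check and the row is reported as valid.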
def test_is_valid_low_high_nans_datetime(self):
"""Test the ``Between.is_valid`` method with nan values in low and high datetime columns.
If one of `low` or `high` is NaN, expect it to be ignored in the comparison.
If both are NaN or the constraint column is NaN, return True.
Input:
- Table with a row containing NaNs.
Output:
- True should be returned for the NaN row.
"""
# Setup
instance = Between(column='a', low='b', high='c')
# Run
table_data = pd.DataFrame({
'a': [
pd.to_datetime('2020-09-13'),
pd.to_datetime('2020-08-12'),
pd.to_datetime('2020-08-13'),
pd.to_datetime('2020-08-14'),
],
'b': [
pd.to_datetime('2020-09-03'),
None,
None,
pd.to_datetime('2020-10-03'),
],
'c': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
None,
None,
]
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_column_nans_datetime(self):
"""Test the ``Between.is_valid`` method with nan values in the constraint column.
If there is a row containing NaNs, expect that `is_valid` returns True.
Input:
- Table with a row containing NaNs.
Output:
- True should be returned for the NaN row.
"""
# Setup
instance = Between(column='a', low='b', high='c')
# Run
table_data = pd.DataFrame({
'a': [
None,
pd.to_datetime('2020-08-12'),
pd.to_datetime('2020-08-13'),
],
'b': [
pd.to_datetime('2020-09-03'),
pd.to_datetime('2020-08-02'),
pd.to_datetime('2020-08-03'),
],
'c': [
pd.to_datetime('2020-10-03'),
pd.to_datetime('2020-11-01'),
pd.to_datetime('2020-11-01'),
]
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
import warnings
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from pandas.testing import assert_frame_equal
from woodwork.logical_types import (
Boolean,
Categorical,
Double,
Integer,
NaturalLanguage,
)
from rayml.pipelines.components import PerColumnImputer
from rayml.utils.woodwork_utils import infer_feature_types
@pytest.fixture
def non_numeric_df():
X = pd.DataFrame(
[
["a", "a", "a", "a"],
["b", "b", "b", "b"],
["a", "a", "a", "a"],
[np.nan, np.nan, np.nan, np.nan],
]
)
X.columns = ["A", "B", "C", "D"]
return X
def test_invalid_parameters():
with pytest.raises(ValueError):
strategies = ("impute_strategy", "mean")
PerColumnImputer(impute_strategies=strategies)
with pytest.raises(ValueError):
strategies = ["mean"]
PerColumnImputer(impute_strategies=strategies)
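# NOTE (editor's assumption, not from the original file): a valid ``impute_strategies``
# value is presumably a dict keyed by column name, e.g.
# ``PerColumnImputer(impute_strategies={"A": {"impute_strategy": "mean"}})``; the tuple
# and the plain list above are rejected because they are not dicts.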
def test_all_strategies():
X = pd.DataFrame(
{
"A": pd.Series([2, 4, 6, np.nan]),
"B": pd.Series([4, 6, 4, np.nan]),
"C": | pd.Series([6, 8, 8, np.nan]) | pandas.Series |
import pandas as pd
from services.Utils.pusher import Pusher
class AddDBObject(object):
def __init__(self, step=0, msg="", obj=None, obj_data=None):
self.obj = obj
self.obj_data = obj_data
self.step = step
self.msg = msg
# Create an empty dataframe
self.df = pd.DataFrame()
#!pip install fitbit
#!pip install -r requirements/base.txt
#!pip install -r requirements/dev.txt
#!pip install -r requirements/test.txt
from time import sleep
import fitbit
import cherrypy
import requests
import json
import datetime
import scipy.stats
import pandas as pd
import numpy as np
# plotting
import matplotlib
from matplotlib import pyplot as plt
import seaborn as sns
get_ipython().magic(u'matplotlib inline')
with open('heartrate/HR2017-12-23.json') as f:
hr_dat_sample = json.loads(f.read())
parsed_json_hr_samp = json.loads(hr_dat_sample)
list(parsed_json_hr_samp['activities-heart-intraday'].keys())
# ## Heart Rate
dates = pd.date_range('2017-12-23', '2018-01-25')
hrval = []
for date in dates:
fname = 'heartrate/HR' + date.strftime('%Y-%m-%d') + '.json'
with open(fname) as f:
date_data = json.loads(f.read())
data = pd.read_json(date_data, typ='series')
hrval.append(data['activities-heart-intraday']['dataset'][1])
HR_df = pd.DataFrame(hrval,index = dates)
HR_df.columns = ['time', 'bpm']
HR_df.head()
stats = data['activities-heart-intraday']['dataset']
HR= | pd.DataFrame(stats) | pandas.DataFrame |
# run this script if the underlying covid19 disease parameters have been modified
from ai4good.params.disease_params import covid_specific_parameters
import ai4good.utils.path_utils as pu
import pandas as pd
import numpy as np
if __name__=="__main__":
generated_params = {}
np.random.seed(42)
generated_params['R0'] = np.random.normal(covid_specific_parameters["R0_medium"], 1, 1000)
generated_params['LatentPeriod'] = np.random.normal(covid_specific_parameters["Latent_period"], 1, 1000)
generated_params['RemovalPeriod'] = np.random.normal(covid_specific_parameters["Infectious_period"], 1, 1000)
generated_params['HospPeriod'] = np.random.normal(covid_specific_parameters["Hosp_period"], 1, 1000)
generated_params['DeathICUPeriod'] = np.random.normal(covid_specific_parameters["Death_period_withICU"], 1, 1000)
generated_params['DeathNoICUPeriod'] = np.random.normal(covid_specific_parameters["Death_period"], 1, 1000)
generated_params_df = pd.DataFrame(generated_params)
import datetime
import inspect
import numpy.testing as npt
import os.path
import pandas as pd
import pandas.util.testing as pdt
import sys
from tabulate import tabulate
import unittest
# #find parent directory and import model
# parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# sys.path.append(parentddir)
from ..screenip_exe import Screenip
test = {}
class TestScreenip(unittest.TestCase):
"""
Unit tests for screenip.
"""
print("screenip unittests conducted at " + str(datetime.datetime.today()))
def setUp(self):
"""
Setup routine for screenip unittest.
:return:
"""
pass
# screenip2 = screenip_model.screenip(0, pd_obj_inputs, pd_obj_exp_out)
# setup the test as needed
# e.g. pandas to open screenip qaqc csv
# Read qaqc csv and create pandas DataFrames for inputs and expected outputs
def tearDown(self):
"""
Teardown routine for screenip unittest.
:return:
"""
pass
# teardown called after each test
# e.g. maybe write test results to some text file
def create_screenip_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty screenip object
screenip_empty = Screenip(df_empty, df_empty)
return screenip_empty
def test_screenip_unit_fw_bird(self):
"""
unittest for function screenip.fw_bird:
:return:
"""
expected_results = pd.Series([0.0162, 0.0162, 0.0162], dtype='float')
result = pd.Series([], dtype='float')
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
try:
# for i in range(0,3):
# result[i] = screenip_empty.fw_bird()
screenip_empty.no_of_runs = len(expected_results)
screenip_empty.fw_bird()
result = screenip_empty.out_fw_bird
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_screenip_unit_fw_mamm(self):
"""
unittest for function screenip.fw_mamm:
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results = pd.Series([0.172, 0.172, 0.172], dtype='float')
result = pd.Series([], dtype='float')
try:
screenip_empty.no_of_runs = len(expected_results)
result = screenip_empty.fw_mamm()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_screenip_unit_dose_bird(self):
"""
unittest for function screenip.dose_bird:
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results = pd.Series([1000000., 4805.50175, 849727.21122], dtype='float')
result = pd.Series([], dtype='float')
try:
#(self.out_fw_bird * self.solubility)/(self.bodyweight_assessed_bird / 1000.)
screenip_empty.out_fw_bird = pd.Series([10., 0.329, 1.8349], dtype='float')
screenip_empty.solubility = pd.Series([100., 34.9823, 453.83], dtype='float')
screenip_empty.bodyweight_assessed_bird = pd.Series([1.0, 2.395, 0.98], dtype='float')
result = screenip_empty.dose_bird()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
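# Worked check of the first expected value (from the formula in the comment above):
# (10. * 100.) / (1.0 / 1000.) = 1000. / 0.001 = 1000000., matching expected_results[0].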
def test_screenip_unit_dose_mamm(self):
"""
unittest for function screenip.dose_mamm:
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results = pd.Series([8000000., 48205.7595, 3808036.37889], dtype='float')
result = pd.Series([], dtype='float')
try:
#(self.out_fw_mamm * self.solubility)/(self.bodyweight_assessed_mammal / 1000)
screenip_empty.out_fw_mamm = pd.Series([20., 12.843, 6.998], dtype='float')
screenip_empty.solubility = pd.Series([400., 34.9823, 453.83], dtype='float')
screenip_empty.bodyweight_assessed_mammal = pd.Series([1., 9.32, 0.834], dtype='float')
result = screenip_empty.dose_mamm()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_screenip_unit_at_bird(self):
"""
unittest for function screenip.at_bird:
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results = pd.Series([1000., 687.9231, 109.3361], dtype='float')
result = pd.Series([], dtype='float')
try:
#(self.ld50_avian_water) * ((self.bodyweight_assessed_bird / self.bodyweight_tested_bird)**(self.mineau_scaling_factor - 1.))
screenip_empty.ld50_avian_water = pd.Series([2000., 938.34, 345.83], dtype='float')
screenip_empty.bodyweight_assessed_bird = pd.Series([100., 39.49, 183.54], dtype='float')
screenip_empty.ld50_bodyweight_tested_bird = pd.Series([200., 73.473, 395.485], dtype='float')
screenip_empty.mineau_scaling_factor = pd.Series([2., 1.5, 2.5], dtype='float')
result = screenip_empty.at_bird()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
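# Worked check of the first expected value (from the formula in the comment above):
# 2000. * (100. / 200.) ** (2. - 1.) = 2000. * 0.5 = 1000., matching expected_results[0].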
def test_screenip_unit_at_mamm(self):
"""
unittest for function screenip.at_mamm:
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results = pd.Series([11.89207, 214.0572, 412.6864], dtype='float')
result = pd.Series([], dtype='float')
try:
#(self.ld50_mammal_water) * ((self.bodyweight_tested_mammal / self.bodyweight_assessed_mammal)**0.25)
screenip_empty.ld50_mammal_water = pd.Series([10., 250., 500.], dtype='float')
screenip_empty.ld50_bodyweight_tested_mammal = pd.Series([200., 39.49, 183.54], dtype='float')
screenip_empty.bodyweight_assessed_mammal = pd.Series([100., 73.473, 395.485], dtype='float')
result = screenip_empty.at_mamm()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_screenip_unit_fi_bird(self):
"""
unittest for function screenip.fi_bird:
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results = pd.Series([0.012999, 0.026578, 0.020412], dtype='float')
result = pd.Series([], dtype='float')
try:
#0.0582 * ((bw_grams / 1000.)**0.651)
bw_grams = pd.Series([100., 300., 200.], dtype='float')
result = screenip_empty.fi_bird(bw_grams)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
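# Worked check of the first expected value: 0.0582 * (100. / 1000.) ** 0.651
# = 0.0582 * 0.1 ** 0.651, which evaluates to about 0.012999 and matches expected_results[0].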
def test_screenip_unit_act(self):
"""
unittest for function screenip.test_act:
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results = pd.Series([10.5737, 124.8032, 416.4873], dtype='float')
result = pd.Series([], dtype='float')
try:
#(self.noael_mammal_water) * ((self.bodyweight_tested_mammal / self.bodyweight_assessed_mammal)**0.25)
screenip_empty.noael_mammal_water = pd.Series([10., 120., 400.], dtype='float')
screenip_empty.noael_bodyweight_tested_mammal = pd.Series([500., 385.45, 673.854], dtype='float')
screenip_empty.bodyweight_assessed_mammal = pd.Series([400., 329.45, 573.322], dtype='float')
result = screenip_empty.act()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_screenip_unit_det(self):
"""
unittest for function screenip.det
return:
"""
#
# '''
# Dose Equiv. Toxicity:
#
# The FI value (kg-diet) is multiplied by the reported NOAEC (mg/kg-diet) and then divided by
# the test animal's body weight to derive the dose-equivalent chronic toxicity value (mg/kg-bw):
#
# Dose Equiv. Toxicity = (NOAEC * FI) / BW
#
# NOTE: The user enters the lowest available NOAEC for the mallard duck, for the bobwhite quail,
# and for any other test species. The model calculates the dose equivalent toxicity values for
# all of the modeled values (Cells F20-24 and results worksheet) and then selects the lowest dose
# equivalent toxicity value to represent the chronic toxicity of the chemical to birds.
# '''
# try:
# # result =
# # self.assertEquals(result, )
# pass
# finally:
# pass
# return
#
#
# def test_det_duck(self):
# """
# unittest for function screenip.det_duck:
# :return:
# """
# try:
# # det_duck = (self.noaec_duck * self.fi_bird(1580.)) / (1580. / 1000.)
# screenip_empty.noaec_duck = pd.Series([1.], dtype='int')
# screenip_empty.fi_bird = pd.Series([1.], dtype='int')
# result = screenip_empty.det_duck()
# npt.assert_array_almost_equal(result, 1000., 4, '', True)
# finally:
# pass
# return
#
# def test_det_quail(self):
# """
# unittest for function screenip.det_quail:
# :return:
# """
# try:
# # det_quail = (self.noaec_quail * self.fi_bird(178.)) / (178. / 1000.)
# screenip_empty.noaec_quail = pd.Series([1.], dtype='int')
# screenip_empty.fi_bird = pd.Series([1.], dtype='int')
# result = screenip_empty.det_quail()
# npt.assert_array_almost_equal(result, 1000., 4, '', True)
# finally:
# pass
# return
#
# def test_det_other_1(self):
# """
# unittest for function screenip.det_other_1:
# :return:
# """
# try:
# #det_other_1 = (self.noaec_bird_other_1 * self.fi_bird(self.bodyweight_bird_other_1)) / (self.bodyweight_bird_other_1 / 1000.)
# #det_other_2 = (self.noaec_bird_other_2 * self.fi_bird(self.bodyweight_bird_other_1)) / (self.bodyweight_bird_other_1 / 1000.)
# screenip_empty.noaec_bird_other_1 = pd.Series([400.]) # mg/kg-diet
# screenip_empty.bodyweight_bird_other_1 = pd.Series([100]) # grams
# result = screenip_empty.det_other_1()
# npt.assert_array_almost_equal(result, 4666, 4)
# finally:
# pass
# return
#
# The following tests are configured such that:
# 1. four values are provided for each needed input
# 2. the four input values generate four values of out_det_* per bird type
# 3. the inputs per bird type are set so that calculations of out_det_* will result in
# each bird type having one minimum among the bird types;
# thus all four calculations result in one minimum per bird type
# create empty pandas dataframes to create empty object for this unittest
screenip_empty = self.create_screenip_object()
expected_results = pd.Series([4.2174, 4.96125, 7.97237, 10.664648], dtype='float')
result = pd.Series([], dtype='float')
try:
screenip_empty.bodyweight_bobwhite_quail = 178.
screenip_empty.bodyweight_mallard_duck = 1580.
screenip_empty.noaec_quail = pd.Series([100., 300., 75., 150.], dtype='float')
screenip_empty.noaec_duck = pd.Series([400., 100., 200., 350.], dtype='float')
screenip_empty.noaec_bird_other_1 = pd.Series([50., 200., 300., 250.], dtype='float')
screenip_empty.noaec_bird_other_2 = pd.Series([350., 400., 250., 100.], dtype='float')
screenip_empty.noaec_bodyweight_bird_other_1 = pd.Series([345.34, 453.54, 649.29, 294.56], dtype='float')
import pandas as pd
import pytest
import featuretools as ft
from featuretools.entityset import EntitySet
from featuretools.utils.gen_utils import import_or_none
ks = import_or_none('databricks.koalas')
@pytest.mark.skipif('not ks')
def test_single_table_ks_entityset():
primitives_list = ['absolute', 'is_weekend', 'year', 'day', 'num_characters', 'num_words']
ks_es = EntitySet(id="ks_es")
df = pd.DataFrame({"id": [0, 1, 2, 3],
"values": [1, 12, -34, 27],
"dates": [pd.to_datetime('2019-01-10'),
pd.to_datetime('2019-02-03'),
pd.to_datetime('2019-01-01'),
pd.to_datetime('2017-08-25')],
"strings": ["I am a string",
"23",
"abcdef ghijk",
""]})
values_dd = ks.from_pandas(df)
vtypes = {
"id": ft.variable_types.Id,
"values": ft.variable_types.Numeric,
"dates": ft.variable_types.Datetime,
"strings": ft.variable_types.NaturalLanguage
}
ks_es.entity_from_dataframe(entity_id="data",
dataframe=values_dd,
index="id",
variable_types=vtypes)
ks_fm, _ = ft.dfs(entityset=ks_es,
target_entity="data",
trans_primitives=primitives_list)
pd_es = ft.EntitySet(id="pd_es")
pd_es.entity_from_dataframe(entity_id="data",
dataframe=df,
index="id",
variable_types={"strings": ft.variable_types.NaturalLanguage})
fm, _ = ft.dfs(entityset=pd_es,
target_entity="data",
trans_primitives=primitives_list)
ks_computed_fm = ks_fm.to_pandas().set_index('id').loc[fm.index][fm.columns]
# NUM_WORDS(strings) is int32 in koalas for some reason
pd.testing.assert_frame_equal(fm, ks_computed_fm, check_dtype=False)
@pytest.mark.skipif('not ks')
def test_single_table_ks_entityset_ids_not_sorted():
primitives_list = ['absolute', 'is_weekend', 'year', 'day', 'num_characters', 'num_words']
ks_es = EntitySet(id="ks_es")
df = pd.DataFrame({"id": [2, 0, 1, 3],
"values": [1, 12, -34, 27],
"dates": [pd.to_datetime('2019-01-10'),
pd.to_datetime('2019-02-03'),
pd.to_datetime('2019-01-01'),
pd.to_datetime('2017-08-25')],
"strings": ["I am a string",
"23",
"abcdef ghijk",
""]})
values_dd = ks.from_pandas(df)
vtypes = {
"id": ft.variable_types.Id,
"values": ft.variable_types.Numeric,
"dates": ft.variable_types.Datetime,
"strings": ft.variable_types.NaturalLanguage
}
ks_es.entity_from_dataframe(entity_id="data",
dataframe=values_dd,
index="id",
variable_types=vtypes)
ks_fm, _ = ft.dfs(entityset=ks_es,
target_entity="data",
trans_primitives=primitives_list)
pd_es = ft.EntitySet(id="pd_es")
pd_es.entity_from_dataframe(entity_id="data",
dataframe=df,
index="id",
variable_types={"strings": ft.variable_types.NaturalLanguage})
fm, _ = ft.dfs(entityset=pd_es,
target_entity="data",
trans_primitives=primitives_list)
# Make sure both indexes are sorted the same
pd.testing.assert_frame_equal(fm, ks_fm.to_pandas().set_index('id').loc[fm.index], check_dtype=False)
@pytest.mark.skipif('not ks')
def test_single_table_ks_entityset_with_instance_ids():
primitives_list = ['absolute', 'is_weekend', 'year', 'day', 'num_characters', 'num_words']
instance_ids = [0, 1, 3]
ks_es = EntitySet(id="ks_es")
df = pd.DataFrame({"id": [0, 1, 2, 3],
"values": [1, 12, -34, 27],
"dates": [pd.to_datetime('2019-01-10'),
pd.to_datetime('2019-02-03'),
pd.to_datetime('2019-01-01'),
pd.to_datetime('2017-08-25')],
"strings": ["I am a string",
"23",
"abcdef ghijk",
""]})
values_dd = ks.from_pandas(df)
vtypes = {
"id": ft.variable_types.Id,
"values": ft.variable_types.Numeric,
"dates": ft.variable_types.Datetime,
"strings": ft.variable_types.NaturalLanguage
}
ks_es.entity_from_dataframe(entity_id="data",
dataframe=values_dd,
index="id",
variable_types=vtypes)
ks_fm, _ = ft.dfs(entityset=ks_es,
target_entity="data",
trans_primitives=primitives_list,
instance_ids=instance_ids)
pd_es = ft.EntitySet(id="pd_es")
pd_es.entity_from_dataframe(entity_id="data",
dataframe=df,
index="id",
variable_types={"strings": ft.variable_types.NaturalLanguage})
fm, _ = ft.dfs(entityset=pd_es,
target_entity="data",
trans_primitives=primitives_list,
instance_ids=instance_ids)
# Make sure both indexes are sorted the same
pd.testing.assert_frame_equal(fm, ks_fm.to_pandas().set_index('id').loc[fm.index], check_dtype=False)
@pytest.mark.skipif('not ks')
def test_single_table_ks_entityset_single_cutoff_time():
primitives_list = ['absolute', 'is_weekend', 'year', 'day', 'num_characters', 'num_words']
ks_es = EntitySet(id="ks_es")
df = pd.DataFrame({"id": [0, 1, 2, 3],
"values": [1, 12, -34, 27],
"dates": [pd.to_datetime('2019-01-10'),
pd.to_datetime('2019-02-03'),
pd.to_datetime('2019-01-01'),
pd.to_datetime('2017-08-25')],
"strings": ["I am a string",
"23",
"abcdef ghijk",
""]})
values_dd = ks.from_pandas(df)
vtypes = {
"id": ft.variable_types.Id,
"values": ft.variable_types.Numeric,
"dates": ft.variable_types.Datetime,
"strings": ft.variable_types.NaturalLanguage
}
ks_es.entity_from_dataframe(entity_id="data",
dataframe=values_dd,
index="id",
variable_types=vtypes)
ks_fm, _ = ft.dfs(entityset=ks_es,
target_entity="data",
trans_primitives=primitives_list,
cutoff_time=pd.Timestamp("2019-01-05 04:00"))
pd_es = ft.EntitySet(id="pd_es")
pd_es.entity_from_dataframe(entity_id="data",
dataframe=df,
index="id",
variable_types={"strings": ft.variable_types.NaturalLanguage})
fm, _ = ft.dfs(entityset=pd_es,
target_entity="data",
trans_primitives=primitives_list,
cutoff_time=pd.Timestamp("2019-01-05 04:00"))
# Make sure both indexes are sorted the same
pd.testing.assert_frame_equal(fm, ks_fm.to_pandas().set_index('id').loc[fm.index], check_dtype=False)
@pytest.mark.skipif('not ks')
def test_single_table_ks_entityset_cutoff_time_df():
primitives_list = ['absolute', 'is_weekend', 'year', 'day', 'num_characters', 'num_words']
ks_es = EntitySet(id="ks_es")
df = pd.DataFrame({"id": [0, 1, 2],
"values": [1, 12, -34],
"dates": [pd.to_datetime('2019-01-10'),
pd.to_datetime('2019-02-03'),
pd.to_datetime('2019-01-01'),
import pytest
import pandas as pd
import numpy as np
from ramprate.build_features import _find_uptime
def test__find_uptime_start_and_end_nonzero():
dt_idx = pd.date_range(start="2020-01-01 00:00", periods=6, freq="h", tz="UTC")
data = [2, 2, 0, 0, 0, 2]
# downtime=True
# first zero after non-zero
shutdown = pd.to_datetime(["2020-01-01 02:00"], utc=True)
# last zero before non-zero
startup = pd.to_datetime(["2020-01-01 04:00"], utc=True)
expected = pd.DataFrame({"shutdown": shutdown, "startup": startup})
import os
import pandas as pd
import json
import sys
class NestData():
def __init__(self, year):
self.base_dir = os.path.join("./data/NEST", str(year))
valid_months = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12']
sensor_files = [] # list of files with sensor data
summary_files = [] # list of files with cycle summary data
for month in valid_months:
dir_ = os.path.join(self.base_dir, month)
try:
files = os.listdir(dir_)
for file in files:
if ".csv" in file:
sensor_files.append(os.path.join(month, file))
if ".json" in file:
summary_files.append(os.path.join(month, file))
except:
print("No data for month: ", month)
continue
self.process_sensors(sensor_files)
self.process_cycles(summary_files)
self.process_events(summary_files)
def get_data(self):
return self.sensor_df, self.cycles_df, self.events_df
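# Hedged usage sketch (editor's addition, kept as a comment so the module stays importable;
# assumes ./data/NEST/<year>/<month>/ holds the Nest sensor CSVs and summary.json files):
#
#   nest = NestData(2020)
#   sensor_df, cycles_df, events_df = nest.get_data()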
# Events recorded in summary.json, contains target temperatures and heating cycles
def process_events(self, file_list):
to_df = []
for file in file_list:
with open(os.path.join(self.base_dir, file), 'r') as json_file:
data = json.load(json_file)
for day in data:
for event in data[day]['events']:
event_type = event['eventType'].split('_')[2] # eg "EVENT_TYPE_HEAT" take HEAT
if event_type == 'HEAT' or event_type == 'COOL': # if non-useful type, skip eg EVENT_TYPE_OFF
start_time = event['startTs']
start_time = start_time[:start_time.rfind(':')+3].replace('T', ' ')
duration = int(event['duration'][:-1])
heating_target = event['setPoint']['targets']['heatingTarget']
cooling_target = event['setPoint']['targets']['coolingTarget']
event_encoded = 0
if event_type == 'HEAT':
event_encoded = 1
elif event_type == 'COOL': # Looks like the cooler isn't actually hooked up
event_encoded = -1
d = {
'start_time' : start_time,
'event_encoded' : event_encoded,
'duration' : duration,
'heating_target' : heating_target,
'cooling_target' : cooling_target
}
to_df.append(d)
else:
continue
df = pd.DataFrame(to_df)
df['start_time'] = pd.to_datetime(df['start_time'])
df['end_time'] = df['start_time'] + pd.to_timedelta(df['duration'], unit='s')
df['start_time'] = df['start_time'].round('15T') # round to 15 minute intervals
df['end_time'] = df['end_time'].round('15T')
# generate coninuous time range encapsulating heatin intervals
d_range = pd.date_range(start = df['start_time'][0], end = df['end_time'].iloc[-1], freq='15T')
heating = []
heat_targets = []
cool_targets = []
i = 0
while i < len(d_range):
if d_range[i] in df['start_time'].values:
j = df['start_time'][df['start_time'] == d_range[i]].index
heat_target = df['heating_target'].values[j][0]
cool_target = df['cooling_target'].values[j][0]
state_target = df['event_encoded'].values[j][0]
while d_range[i] not in df['end_time'].values:
heating.append(state_target)
heat_targets.append(heat_target)
cool_targets.append(cool_target)
i += 1
heat_targets.append(heat_target)
cool_targets.append(cool_target)
heating.append(state_target)
i += 1
else:
heating.append(0)
heat_targets.append(0) # Setting these to zero might not be right but we will see
cool_targets.append(0)
i += 1
# Here are the names of the columns in the data frame
events_df = pd.DataFrame({'date_time': d_range,
'hvac_state': heating,
'heating_target': heat_targets,
'cooling_target': cool_targets})
events_df = events_df.set_index('date_time')
events_df = events_df.interpolate(method='time')
self.events_df = events_df
# Cycles returns the heating cycles, events contains more data
def process_cycles(self, file_list):
to_df = []
for file in file_list:
with open(os.path.join(self.base_dir, file), 'r') as json_file:
data = json.load(json_file)
for day in data:
for cycle in data[day]['cycles']:
caption = cycle['caption']['plainText'].split(' from')[0] # just take type of cycle
start_time = cycle['caption']['parameters']['startTime']
start_time = start_time[:start_time.rfind(':')+3].replace('T', ' ') # find last colon and keep the two numbers after it
endTime = cycle['caption']['parameters']['endTime']
endTime = endTime[:endTime.rfind(':')+3].replace('T', ' ')
duration = cycle['duration']
isComplete = cycle['isComplete']
# Turn data into a useable coded data frame
if 'heating' not in caption:
num_caption = 0
else:
num_caption = 1
d = {
'caption' : num_caption,
'start_time' : start_time,
'endTime' : endTime,
'duration' : duration # we don't use this at this point
}
to_df.append(d)
summary_df = pd.DataFrame(to_df)
summary_df['start_time'] = pd.to_datetime(summary_df['start_time'])
summary_df['endTime'] = pd.to_datetime(summary_df['endTime'])
summary_df['start_time'] = summary_df['start_time'].round('15T')
summary_df['endTime'] = summary_df['endTime'].round('15T')
# create date range filling in all 15 minute intervals between cycle start and end time
d_range = pd.date_range(start = summary_df['start_time'][0], end = summary_df['endTime'].iloc[-1], freq='15T')
heating = []
i = 0
while i < len(d_range):
if d_range[i] in summary_df['start_time'].values:
while d_range[i] not in summary_df['endTime'].values:
heating.append(1)
i += 1
heating.append(1) # basically once it finds the endTime it breaks and doesn't add for that interval, this can be removed by passing the index of the startTime that matched and using a conditional for the endTime with the same index
i += 1
else:
heating.append(0)
i += 1
# Here are the column names for the data frame
cycles_df = pd.DataFrame({'date_time': d_range, 'hvac_state': heating})
cycles_df = cycles_df.set_index('date_time')
cycles_df = cycles_df.interpolate(method='time')
self.cycles_df = cycles_df
def process_sensors(self, file_list):
sensor_list = []
for file in file_list:
sensor_list.append(pd.read_csv(os.path.join(self.base_dir, file)))
sensor_df = pd.concat(sensor_list)
sensor_df['date_time'] = | pd.to_datetime(sensor_df['Date'] + ' ' + sensor_df['Time']) | pandas.to_datetime |
import re
import datetime
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
# ---------------------------------------------------
# Person data methods
# ---------------------------------------------------
class TransformGenderGetFromName:
"""Gets clients' genders from theirs russian second names.
Parameters:
column_name (str): Column name in InsolverDataFrame containing clients' names, column type is string.
column_gender (str): Column name in InsolverDataFrame for clients' genders.
gender_male (str): Return value for male gender in InsolverDataFrame, 'male' by default.
gender_female (str): Return value for female gender in InsolverDataFrame, 'female' by default.
"""
def __init__(self, column_name, column_gender, gender_male='male', gender_female='female'):
self.priority = 0
self.column_name = column_name
self.column_gender = column_gender
self.gender_male = gender_male
self.gender_female = gender_female
@staticmethod
def _gender(client_name, gender_male, gender_female):
if pd.isnull(client_name):
gender = None
elif len(client_name) < 2:
gender = None
elif client_name.upper().endswith(('ИЧ', 'ОГЛЫ')):
gender = gender_male
elif client_name.upper().endswith(('НА', 'КЫЗЫ')):
gender = gender_female
else:
gender = None
return gender
def __call__(self, df):
df[self.column_gender] = df[self.column_name].apply(self._gender, args=(self.gender_male, self.gender_female,))
return df
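# Hedged usage sketch (editor's addition; the column names below are illustrative only):
#
#   df = pd.DataFrame({'patronymic': ['Иванович', 'Ивановна', None]})
#   df = TransformGenderGetFromName(column_name='patronymic', column_gender='gender')(df)
#   # df['gender'] -> ['male', 'female', None]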
class TransformAgeGetFromBirthday:
"""Gets clients' ages in years from theirs birth dates and policies' start dates.
Parameters:
column_date_birth (str): Column name in InsolverDataFrame containing clients' birth dates, column type is date.
column_date_start (str): Column name in InsolverDataFrame containing policies' start dates, column type is date.
column_age (str): Column name in InsolverDataFrame for clients' ages in years, column type is int.
"""
def __init__(self, column_date_birth, column_date_start, column_age):
self.priority = 0
self.column_date_birth = column_date_birth
self.column_date_start = column_date_start
self.column_age = column_age
@staticmethod
def _age_get(datebirth_datestart):
date_birth = datebirth_datestart[0]
date_start = datebirth_datestart[1]
if pd.isnull(date_birth):
age = None
elif pd.isnull(date_start):
age = None
elif date_birth > datetime.datetime.now():
age = None
elif date_birth.year < datetime.datetime.now().year - 120:
age = None
elif date_birth > date_start:
age = None
else:
age = int((date_start - date_birth).days // 365.25)
return age
def __call__(self, df):
df[self.column_age] = df[[self.column_date_birth, self.column_date_start]].apply(self._age_get, axis=1)
return df
class TransformAge:
"""Transforms values of drivers' minimum ages in years.
Values under 'age_min' are invalid. Values over 'age_max' will be grouped.
Parameters:
column_driver_minage (str): Column name in InsolverDataFrame containing drivers' minimum ages in years,
column type is integer.
age_min (int): Minimum value of drivers' age in years, lower values are invalid, 18 by default.
age_max (int): Maximum value of drivers' age in years, bigger values will be grouped, 70 by default.
"""
def __init__(self, column_driver_minage, age_min=18, age_max=70):
self.priority = 1
self.column_driver_minage = column_driver_minage
self.age_min = age_min
self.age_max = age_max
@staticmethod
def _age(age, age_min, age_max):
if pd.isnull(age):
age = None
elif age < age_min:
age = None
elif age > age_max:
age = age_max
return age
def __call__(self, df):
df[self.column_driver_minage] = df[self.column_driver_minage].apply(self._age,
args=(self.age_min, self.age_max))
return df
class TransformAgeGender:
"""Gets intersections of drivers' minimum ages and genders.
Parameters:
column_age (str): Column name in InsolverDataFrame containing clients' ages in years, column type is integer.
column_gender (str): Column name in InsolverDataFrame containing clients' genders.
column_age_m (str): Column name in InsolverDataFrame for males' ages, for females default value is applied,
column type is integer.
column_age_f (str): Column name in InsolverDataFrame for females' ages, for males default value is applied,
column type is integer.
age_default (int): Default value of the age in years,18 by default.
gender_male: Value for male gender in InsolverDataFrame, 'male' by default.
gender_female: Value for male gender in InsolverDataFrame, 'female' by default.
"""
def __init__(self, column_age, column_gender, column_age_m, column_age_f, age_default=18,
gender_male='male', gender_female='female'):
self.priority = 2
self.column_age = column_age
self.column_gender = column_gender
self.column_age_m = column_age_m
self.column_age_f = column_age_f
self.age_default = age_default
self.gender_male = gender_male
self.gender_female = gender_female
@staticmethod
def _age_gender(age_gender, age_default, gender_male, gender_female):
age = age_gender[0]
gender = age_gender[1]
if pd.isnull(age):
age_m = None
age_f = None
elif pd.isnull(gender):
age_m = None
age_f = None
elif gender == gender_male:
age_m = age
age_f = age_default
elif gender == gender_female:
age_m = age_default
age_f = age
else:
age_m = None
age_f = None
return [age_m, age_f]
def __call__(self, df):
df[self.column_age_m], df[self.column_age_f] = zip(*df[[self.column_age, self.column_gender]].apply(
self._age_gender, axis=1, args=(self.age_default, self.gender_male, self.gender_female)).to_frame()[0])
return df
class TransformExp:
"""Transforms values of drivers' minimum experiences in years with values over 'exp_max' grouped.
Parameters:
column_driver_minexp (str): Column name in InsolverDataFrame containing drivers' minimum experiences in years,
column type is integer.
exp_max (int): Maximum value of drivers' experience in years, bigger values will be grouped, 52 by default.
"""
def __init__(self, column_driver_minexp, exp_max=52):
self.priority = 1
self.column_driver_minexp = column_driver_minexp
self.exp_max = exp_max
@staticmethod
def _exp(exp, exp_max):
if pd.isnull(exp):
exp = None
elif exp < 0:
exp = None
elif exp > exp_max:
exp = exp_max
return exp
def __call__(self, df):
df[self.column_driver_minexp] = df[self.column_driver_minexp].apply(self._exp, args=(self.exp_max,))
return df
class TransformAgeExpDiff:
"""Transforms records with difference between drivers' minimum age and minimum experience less then 'diff_min'
years, sets drivers' minimum experience equal to drivers' minimum age minus 'diff_min' years.
Parameters:
column_driver_minage (str): Column name in InsolverDataFrame containing drivers' minimum ages in years,
column type is integer.
column_driver_minexp (str): Column name in InsolverDataFrame containing drivers' minimum experiences in years,
column type is integer.
diff_min (int): Minimum allowed difference between age and experience in years.
"""
def __init__(self, column_driver_minage, column_driver_minexp, diff_min=18):
self.priority = 2
self.column_driver_minage = column_driver_minage
self.column_driver_minexp = column_driver_minexp
self.diff_min = diff_min
def __call__(self, df):
self.num_errors = len(df.loc[(df[self.column_driver_minage] - df[self.column_driver_minexp]) < self.diff_min])
df[self.column_driver_minexp].loc[(df[self.column_driver_minage] - df[self.column_driver_minexp])
< self.diff_min] = df[self.column_driver_minage] - self.diff_min
return df
class TransformNameCheck:
"""Checks if clients' first names are in special list.
Names may concatenate surnames, first names and last names.
Parameters:
column_name (str): Column name in InsolverDataFrame containing clients' names, column type is string.
name_full (bool): Sign if name is the concatenation of surname, first name and last name, False by default.
column_name_check (str): Column name in InsolverDataFrame for bool values if first names are in the list or not.
names_list (list): The list of clients' first names.
"""
def __init__(self, column_name, column_name_check, names_list, name_full=False):
self.priority = 1
self.column_name = column_name
self.name_full = name_full
self.column_name_check = column_name_check
self.names_list = [n.upper() for n in names_list]
@staticmethod
def _name_get(client_name):
tokenize_re = re.compile(r'[\w\-]+', re.I)
try:
name = tokenize_re.findall(str(client_name))[1].upper()
return name
except Exception:
return 'ERROR'
def __call__(self, df):
if not self.name_full:
df[self.column_name_check] = 1 * df[self.column_name].isin(self.names_list)
else:
df[self.column_name_check] = 1 * df[self.column_name].apply(self._name_get).isin(self.names_list)
return df
# ---------------------------------------------------
# Vehicle data methods
# ---------------------------------------------------
class TransformVehPower:
"""Transforms values of vehicles' powers.
Values under 'power_min' and over 'power_max' will be grouped.
Values between 'power_min' and 'power_max' will be grouped with step 'power_step'.
Parameters:
column_veh_power (str): Column name in InsolverDataFrame containing vehicles' powers,
column type is float.
power_min (float): Minimum value of vehicles' power, lower values will be grouped, 10 by default.
power_max (float): Maximum value of vehicles' power, bigger values will be grouped, 500 by default.
power_step (int): Values of vehicles' power will be divided by this parameter, rounded to integers,
10 by default.
"""
def __init__(self, column_veh_power, power_min=10, power_max=500, power_step=10):
self.priority = 1
self.column_veh_power = column_veh_power
self.power_min = power_min
self.power_max = power_max
self.power_step = power_step
@staticmethod
def _power(power, power_min, power_max, power_step):
if pd.isnull(power):
power = None
elif power < power_min:
power = power_min
elif power > power_max:
power = power_max
else:
power = int(round(power / power_step, 0))
return power
def __call__(self, df):
df[self.column_veh_power] = df[self.column_veh_power].apply(self._power, args=(self.power_min, self.power_max,
self.power_step,))
return df
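# Worked example of the grouping above (editor's addition, following the code as written):
# with the defaults power_min=10, power_max=500, power_step=10, a power of 123 becomes
# int(round(123 / 10)) = 12, a power of 7 is clipped to power_min (10) and a power of 600
# is clipped to power_max (500); note that the clipped values are not divided by power_step.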
class TransformVehAgeGetFromIssueYear:
"""Gets vehicles' ages in years from issue years and policies' start dates.
Parameters:
column_veh_issue_year (str): Column name in InsolverDataFrame containing vehicles' issue years,
column type is integer.
column_date_start (str): Column name in InsolverDataFrame containing policies' start dates, column type is date.
column_veh_age (str): Column name in InsolverDataFrame for vehicles' ages in years, column type is integer.
"""
def __init__(self, column_veh_issue_year, column_date_start, column_veh_age):
self.priority = 0
self.column_veh_issue_year = column_veh_issue_year
self.column_date_start = column_date_start
self.column_veh_age = column_veh_age
@staticmethod
def _veh_age_get(issueyear_datestart):
veh_issue_year = issueyear_datestart[0]
date_start = issueyear_datestart[1]
if pd.isnull(veh_issue_year):
veh_age = None
elif pd.isnull(date_start):
veh_age = None
elif veh_issue_year > datetime.datetime.now().year:
veh_age = None
elif veh_issue_year < datetime.datetime.now().year - 90:
veh_age = None
elif veh_issue_year > date_start.year:
veh_age = None
else:
veh_age = date_start.year - veh_issue_year
return veh_age
def __call__(self, df):
df[self.column_veh_age] = df[[self.column_veh_issue_year,
self.column_date_start]].apply(self._veh_age_get, axis=1)
return df
class TransformVehAge:
"""Transforms values of vehicles' ages in years. Values over 'veh_age_max' will be grouped.
Parameters:
column_veh_age (str): Column name in InsolverDataFrame containing vehicles' ages in years,
column type is integer.
veh_age_max (int): Maximum value of vehicles' age in years, bigger values will be grouped, 25 by default.
"""
def __init__(self, column_veh_age, veh_age_max=25):
self.priority = 1
self.column_veh_age = column_veh_age
self.veh_age_max = veh_age_max
@staticmethod
def _veh_age(age, age_max):
if pd.isnull(age):
age = None
elif age < 0:
age = None
elif age > age_max:
age = age_max
return age
def __call__(self, df):
df[self.column_veh_age] = df[self.column_veh_age].apply(self._veh_age, args=(self.veh_age_max,))
return df
# ---------------------------------------------------
# Region data methods
# ---------------------------------------------------
class TransformRegionGetFromKladr:
"""Gets regions' numbers from KLADRs.
Parameters:
column_kladr (str): Column name in InsolverDataFrame containing KLADRs, column type is string.
column_region_num (str): Column name in InsolverDataFrame for regions' numbers, column type is integer.
"""
def __init__(self, column_kladr, column_region_num):
self.priority = 0
self.column_kladr = column_kladr
self.column_region_num = column_region_num
@staticmethod
def _region_get(kladr):
if pd.isnull(kladr):
region_num = None
else:
region_num = kladr[0:2]
try:
region_num = int(region_num)
except Exception:
region_num = None
return region_num
def __call__(self, df):
df[self.column_region_num] = df[self.column_kladr].apply(self._region_get)
return df
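# Worked example (editor's addition): for KLADR '7700000000000' the first two characters
# give region_num 77; a KLADR with a non-numeric prefix or a missing KLADR yields None.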
# ---------------------------------------------------
# Sorting data methods
# ---------------------------------------------------
class TransformParamUselessGroup:
"""Groups all parameter's values with few data to one group.
Parameters:
column_param (str): Column name in InsolverDataFrame containing parameter.
size_min (int): Minimum allowed number of records for each parameter value, 1000 by default.
group_name: Name of the group for parameter's values with few data.
inference (bool): Sign if the transformation is used for inference, False by default.
param_useless (list): The list of useless values of the parameter, for inference only.
"""
def __init__(self, column_param, size_min=1000, group_name=0, inference=False, param_useless=None):
self.priority = 1
self.column_param = column_param
self.size_min = size_min
self.group_name = group_name
self.inference = inference
if inference:
if param_useless is None:
raise NotImplementedError("'param_useless' should contain the list of useless values.")
self.param_useless = param_useless
else:
self.param_useless = []
@staticmethod
def _param_useless_get(df, column_param, size_min):
"""Checks the amount of data for each parameter's value.
Args:
df: InsolverDataFrame to explore.
column_param (str): Column name in InsolverDataFrame containing parameter.
size_min (int): Minimum allowed number of records for each parameter's value, 1000 by default.
Returns:
list: List of parameter's values with few data.
"""
param_size = pd.DataFrame(df.groupby(column_param).size().reset_index(name='param_size'))
param_useless = list(param_size[column_param].loc[param_size['param_size'] < size_min])
return param_useless
def __call__(self, df):
if not self.inference:
self.param_useless = self._param_useless_get(df, self.column_param, self.size_min)
df.loc[df[self.column_param].isin(self.param_useless), self.column_param] = self.group_name
return df
class TransformParamSortFreq:
"""Gets sorted by claims' frequency parameter's values.
Parameters:
column_param (str): Column name in InsolverDataFrame containing parameter.
column_param_sort_freq (str): Column name in InsolverDataFrame for sorted values of parameter,
column type is integer.
column_policies_count (str): Column name in InsolverDataFrame containing numbers of policies,
column type is integer or float.
column_claims_count (str): Column name in InsolverDataFrame containing numbers of claims,
column type is integer or float.
inference (bool): Sign if the transformation is used for inference, False by default.
param_freq_dict (dict): The dictionary of sorted values of the parameter, for inference only.
"""
def __init__(self, column_param, column_param_sort_freq, column_policies_count, column_claims_count,
inference=False, param_freq_dict=None):
self.priority = 2
self.column_param = column_param
self.column_param_sort_freq = column_param_sort_freq
self.column_policies_count = column_policies_count
self.column_claims_count = column_claims_count
self.param_freq = pd.DataFrame
self.inference = inference
if inference:
if param_freq_dict is None:
raise NotImplementedError("'param_freq_dict' should contain the dictionary of sorted values.")
self.param_freq_dict = param_freq_dict
else:
self.param_freq_dict = {}
def __call__(self, df):
if not self.inference:
self.param_freq = df.groupby([self.column_param]).sum()[[self.column_claims_count,
self.column_policies_count]]
self.param_freq['freq'] = (self.param_freq[self.column_claims_count] /
self.param_freq[self.column_policies_count])
keys = []
values = []
for i in enumerate(self.param_freq.sort_values('freq', ascending=False).index.values):
keys.append(i[1])
values.append(float(i[0]))
self.param_freq_dict = dict(zip(keys, values))
df[self.column_param_sort_freq] = df[self.column_param].map(self.param_freq_dict)
return df
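# Worked example (editor's addition): if parameter values A, B and C have claim frequencies
# 0.10, 0.05 and 0.02, the values are sorted by frequency in descending order and mapped as
# {'A': 0.0, 'B': 1.0, 'C': 2.0}, so lower codes correspond to higher claim frequency.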
class TransformParamSortAC:
"""Gets sorted by claims' average sum parameter's values.
Parameters:
column_param (str): Column name in InsolverDataFrame containing parameter.
column_param_sort_ac (str): Column name in InsolverDataFrame for sorted values of parameter,
column type is integer.
column_claims_count (str): Column name in InsolverDataFrame containing numbers of claims,
column type is integer or float.
column_claims_sum (str): Column name in InsolverDataFrame containing sums of claims,
column type is integer or float.
inference (bool): Sign if the transformation is used for inference, False by default.
param_ac_dict (dict): The dictionary of sorted values of the parameter, for inference only.
"""
def __init__(self, column_param, column_param_sort_ac, column_claims_count, column_claims_sum,
inference=False, param_ac_dict=None):
self.priority = 2
self.column_param = column_param
self.column_param_sort_ac = column_param_sort_ac
self.column_claims_count = column_claims_count
self.column_claims_sum = column_claims_sum
self.param_ac = pd.DataFrame
self.inference = inference
if inference:
if param_ac_dict is None:
raise NotImplementedError("'param_ac_dict' should contain the dictionary of sorted values.")
self.param_ac_dict = param_ac_dict
else:
self.param_ac_dict = {}
def __call__(self, df):
if not self.inference:
self.param_ac = df.groupby([self.column_param]).sum()[[self.column_claims_sum, self.column_claims_count]]
self.param_ac['avg_claim'] = self.param_ac[self.column_claims_sum] / self.param_ac[self.column_claims_count]
keys = []
values = []
for i in enumerate(self.param_ac.sort_values('avg_claim', ascending=False).index.values):
keys.append(i[1])
values.append(float(i[0]))
self.param_ac_dict = dict(zip(keys, values))
df[self.column_param_sort_ac] = df[self.column_param].map(self.param_ac_dict)
return df
# ---------------------------------------------------
# Other data methods
# ---------------------------------------------------
class TransformToNumeric:
"""Transforms parameter's values to numeric types, uses Pandas' 'to_numeric'.
Parameters:
column_param (str): Column name in InsolverDataFrame containing parameter to transform.
downcast: Target numeric dtype, equal to Pandas' 'downcast' in the 'to_numeric' function, 'integer' by default.
"""
def __init__(self, column_param, downcast='integer'):
self.priority = 0
self.column_param = column_param
self.downcast = downcast
def __call__(self, df):
df[self.column_param] = pd.to_numeric(df[self.column_param], downcast=self.downcast)
return df
class TransformMapValues:
"""Transforms parameter's values according to the dictionary.
Parameters:
column_param (str): Column name in InsolverDataFrame containing parameter to map.
dictionary (dict): The dictionary for mapping.
"""
def __init__(self, column_param, dictionary):
self.priority = 1
self.column_param = column_param
self.dictionary = dictionary
def __call__(self, df):
df[self.column_param] = df[self.column_param].map(self.dictionary)
return df
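# Hedged usage sketch (illustrative; assumes pandas is available as pd in this module).
def _example_map_values():
    demo = pd.DataFrame({'gender': ['M', 'F', 'M']})
    # 'gender' becomes [0, 1, 0]
    return TransformMapValues('gender', {'M': 0, 'F': 1})(demo)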
class TransformPolynomizer:
"""Gets polynomials of parameter's values.
Parameters:
column_param (str): Column name in InsolverDataFrame containing parameter to polynomize.
n (int): Polynomial degree.
"""
def __init__(self, column_param, n=2):
self.priority = 3
self.column_param = column_param
self.n = n
def __call__(self, df):
for i in range(2, self.n + 1):
a = self.column_param + '_' + str(i)
while a in list(df.columns):
a = a + '_'
df[a] = df[self.column_param] ** i
return df
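# Hedged usage sketch (illustrative): adds 'age_2' and 'age_3' columns with squared and cubed values.
def _example_polynomizer():
    demo = pd.DataFrame({'age': [18, 25, 40]})
    return TransformPolynomizer('age', n=3)(demo)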
class TransformGetDummies:
"""Gets dummy columns of the parameter, uses Pandas' 'get_dummies'.
Parameters:
column_param (str): Column name in InsolverDataFrame containing parameter to transform.
drop_first (bool): Whether to get k-1 dummies out of k categorical levels by removing the first level,
False by default.
inference (bool): Sign if the transformation is used for inference, False by default.
dummy_columns (list): List of the dummy columns, for inference only.
"""
def __init__(self, column_param, drop_first=False, inference=False, dummy_columns=None):
self.priority = 3
self.column_param = column_param
self.drop_first = drop_first
self.inference = inference
if inference:
if dummy_columns is None:
raise NotImplementedError("'dummy_columns' should contain the list of dummy columns.")
self.dummy_columns = dummy_columns
else:
self.dummy_columns = []
def __call__(self, df):
if not self.inference:
df_dummy = pd.get_dummies(df[[self.column_param]], prefix_sep='_', drop_first=self.drop_first)
self.dummy_columns = list(df_dummy.columns)
df = pd.concat([df, df_dummy], axis=1)
else:
for column in self.dummy_columns:
df[column] = 1 * ((self.column_param + '_' + df[self.column_param]) == column)
return df
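# Hedged usage sketch (illustrative): the fit-time call builds the dummy columns and remembers them,
# so the same columns can be reproduced at inference time.
def _example_get_dummies():
    fit_transform = TransformGetDummies('fuel')
    train = fit_transform(pd.DataFrame({'fuel': ['petrol', 'diesel', 'petrol']}))  # adds 'fuel_diesel', 'fuel_petrol'
    infer_transform = TransformGetDummies('fuel', inference=True, dummy_columns=fit_transform.dummy_columns)
    test = infer_transform(pd.DataFrame({'fuel': ['diesel']}))  # 'fuel_diesel' = 1, 'fuel_petrol' = 0
    return train, test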
class TransformCarFleetSize:
"""Calculates fleet sizes for policyholders.
Parameters:
column_id (str): Column name in InsolverDataFrame containing policyholders' IDs.
column_date_start (str): Column name in InsolverDataFrame containing policies' start dates, column type is date.
column_fleet_size (str): Column name in InsolverDataFrame for fleet sizes, column type is int.
"""
def __init__(self, column_id, column_date_start, column_fleet_size):
self.priority = 3
self.column_id = column_id
self.column_date_start = column_date_start
self.column_fleet_size = column_fleet_size
def __call__(self, df):
cp = pd.merge(df[[self.column_id, self.column_date_start]], df[[self.column_id, self.column_date_start]],
on=self.column_id, how='left')
        # keep policies of the same holder that started within one year before this policy's start date
        cp = cp[(cp[f'{self.column_date_start}_y'] > cp[f'{self.column_date_start}_x'] - np.timedelta64(1, 'Y')) &
                (cp[f'{self.column_date_start}_y'] <= cp[f'{self.column_date_start}_x'])]
cp = cp.groupby(self.column_id).size().to_dict()
df[self.column_fleet_size] = df[self.column_id].map(cp)
return df
class AutoFillNATransforms:
"""Fill NA values
Parameters:
numerical_columns (list): List of numerical columns
categorical_columns (list): List of categorical columns
numerical_method (str): Fill numerical NA values using this specified method: 'median' (by default), 'mean',
'mode' or 'remove'
categorical_method (str): Fill categorical NA values using this specified method: 'frequent' (by default),
'new_category', 'imputed_column' or 'remove'
numerical_constants (dict): Dictionary of constants for each numerical column
categorical_constants (dict): Dictionary of constants for each categorical column
"""
def __init__(self, numerical_columns=None, categorical_columns=None, numerical_method='median',
categorical_method='frequent', numerical_constants=None, categorical_constants=None):
self.priority = 0
self.numerical_columns = numerical_columns
self.categorical_columns = categorical_columns
self.numerical_constants = numerical_constants
self.categorical_constants = categorical_constants
self._num_methods = ['median', 'mean', 'mode', 'remove']
self._cat_methods = ['frequent', 'new_category', 'imputed_column', 'remove']
self.numerical_method = numerical_method
self.categorical_method = categorical_method
    def _find_num_cat_features(self, df):
        # only auto-detect columns when they were not provided explicitly
        if self.categorical_columns is None:
            self.categorical_columns = [c for c in df.columns if df[c].dtype.name == 'object']
        if self.numerical_columns is None:
            self.numerical_columns = [c for c in df.columns if df[c].dtype.name != 'object']
def _fillna_numerical(self, df):
"""Replace numerical NaN values using specified method"""
if not self.numerical_columns:
return
if self.numerical_method == 'remove':
df.dropna(subset=self.numerical_columns, inplace=True)
return
if self.numerical_constants:
for column in self.numerical_constants.keys():
df[column].fillna(self.numerical_constants[column], inplace=True)
if self.numerical_method in self._num_methods:
self._num_methods_dict = {
'median': lambda column: df[column].median(),
'mean': lambda column: df[column].mean(),
'mode': lambda column: df[column].mode()[0]
}
self.values = {}
for column in self.numerical_columns:
if df[column].isnull().all():
self.values[column] = 1
else:
self.values[column] = self._num_methods_dict[self.numerical_method](column)
df[column].fillna(self.values[column], inplace=True)
else:
raise NotImplementedError(f'Method parameter supports values in {self._num_methods}.')
def _fillnan_categorical(self, df):
"""Replace categorical NaN values using specified method"""
if not self.categorical_columns:
return
if self.categorical_method == 'remove':
df.dropna(subset=self.categorical_columns, inplace=True)
return
if self.categorical_constants:
for column in self.categorical_constants.keys():
df[column].fillna(self.categorical_constants[column], inplace=True)
if self.categorical_method in self._cat_methods:
if self.categorical_method == 'new_category':
for column in self.categorical_columns:
df[column].fillna('Unknown', inplace=True)
return
if self.categorical_method == 'imputed_column':
for column in self.categorical_columns:
df[column+"_Imputed"] = np.where(df[column].isnull(), 1, 0)
self.freq_categories = {}
for column in self.categorical_columns:
if df[column].mode().values.size > 0:
self.freq_categories[column] = df[column].mode()[0]
else:
self.freq_categories[column] = 1
df[column].fillna(self.freq_categories[column], inplace=True)
else:
raise NotImplementedError(f'Method parameter supports values in {self._cat_methods}.')
def __call__(self, df):
self._find_num_cat_features(df)
self._fillna_numerical(df)
self._fillnan_categorical(df)
return df
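# Hedged usage sketch (illustrative): numerical NaNs are filled with the column median,
# categorical NaNs with the most frequent value.
def _example_autofill_na():
    demo = pd.DataFrame({'age': [20.0, None, 40.0], 'city': ['A', None, 'A']})
    return AutoFillNATransforms()(demo)  # age NaN -> 30.0, city NaN -> 'A'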
class EncoderTransforms:
"""Label Encoder
Parameters:
column_names (list): columns for label encoding
le_classes (dict): dictionary with label encoding classes for each column
"""
def __init__(self, column_names, le_classes=None):
self.priority = 3
self.column_names = column_names
self.le_classes = le_classes
@staticmethod
def _encode_column(column):
le = LabelEncoder()
le.fit(column)
le_classes = le.classes_.tolist()
column = le.transform(column)
return column, le_classes
def __call__(self, df):
self.le_classes = {}
for column_name in self.column_names:
df[column_name], self.le_classes[column_name] = self._encode_column(df[column_name])
return df
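# Hedged usage sketch (illustrative): label-encodes the column in place and keeps the fitted classes.
def _example_label_encoder():
    demo = pd.DataFrame({'city': ['Paris', 'Oslo', 'Paris']})
    transform = EncoderTransforms(['city'])
    demo = transform(demo)  # 'city' becomes [1, 0, 1]; transform.le_classes == {'city': ['Oslo', 'Paris']}
    return demo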
class OneHotEncoderTransforms:
"""OneHotEncoder Transformations
Parameters:
column_names (list): columns for one hot encoding
encoder_dict (dict): dictionary with encoder_params for each column
"""
def __init__(self, column_names, encoder_dict=None):
self.priority = 3
self.column_names = column_names
self.encoder_dict = encoder_dict
@staticmethod
def _encode_column(df, column_name):
encoder = OneHotEncoder(sparse=False)
encoder.fit(df[[column_name]])
encoder_params = encoder.categories_
encoder_params = [x.tolist() for x in encoder_params]
column_encoded = pd.DataFrame(encoder.transform(df[[column_name]]))
column_encoded.columns = encoder.get_feature_names([column_name])
for column in column_encoded.columns:
df[column] = column_encoded[column]
return encoder_params
def __call__(self, df):
self.encoder_dict = {}
for column in self.column_names:
encoder_params = self._encode_column(df, column)
self.encoder_dict[column] = encoder_params
df.drop([column], axis=1, inplace=True)
return df
class DatetimeTransforms:
"""Get selected feature from date variable.
Parameters:
column_names (list): List of columns to convert, columns in column_names can't be duplicated in column_feature.
column_types (dict): Dictionary of columns and types to return.
dayfirst (bool): Parameter from pandas.to_datetime(), specify a date parse order if arg is str or its list-likes.
yearfirst (bool): Parameter from pandas.to_datetime(), specify a date parse order if arg is str or its list-likes.
feature (str): Type of feature to get from date variable: unix (by default), date, time, month, quarter, year, day, day_of_the_week, weekend.
column_feature (dict): List of columns to preprocess using specified feature for each column in the dictionary, columns in column_feature can't be duplicated in column_names.
"""
def __init__(self, column_names, column_types=None, dayfirst=False, yearfirst=False, feature='unix', column_feature=None):
self.feature = feature
self.column_names = column_names
self.column_types = column_types
self.dayfirst = dayfirst
self.yearfirst = yearfirst
self._feature_types = ['unix', 'date', 'time', 'month', 'quarter', 'year', 'day', 'day_of_the_week', 'weekend']
self.column_feature = column_feature
def _get_date_feature(self, df):
self.feature_dict = {
            'unix': lambda column: (column - pd.Timestamp("1970-01-01")) // pd.Timedelta('1s'),
import ogtk
import pickle
import subprocess
import pyaml
import itertools
import pyfaidx
import os
import multiprocessing
import regex
import numpy as np
import pandas as pd
import pdb
#import ltr_mtl <- how to import a script
def extract_intid_worker(args):
''' input precompiled regex and sequences'''
rxc, seqs = args
hits = []
for i in seqs:
match = rxc.search(i)
if match:
hits.append(match.group('intid1'))
return(hits)
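# Hedged usage sketch (illustrative): the worker expects a precompiled regex exposing an 'intid1'
# group together with a list of sequences, e.g.
#   rxc = regex.compile("(?P<intid1>.{8})AAAA")
#   extract_intid_worker((rxc, ["GGGGCCCCAAAA", "TTTT"]))  # -> ['GGGGCCCC']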
def extract_intid_pair_worker(args):
''' input precompiled regex and sequences'''
rxcs, seqs = args
rxc, rxc2 = rxcs
hits = []
for i in seqs:
match = rxc.search(i)
if match:
match2 = rxc2.search(i)
if match2:
hits.append(match.group('intid1') +":"+ match2.group('intid2'))
return(hits)
def detect_integrations_dual(ifn, intid_len =8, anchor1 = "CTGTTCCTGTAGAAAT", error1 = 3, anchor2 = "CCGGACTCAGATCTCGAGCT", error2 = 2, anchor3="CGAGCGCTATGAGCGACTATGGGA", error3=3, limit = None, cores = 50):
    ''' Processes a fastq/fastq.gz/bam file and detects the number of integrations by means of anchoring seqs (anchor1, anchor2, anchor3).
    Supports fuzzy matching via the error1/error2/error3 arguments.
    The number of cores can be adjusted.
'''
# Determine the iterator in order to fetch sequences
if ifn.endswith("fastq"):
with open(ifn) as ff:
it = itertools.islice(ff, 1, limit, 4)
seqs = [i for i in it]
if ifn.endswith("fastq.gz"):
import gzip
with gzip.open(ifn, 'rt') as ff:
it = itertools.islice(ff, 1, limit, 4)
seqs = [i for i in it]
if ifn.endswith("bam"):
import pysam
it= pysam.AlignmentFile(ifn)
it = [i.seq for i in it]
seqs = [i for i in it]
# we trim the first 100bp to reduce the memory footprint
#seqs = [i[0:100] for i in it]
# not possible for paired mode
# TODO this might introduce a bug
chunks = np.array_split(seqs, cores)
rxc = regex.compile(".*({}){{e<={}}}(?P<intid1>.{{{}}}).*({}){{e<={}}}".format(anchor1, error1, intid_len, anchor2, error2))
rxc2 = regex.compile(".*(?P<intid2>.{{{}}}).*({}){{e<={}}}".format(intid_len, anchor3, error3))
pool = multiprocessing.Pool(cores)
hits = np.array(pool.map(extract_intid_worker, itertools.zip_longest([rxc], chunks, fillvalue=rxc)), dtype=object)
hits_paired = np.array(pool.map(extract_intid_pair_worker, itertools.zip_longest([[rxc, rxc2]], chunks, fillvalue=[rxc, rxc2])))
pool.close()
pool.join()
hits = [item for sublist in hits for item in sublist]
x = pd.Series(hits).value_counts()
xx = pd.Series([int(np.log10(i)) for i in x], index = x.index)
valid_ints = [i for i in x[xx>(xx[0]-1)].index]
print("Found {} valid integrations".format(len(valid_ints)))
if len(hits_paired) >0:
print("Paired ints found")
        x = pd.Series([item for sublist in hits_paired for item in sublist])
from __future__ import division #brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy as np
import numpy.testing as npt
import os.path
import pandas as pd
import sys
from tabulate import tabulate
import unittest
print("Python version: " + sys.version)
print("Numpy version: " + np.__version__)
# #find parent directory and import model
# parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# sys.path.append(parent_dir)
from ..kabam_exe import Kabam
test = {}
class TestKabam(unittest.TestCase):
"""
Unit tests for Kabam model.
: unittest will
: 1) call the setup method,
: 2) then call every method starting with "test",
: 3) then the teardown method
"""
print("kabam unittests conducted at " + str(datetime.datetime.today()))
def setUp(self):
"""
Setup routine for Kabam unit tests.
:return:
"""
pass
# setup the test as needed
# e.g. pandas to open Kabam qaqc csv
# Read qaqc csv and create pandas DataFrames for inputs and expected outputs
def tearDown(self):
"""
Teardown routine for Kabam unit tests.
:return:
"""
pass
# teardown called after each test
# e.g. maybe write test results to some text file
def create_kabam_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty kabam object
kabam_empty = Kabam(df_empty, df_empty)
return kabam_empty
def test_ventilation_rate(self):
"""
:description Ventilation rate of aquatic animal
:unit L/d
:expression Kabam Eq. A5.2b (Gv)
:param zoo_wb: wet weight of animal (kg)
:param conc_do: concentration of dissolved oxygen (mg O2/L)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series(['nan', 0.00394574, 0.468885], dtype = 'float')
try:
#use the zooplankton variables/values for the test
kabam_empty.zoo_wb = pd.Series(['nan', 1.e-07, 1.e-4], dtype = 'float')
kabam_empty.conc_do = pd.Series([5.0, 10.0, 7.5], dtype='float')
result = kabam_empty.ventilation_rate(kabam_empty.zoo_wb)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_pest_uptake_eff_gills(self):
"""
:description Pesticide uptake efficiency by gills
:unit fraction
"expresssion Kabam Eq. A5.2a (Ew)
:param log kow: octanol-water partition coefficient ()
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series(['nan', 0.540088, 0.540495], dtype = 'float')
try:
kabam_empty.log_kow = pd.Series(['nan', 5., 6.], dtype = 'float')
kabam_empty.kow = 10.**(kabam_empty.log_kow)
result = kabam_empty.pest_uptake_eff_bygills()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_phytoplankton_k1_calc(self):
"""
:description Uptake rate constant through respiratory area for phytoplankton
:unit: L/kg*d
:expression Kabam Eq. A5.1 (K1:unique to phytoplankton)
:param log kow: octanol-water partition coefficient ()
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([1639.34426, 8695.6521, 15267.1755], dtype = 'float')
try:
kabam_empty.log_kow = pd.Series([4., 5., 6.], dtype = 'float')
kabam_empty.kow = 10.**(kabam_empty.log_kow)
result = kabam_empty.phytoplankton_k1_calc(kabam_empty.kow)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_aq_animal_k1_calc(self):
"""
        :description Uptake rate constant through respiratory area for aquatic animals
:unit: L/kg*d
:expression Kabam Eq. A5.2 (K1)
:param pest_uptake_eff_bygills: Pesticide uptake efficiency by gills of aquatic animals (fraction)
:param vent_rate: Ventilation rate of aquatic animal (L/d)
:param wet_wgt: wet weight of animal (kg)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series(['nan', 1201.13849, 169.37439], dtype = 'float')
try:
pest_uptake_eff_bygills = pd.Series(['nan', 0.0304414, 0.0361228], dtype = 'float')
vent_rate = pd.Series(['nan', 0.00394574, 0.468885], dtype = 'float')
wet_wgt = pd.Series(['nan', 1.e-07, 1.e-4], dtype = 'float')
result = kabam_empty.aq_animal_k1_calc(pest_uptake_eff_bygills, vent_rate, wet_wgt)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_animal_water_part_coef(self):
"""
:description Organism-Water partition coefficient (based on organism wet weight)
:unit ()
:expression Kabam Eq. A6a (Kbw)
:param zoo_lipid: lipid fraction of organism (kg lipid/kg organism wet weight)
:param zoo_nlom: non-lipid organic matter (NLOM) fraction of organism (kg NLOM/kg organism wet weight)
:param zoo_water: water content of organism (kg water/kg organism wet weight)
:param kow: octanol-water partition coefficient ()
:param beta: proportionality constant expressing the sorption capacity of NLOM or NLOC to
that of octanol
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([650.87, 11000.76, 165000.64], dtype = 'float')
try:
#For test purpose we'll use the zooplankton variable names
kabam_empty.zoo_lipid_frac = pd.Series([0.03, 0.04, 0.06], dtype = 'float')
kabam_empty.zoo_nlom_frac = pd.Series([0.10, 0.20, 0.30,], dtype = 'float')
kabam_empty.zoo_water_frac = pd.Series([0.87, 0.76, 0.64], dtype = 'float')
kabam_empty.kow = pd.Series([1.e4, 1.e5, 1.e6], dtype = 'float')
beta = 0.35
result = kabam_empty.animal_water_part_coef(kabam_empty.zoo_lipid_frac,
kabam_empty.zoo_nlom_frac,
kabam_empty.zoo_water_frac, beta)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_aq_animal_k2_calc(self):
"""
:description Elimination rate constant through the respiratory area
:unit (per day)
:expression Kabam Eq. A6 (K2)
:param zoo_k1: Uptake rate constant through respiratory area for aquatic animals
:param k_bw_zoo (Kbw): Organism-Water partition coefficient (based on organism wet weight ()
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([2.5186969, 0.79045921, 0.09252798], dtype = 'float')
try:
#For test purpose we'll use the zooplankton variable names
kabam_empty.zoo_k1 = pd.Series([1639.34426, 8695.6521, 15267.1755], dtype = 'float')
kabam_empty.k_bw_zoo = pd.Series([650.87, 11000.76, 165000.64], dtype = 'float')
result = kabam_empty.aq_animal_k2_calc(kabam_empty.zoo_k1, kabam_empty.k_bw_zoo)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_animal_grow_rate_const(self):
"""
:description Aquatic animal/organism growth rate constant
:unit (per day)
:expression Kabam Eq. A7.1 & A7.2
:param zoo_wb: wet weight of animal/organism (kg)
:param water_temp: water temperature (degrees C)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([0.01255943, 0.00125594, 0.00251], dtype = 'float')
try:
#For test purpose we'll use the zooplankton variable names
kabam_empty.zoo_wb = pd.Series([1.e-7, 1.e-2, 1.0], dtype = 'float')
kabam_empty.water_temp = pd.Series([10., 15., 20.], dtype = 'float')
result = kabam_empty.animal_grow_rate_const(kabam_empty.zoo_wb)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_dietary_trans_eff(self):
"""
        :description Aquatic animal/organism dietary pesticide transfer efficiency
:unit fraction
:expression Kabam Eq. A8a (Ed)
:param kow: octanol-water partition coefficient ()
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([0.499251, 0.492611, 0.434783], dtype = 'float')
try:
kabam_empty.kow = pd.Series([1.e4, 1.e5, 1.e6], dtype = 'float')
result = kabam_empty.dietary_trans_eff()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_aq_animal_feeding_rate(self):
"""
:description Aquatic animal feeding rate (except filterfeeders)
:unit kg/d
:expression Kabam Eq. A8b1 (Gd)
:param wet_wgt: wet weight of animal/organism (kg)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([4.497792e-08, 1.0796617e-3, 0.073042572], dtype = 'float')
try:
#For test purpose we'll use the zooplankton variable names
kabam_empty.zoo_wb = pd.Series([1.e-7, 1.e-2, 1.], dtype = 'float')
kabam_empty.water_temp = pd.Series([10., 15., 20.])
result = kabam_empty.aq_animal_feeding_rate(kabam_empty.zoo_wb)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_filterfeeder_feeding_rate(self):
"""
:description Filter feeder feeding rate
:unit kg/d
:expression Kabam Eq. A8b2 (Gd)
:param self.gv_filterfeeders: filterfeeder ventilation rate (L/d)
:param self.conc_ss: Concentration of Suspended Solids (Css - kg/L)
:param particle_scav_eff: efficiency of scavenging of particles absorbed from water (fraction)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series(['nan', 1.97287e-7, 0.03282195], dtype = 'float')
try:
kabam_empty.gv_filterfeeders = pd.Series(['nan', 0.00394574, 0.468885], dtype = 'float')
kabam_empty.conc_ss = pd.Series([0.00005, 0.00005, 0.07], dtype = 'float')
kabam_empty.particle_scav_eff = 1.0
result = kabam_empty.filterfeeders_feeding_rate()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_diet_uptake_rate_const(self):
"""
        :description pesticide uptake rate constant for uptake through ingestion of food
:unit kg food/kg organism - day
:expression Kabam Eq. A8 (kD)
:param dietary_trans_eff: dietary pesticide transfer efficiency (fraction)
:param feeding rate: animal/organism feeding rate (kg/d)
:param wet weight of aquatic animal/organism (kg)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([0.22455272, 0.05318532, 0.031755767 ], dtype = 'float')
try:
#For test purpose we'll use the zooplankton variable names
kabam_empty.ed_zoo = pd.Series([0.499251, 0.492611, 0.434783], dtype = 'float')
kabam_empty.gd_zoo = pd.Series([4.497792e-08, 1.0796617e-3, 0.073042572], dtype = 'float')
kabam_empty.zoo_wb = pd.Series([1.e-7, 1.e-2, 1.0])
result = kabam_empty.diet_uptake_rate_const(kabam_empty.ed_zoo, \
kabam_empty.gd_zoo, kabam_empty.zoo_wb)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_overall_diet_content(self):
"""
:description Overall fraction of aquatic animal/organism diet attributed to diet food component
(i.e., lipids or NLOM or water)
:unit kg/kg
:expression not shown in Kabam documentation: it is associated with Kabam Eq. A9
overall_diet_content is equal to the sum over dietary elements
        : of (fraction of diet) * (content in diet element); for example zooplankton ingest sediment and
: phytoplankton, thus the overall lipid content of the zooplankton diet equals
: (fraction of sediment in zooplankton diet) * (fraction of lipids in sediment) +
: (fraction of phytoplankton in zooplankton diet) * (fraction of lipids in phytoplankton)
        :param diet_fraction: list of values representing fractions of aquatic animal/organism diet attributed
to each element of diet
:param content_fraction: list of values representing fraction of diet element attributed to a specific
component of that diet element (e.g., lipid, NLOM, or water)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([0.025, 0.03355, 0.0465], dtype = 'float')
try:
#For test purposes we'll use the small fish diet variables/values
kabam_empty.sfish_diet_sediment = pd.Series([0.0, 0.01, 0.05], dtype = 'float')
kabam_empty.sfish_diet_phytoplankton = pd.Series([0.0, 0.01, 0.05], dtype = 'float')
kabam_empty.sfish_diet_zooplankton = pd.Series([0.5, 0.4, 0.5], dtype = 'float')
kabam_empty.sfish_diet_benthic_invertebrates = pd.Series([0.5, 0.57, 0.35], dtype = 'float')
kabam_empty.sfish_diet_filterfeeders = pd.Series([0.0, 0.01, 0.05], dtype = 'float')
kabam_empty.sediment_lipid = pd.Series([0.0, 0.01, 0.0], dtype = 'float')
kabam_empty.phytoplankton_lipid = pd.Series([0.02, 0.015, 0.03], dtype = 'float')
kabam_empty.zoo_lipid = pd.Series([0.03, 0.04, 0.05], dtype = 'float')
kabam_empty.beninv_lipid = pd.Series([0.02, 0.03, 0.05], dtype = 'float')
kabam_empty.filterfeeders_lipid = pd.Series([0.01, 0.02, 0.05], dtype = 'float')
diet_elements = pd.Series([], dtype = 'float')
content_fracs = pd.Series([], dtype = 'float')
for i in range(len(kabam_empty.sfish_diet_sediment)):
diet_elements = [kabam_empty.sfish_diet_sediment[i],
kabam_empty.sfish_diet_phytoplankton[i],
kabam_empty.sfish_diet_zooplankton[i],
kabam_empty.sfish_diet_benthic_invertebrates[i],
kabam_empty.sfish_diet_filterfeeders[i]]
content_fracs = [kabam_empty.sediment_lipid[i],
kabam_empty.phytoplankton_lipid[i],
kabam_empty.zoo_lipid[i],
kabam_empty.beninv_lipid[i],
kabam_empty.filterfeeders_lipid[i]]
result[i] = kabam_empty.overall_diet_content(diet_elements, content_fracs)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_fecal_egestion_rate_factor(self):
"""
Aquatic animal/organism egestion rate of fecal matter factor (to be multiplied by the
feeding rate to calculate egestion rate of fecal matter)
:unit (kg feces)/[(kg organism) - day]
:expression Kabam Eq. A9 (GF)
:param epsilonL: dietary assimilation rate of lipids (fraction)
:param epsilonN: dietary assimilation rate of NLOM (fraction)
:param epsilonW: dietary assimilation rate of water (fraction)
:param diet_lipid; lipid content of aquatic animal/organism diet (fraction)
:param diet_nlom NLOM content of aquatic animal/organism diet (fraction)
:param diet_water water content of aquatic animal/organism diet (fraction)
:param feeding_rate: aquatic animal/organism feeding rate (kg/d)
:return:
"""
#this test includes two results; 'result1' represents the overall assimilation rate of the
#aquatic animal/organism diet; and 'result' represents the product of this assimilation rate
#and the feeding rate (this multiplication will be done in the main model routine
#as opposed to within a method -- the method here is limited to the assimilation factor
#because this factor is used elsewhere as well
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
result1 = pd.Series([], dtype='float')
expected_results = pd.Series([1.43e-9, 5.005e-5, 4.82625e-3], dtype = 'float')
try:
#For test purposes we'll use the zooplankton variable names and relevant constant values
kabam_empty.epsilon_lipid_zoo = 0.72
kabam_empty.epsilon_nlom_zoo = 0.60
kabam_empty.epsilon_water = 0.25
kabam_empty.v_ld_zoo = pd.Series([0.025, 0.035, 0.045], dtype = 'float')
kabam_empty.v_nd_zoo = pd.Series([0.025, 0.035, 0.045], dtype = 'float')
kabam_empty.v_wd_zoo = pd.Series([0.025, 0.035, 0.045], dtype = 'float')
kabam_empty.gd_zoo = pd.Series([4.e-08, 1.e-3, 0.075], dtype = 'float')
result1 = kabam_empty.fecal_egestion_rate_factor(kabam_empty.epsilon_lipid_zoo,
kabam_empty.epsilon_nlom_zoo,
kabam_empty.epsilon_water,
kabam_empty.v_ld_zoo,
kabam_empty.v_nd_zoo,
kabam_empty.v_wd_zoo)
result = result1 * kabam_empty.gd_zoo
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_diet_elements_gut(self):
"""
Fraction of diet elements (i.e., lipid, NLOM, water) in the gut
:unit (kg lipid) / (kg digested wet weight)
:expression Kabam Eq. A9 (VLG, VNG, VWG)
        :param (epsilon_lipid_*) relevant dietary assimilation rate (fraction)
:param (v_ld_*) relevant overall diet content of diet element (kg/kg)
:param (diet_assim_factor_*) relevant: Aquatic animal/organism egestion rate of fecal matter factor
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([0.2, 0.196, 0.1575], dtype = 'float')
try:
#for this test we'll use the lipid content for zooplankton
kabam_empty.epsilon_lipid_zoo = 0.72
kabam_empty.v_ld_zoo = pd.Series([0.025, 0.035, 0.045], dtype = 'float')
kabam_empty.diet_assim_factor_zoo = pd.Series([0.035, 0.05, 0.08], dtype = 'float')
result = kabam_empty.diet_elements_gut(kabam_empty.epsilon_lipid_zoo,
kabam_empty.v_ld_zoo, kabam_empty.diet_assim_factor_zoo)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_gut_organism_partition_coef(self):
"""
        Partition coefficient of the pesticide between the gastrointestinal tract and the organism
:unit none
:expression Kabam Eq. A9 (KGB)
:param vlg_zoo: lipid content in the gut
:param vng_zoo: nlom content in the gut
:param vwg_zoo: water content in the gut
:param kow: pesticide Kow
:param beta_aq_animals: proportionality constant expressing the sorption capacity of NLOM to that of octanol
:param zoo_lipid_frac: lipid content in the whole organism
:param zoo_nlom_frac: nlom content in the whole organism
:param zoo_water_frac: water content in the whole organism
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([0.991233, 1.662808, 1.560184], dtype = 'float')
try:
            #for this test we'll use the zooplankton variables
kabam_empty.beta_aq_animals = 0.035
kabam_empty.kow = pd.Series([1.e4, 1.e5, 1.e6], dtype = 'float')
kabam_empty.vlg_zoo = pd.Series([0.2, 0.25, 0.15], dtype = 'float')
kabam_empty.vng_zoo = pd.Series([0.1, 0.15, 0.25], dtype = 'float')
kabam_empty.vwg_zoo = pd.Series([0.15, 0.35, 0.05], dtype = 'float')
kabam_empty.zoo_lipid_frac = pd.Series([0.20, 0.15, 0.10], dtype = 'float')
kabam_empty.zoo_nlom_frac = pd.Series([0.15, 0.10, 0.05], dtype = 'float')
kabam_empty.zoo_water_frac = pd.Series([0.65, 0.75, 0.85], dtype = 'float')
result = kabam_empty.gut_organism_partition_coef(kabam_empty.vlg_zoo, kabam_empty.vng_zoo,
kabam_empty.vwg_zoo, kabam_empty.kow, kabam_empty.beta_aq_animals,
kabam_empty.zoo_lipid_frac, kabam_empty.zoo_nlom_frac,
kabam_empty.zoo_water_frac)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_fecal_elim_rate_const(self):
"""
rate constant for elimination of the pesticide through excretion of contaminated feces
:unit per day
:param gf_zoo: egestion rate of fecal matter (kg feces)/(kg organism-day)
:param ed_zoo: dietary pesticide transfer efficiency (fraction)
:param kgb_zoo: gut - partition coefficient of the pesticide between the gastrointestinal tract
and the organism (-)
:param zoo_wb: wet weight of organism (kg)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([7.5e-4, 0.0525, 5.625e-4], dtype = 'float')
try:
#for this test we'll use the zooplankton variables
kabam_empty.gf_zoo = pd.Series([1.5e-9, 5.0e-5, 4.5e-3], dtype = 'float')
kabam_empty.ed_zoo = pd.Series([0.5, 0.7, 0.25], dtype = 'float')
kabam_empty.kgb_zoo = pd.Series([1.0, 1.5, 0.5], dtype = 'float')
kabam_empty.zoo_wb = pd.Series([1.e-6, 1.e-3, 1.0], dtype = 'float')
result = kabam_empty.fecal_elim_rate_const(kabam_empty.gf_zoo, kabam_empty.ed_zoo,
kabam_empty.kgb_zoo, kabam_empty.zoo_wb)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_frac_pest_freely_diss(self):
"""
Calculate Fraction of pesticide freely dissolved in water column (that can be
absorbed via membrane diffusion)
:unit fraction
:expression Kabam Eq. A2
:param conc_poc: Concentration of Particulate Organic Carbon in water column (kg OC/L)
        :param kow: octanol-water partition coefficient (-)
:param conc_doc: Concentration of Dissolved Organic Carbon in water column (kg OC/L)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([0.13422819, 0.00462963, 0.00514139], dtype = 'float')
try:
#for this test we'll use the zooplankton variables
kabam_empty.conc_poc = pd.Series([1.5e-3, 5.0e-3, 4.5e-4], dtype = 'float')
kabam_empty.alpha_poc = 0.35
kabam_empty.kow = pd.Series([1.e4, 1.e5, 1.e6], dtype = 'float')
kabam_empty.conc_doc = pd.Series([1.5e-3, 5.0e-3, 4.5e-4], dtype = 'float')
kabam_empty.alpha_doc = 0.08
result = kabam_empty.frac_pest_freely_diss()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_freely_diss_watercol(self):
"""
concentration of freely dissolved pesticide in overlying water column
:unit g/L
:param phi: Fraction of pesticide freely dissolved in water column (that can be
absorbed via membrane diffusion) (fraction)
:param water_column_eec: Water Column 1-in-10 year EECs (ug/L)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([1.e-1, 2.4e-2, 1.], dtype = 'float')
try:
#for this test we'll use the zooplankton variables
kabam_empty.phi = pd.Series([0.1, 0.004, 0.05], dtype = 'float')
kabam_empty.water_column_eec = pd.Series([1., 6., 20.], dtype = 'float')
result = kabam_empty.conc_freely_diss_watercol()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_sed_norm_4oc(self):
"""
pesticide concentration in sediment normalized for organic carbon
:unit g/(kg OC)
:expression Kabam Eq. A4a
:param pore_water_eec: freely dissolved pesticide concentration in sediment pore water
:param k_oc: organic carbon partition coefficient (L/kg OC)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([2.5e4, 6.e4, 2.e6], dtype = 'float')
try:
#for this test we'll use the zooplankton variables
kabam_empty.k_oc = pd.Series([2.5e4, 1.e4, 1.e5], dtype = 'float')
kabam_empty.pore_water_eec = pd.Series([1., 6., 20.], dtype = 'float')
result = kabam_empty.conc_sed_norm_4oc()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_sed_dry_wgt(self):
"""
Calculate concentration of chemical in solid portion of sediment
:unit g/(kg dry)
:expression Kabam Eq. A4
:param c_soc: pesticide concentration in sediment normalized for organic carbon g/(kg OC)
:param sediment_oc: fraction organic carbon in sediment (fraction)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([0.001, 0.0036, 0.4], dtype = 'float')
try:
#for this test we'll use the zooplankton variables
kabam_empty.c_soc = pd.Series([0.025, 0.06, 2.00], dtype = 'float')
kabam_empty.sediment_oc = pd.Series([4., 6., 20.], dtype = 'float')
kabam_empty.sediment_oc_frac = kabam_empty.percent_to_frac(kabam_empty.sediment_oc)
result = kabam_empty.conc_sed_dry_wgt()
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_diet_pest_conc(self):
"""
overall concentration of pesticide in aquatic animal/organism diet
:unit g/(kg wet weight)
:expression Kabam Eq. A1 (SUM(Pi * CDi);
:param diet_frac_lfish: fraction of large fish diet containing prey i (Pi in Eq. A1))
        :param diet_conc_lfish: concentration of pesticide in prey i (CDi in Eq. A1)
:param lipid_content_lfish: fraction of prey i that is lipid
:notes for this test we populate all prey items for large fish even though large fish
typically only consume medium fish
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
result1 = pd.Series([], dtype='float')
result2 = pd.Series([], dtype='float')
expected_results1 = pd.Series([0.2025, 0.2025, 0.205], dtype = 'float')
expected_results2 = pd.Series([5.316667, 4.819048, 4.3], dtype = 'float')
try:
#for this test we'll use the large fish variables (there are 7 prey items listed
#for large fish (sediment, phytoplankton, zooplankton, benthic invertebrates,
# filterfeeders, small fish, and medium fish --- this is the order related
#to the values in the two series below)
kabam_empty.diet_frac_lfish = pd.Series([[0.02, 0.03, 0.10, 0.05, 0.10, 0.7],
[0.0, 0.05, 0.05, 0.05, 0.10, 0.75],
[0.01, 0.02, 0.03, 0.04, 0.10, 0.8]], dtype = 'float')
kabam_empty.diet_conc_lfish = pd.Series([[0.10, 0.10, 0.20, 0.15, 0.30, 0.20],
[0.10, 0.10, 0.20, 0.15, 0.30, 0.20],
[0.10, 0.10, 0.20, 0.15, 0.30, 0.20]], dtype = 'float')
kabam_empty.diet_lipid_content_lfish = pd.Series([[0.0, 0.02, 0.03, 0.03, 0.04, 0.04],
[0.01, 0.025, 0.035, 0.03, 0.04, 0.045],
[0.0, 0.02, 0.03, 0.03, 0.05, 0.05]], dtype = 'float')
result1,result2 = kabam_empty.diet_pest_conc(kabam_empty.diet_frac_lfish,
kabam_empty.diet_conc_lfish,
kabam_empty.diet_lipid_content_lfish)
npt.assert_allclose(result1, expected_results1, rtol=1e-4, atol=0, err_msg='', verbose=True)
npt.assert_allclose(result2, expected_results2, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result1, expected_results1, result2, expected_results2]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_pest_conc_organism(self):
"""
concentration of pesticide in aquatic animal/organism
:unit g/(kg wet weight)
:expression Kabam Eq. A1 (CB)
:param lfish_k1: pesticide uptake rate constant through respiratory area (gills, skin) (L/kg-d)
        :param lfish_k2: rate constant for elimination of the pesticide through the respiratory area (gills, skin) (/d)
:param lfish_kd: pesticide uptake rate constant for uptake through ingestion of food (kg food/(kg organism - day)
:param lfish_ke: rate constant for elimination of the pesticide through excretion of feces (/d)
:param lfish_kg: animal/organism growth rate constant (/d)
:param lfish_km: rate constant for pesticide metabolic transformation (/d)
        :param lfish_mp: fraction of respiratory ventilation that involves pore-water of sediment (fraction)
:param lfish_mo: fraction of respiratory ventilation that involves overlying water; 1-mP (fraction)
:param phi: fraction of the overlying water pesticide concentration that is freely dissolved and can be absorbed
via membrane diffusion (fraction)
        :param cwto: total pesticide concentration in water column above sediment (g/L)
        :param pore_water_eec: freely dissolved pesticide concentration in pore-water of sediment (g/L)
:param total_diet_conc_lfish: concentration of pesticide in overall diet of aquatic animal/organism (g/kg wet weight)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([1.97044e-3, 1.85185e-3, 3.97389e-3], dtype = 'float')
try:
kabam_empty.phi = pd.Series([1.0, 1.0, 1.0], dtype = 'float')
kabam_empty.water_column_eec = pd.Series([1.e-3, 1.e-4, 2.e-3], dtype = 'float')
kabam_empty.pore_water_eec = pd.Series([1.e-4, 1.e-5, 2.e-3], dtype = 'float')
            #for this test we'll use the large fish variables (and values that may not specifically apply to large fish)
kabam_empty.lfish_k1 = pd.Series([10., 5., 2.], dtype = 'float')
kabam_empty.lfish_k2 = pd.Series( [10., 5., 3.], dtype = 'float')
kabam_empty.lfish_kd = pd.Series([0.05, 0.03, 0.02], dtype = 'float')
kabam_empty.lfish_ke = pd.Series([0.05, 0.02, 0.02], dtype = 'float')
kabam_empty.lfish_kg = pd.Series([0.1, 0.01, 0.003], dtype = 'float')
kabam_empty.lfish_km = pd.Series([0.0, 0.1, 0.5], dtype = 'float')
kabam_empty.lfish_mp = pd.Series([0.0, 0.0, 0.05], dtype = 'float')
kabam_empty.lfish_mo = pd.Series([1.0, 1.0, 0.95], dtype = 'float')
kabam_empty.total_diet_conc_lfish = pd.Series( [.20, .30, .50], dtype = 'float')
result = kabam_empty.pest_conc_organism(kabam_empty.lfish_k1, kabam_empty.lfish_k2,
kabam_empty.lfish_kd, kabam_empty.lfish_ke,
kabam_empty.lfish_kg, kabam_empty.lfish_km,
kabam_empty.lfish_mp, kabam_empty.lfish_mo,
kabam_empty.total_diet_conc_lfish)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_lipid_norm_residue_conc(self):
"""
Lipid normalized pesticide residue in aquatic animal/organism
:unit ug/kg-lipid
        :expression represents a factor (CB/VLB) used in Kabam Eqs. F4, F5, & F6
:param cb_lfish: total pesticide concentration in animal/organism (g/kg-ww)
:param lfish_lipid_frac: fraction of animal/organism that is lipid (fraction)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([0.025, 0.00833333, 0.0005], dtype = 'float')
try:
#for this test we'll use the large fish variables
kabam_empty.out_cb_lfish = pd.Series([1.e-3, 5.e-4, 1.e-5], dtype = 'float')
kabam_empty.lfish_lipid_frac = pd.Series([0.04, 0.06, 0.02], dtype = 'float')
kabam_empty.gms_to_microgms = 1.e6
result = kabam_empty.lipid_norm_residue_conc(kabam_empty.out_cb_lfish,
kabam_empty.lfish_lipid_frac)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_pest_conc_diet_uptake(self):
"""
:description Pesticide concentration in animal/organism originating from uptake through diet
:unit g/kg ww
:expression Kabam A1 (with k1 = 0)
        :param lfish_kD: pesticide uptake rate constant for uptake through ingestion of food (kg food/kg organism - day)
        :param total_diet_conc: overall concentration of pesticide in diet of animal/organism (g/kg-ww)
        :param lfish_k2: rate constant for elimination of the pesticide through the respiratory area (gills, skin) (/d)
:param lfish_kE: rate constant for elimination of the pesticide through excretion of feces (/d)
:param lfish_kG: animal/organism growth rate constant (/d)
:param lfish_kM: rate constant for pesticide metabolic transformation (/d)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([9.8522e-4, 1.75439e-3, 2.83849e-3], dtype = 'float')
try:
            #for this test we'll use the large fish variables (and values that may not specifically apply to large fish)
kabam_empty.lfish_k2 = pd.Series( [10., 5., 3.], dtype = 'float')
kabam_empty.lfish_kd = pd.Series([0.05, 0.03, 0.02], dtype = 'float')
kabam_empty.lfish_ke = pd.Series([0.05, 0.02, 0.02], dtype = 'float')
kabam_empty.lfish_kg = pd.Series([0.1, 0.01, 0.003], dtype = 'float')
kabam_empty.lfish_km = pd.Series([0.0, 0.1, 0.5], dtype = 'float')
kabam_empty.total_diet_conc_lfish = pd.Series( [.20, .30, .50], dtype = 'float')
result = kabam_empty.pest_conc_diet_uptake(kabam_empty.lfish_kd, kabam_empty.lfish_k2,
kabam_empty.lfish_ke, kabam_empty.lfish_kg,
kabam_empty.lfish_km,
kabam_empty.total_diet_conc_lfish)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_pest_conc_respir_uptake(self):
"""
:description Pesticide concentration in animal/organism originating from uptake through respiration
:unit g/kg ww
:expression Kabam A1 (with kD = 0)
:param lfish_k1: pesticide uptake rate constant through respiratory area (gills, skin) (L/kg-d)
        :param lfish_k2: rate constant for elimination of the pesticide through the respiratory area (gills, skin) (/d)
:param lfish_kE: rate constant for elimination of the pesticide through excretion of feces (/d)
:param lfish_kG: animal/organism growth rate constant (/d)
:param lfish_kM: rate constant for pesticide metabolic transformation (/d)
        :param lfish_mP: fraction of respiratory ventilation that involves pore-water of sediment (fraction)
:param lfish_mO: fraction of respiratory ventilation that involves overlying water; 1-mP (fraction)
:param phi: fraction of the overlying water pesticide concentration that is freely dissolved and can be absorbed
via membrane diffusion (fraction)
        :param water_column_eec: total pesticide concentration in water column above sediment (g/L)
        :param pore_water_eec: freely dissolved pesticide concentration in pore-water of sediment (g/L)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
kabam_empty = self.create_kabam_object()
result = pd.Series([], dtype='float')
expected_results = pd.Series([9.8522167e-4, 9.746588e-5, 1.1353959e-3], dtype = 'float')
try:
kabam_empty.phi = pd.Series([1.0, 1.0, 1.0], dtype = 'float')
kabam_empty.water_column_eec = pd.Series([1.e-3, 1.e-4, 2.e-3], dtype = 'float')
kabam_empty.pore_water_eec = pd.Series([1.e-4, 1.e-5, 2.e-3], dtype = 'float')
            #for this test we'll use the large fish variables (and values that may not specifically apply to large fish)
            kabam_empty.lfish_k1 = pd.Series([10., 5., 2.], dtype = 'float')
# https://www.statsmodels.org/stable/examples/notebooks/generated/tsa_arma_1.html?highlight=arma
import pandas as pd
from matplotlib import pyplot as plt
from statsmodels.graphics.tsaplots import plot_predict
from statsmodels.tsa.arima.model import ARIMA
data = pd.read_json('../../DataProcessing/Datasets/Sales/sales.json')
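# Hedged sketch of how the imports above are typically combined (the 'sales' column name and
# the ARIMA order are assumptions for illustration):
# model = ARIMA(data['sales'], order=(2, 0, 1)).fit()
# fig, ax = plt.subplots()
# data['sales'].plot(ax=ax)
# plot_predict(model, start=len(data) - 30, end=len(data) + 10, ax=ax)
# plt.show()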
import requests
import json
import time
import streamlit as st
import pandas as pd
import time
from datetime import datetime as dt
import datetime
import os
from PIL import Image
import sys
import spotipy
import spotipy.util as util
from spotipy.oauth2 import SpotifyClientCredentials
import pandas as pd
import numpy as np
import re
import plotly.graph_objects as go
#To run Enter:
# streamlit run .\YearlyReportST.py
#To Create requirements "pipreqs ./"
header = st.beta_container()
yearlyData = st.beta_container()
topStuffData = st.beta_container()
playlistData = st.beta_container()
JanuaryData = st.beta_container()
FebruaryData = st.beta_container()
MarchData = st.beta_container()
AprilData = st.beta_container()
MayData = st.beta_container()
JuneData = st.beta_container()
JulyData = st.beta_container()
AugustData = st.beta_container()
SeptemberData = st.beta_container()
OctoberData = st.beta_container()
NovemberData = st.beta_container()
DecemberData = st.beta_container()
def monthConvert(monthStr,year):
if monthStr.lower() == 'january' or monthStr.lower() == 'jan':
month = 1
daysInMonth = 31
elif monthStr.lower() == 'february' or monthStr.lower() == 'feb':
month = 2
if (year % 4) == 0:
if (year % 100) == 0:
if (year % 400) == 0:
daysInMonth = 29
else:
daysInMonth = 28
else:
daysInMonth = 29
else:
daysInMonth = 28
elif monthStr.lower() == 'march' or monthStr.lower() == 'mar':
month = 3
daysInMonth = 31
elif monthStr.lower() == 'april' or monthStr.lower() == 'apr':
month = 4
daysInMonth = 30
elif monthStr.lower() == 'may' or monthStr.lower() == 'may':
month = 5
daysInMonth = 31
elif monthStr.lower() == 'june' or monthStr.lower() == 'jun':
month = 6
daysInMonth = 30
elif monthStr.lower() == 'july' or monthStr.lower() == 'jul':
month = 7
daysInMonth = 31
elif monthStr.lower() == 'august' or monthStr.lower() == 'aug':
month = 8
daysInMonth = 31
elif monthStr.lower() == 'september' or monthStr.lower() == 'sep':
month = 9
daysInMonth = 30
elif monthStr.lower() == 'october' or monthStr.lower() == 'oct':
month = 10
daysInMonth = 31
elif monthStr.lower() == 'november' or monthStr.lower() == 'nov':
month = 11
daysInMonth = 30
elif monthStr.lower() == 'december' or monthStr.lower() == 'dec':
month = 12
daysInMonth = 31
else:
month = 0
daysInMonth = 0
return [month,daysInMonth]
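# Hedged worked example (not part of the original script): quick sanity check of monthConvert,
# including the leap-year handling for February.
assert monthConvert('feb', 2024) == [2, 29]
assert monthConvert('September', 2023) == [9, 30]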
def getYearUniTime(yearList):
st.write("Mode 1: Current Year Data")
st.write("Mode 2: Previous Year")
mode = int(st.selectbox("Choose mode 1-2",[1,2]))
if mode == 2:
if len(yearList) == 0:
st.write("No data from previous years... displaying current year")
mode = 1
else:
year = st.selectbox("Enter year",yearList)
timeStart = time.mktime(datetime.datetime(year,1,1,0,0,0).timetuple())
timeEnd = time.mktime(datetime.datetime(year,12,31,23,59,59).timetuple())
timeStart = format(timeStart, ".0f")
timeEnd = format(timeEnd, ".0f")
floatTime = [timeStart,timeEnd]
if mode == 1:
today = dt.today()
dateM = str(dt(today.year, today.month, 1))
year = int(dateM[0:4])
timeStart = time.mktime(datetime.datetime(year,1,1,0,0,0).timetuple())
timeEnd = time.time()
timeStart = format(timeStart, ".0f")
timeEnd = format(timeEnd, ".0f")
floatTime = [timeStart,timeEnd]
    return [str(t) for t in floatTime]
#Functions for matching Last Fm Songs with Spotify Songs
def removeComma(dictionary):
noCommaDict = {}
for k,v in dictionary.items():
noComma = k.replace(",","")
noCommaDict[noComma] = v
return noCommaDict
def decontracted(phrase):
# specific
phrase = re.sub(r"won\'t", "will not", phrase)
phrase = re.sub(r"can\'t", "can not", phrase)
# general
phrase = re.sub(r"n\'t", " not", phrase)
phrase = re.sub(r"\'re", " are", phrase)
phrase = re.sub(r"\'s", " is", phrase)
phrase = re.sub(r"\'d", " would", phrase)
phrase = re.sub(r"\'ll", " will", phrase)
phrase = re.sub(r"\'t", " not", phrase)
phrase = re.sub(r"\'ve", " have", phrase)
phrase = re.sub(r"\'m", " am", phrase)
return phrase
def removeContractions(dictionary):
newDict = {}
for k,v in dictionary.items():
t = decontracted(k)
newDict[t] = v
return newDict
def getUserData(pictureOrSTime):
headers = {"user-agent": USER_AGENT}
url = 'http://ws.audioscrobbler.com/2.0/'
payload = {'method':'user.getInfo'}
payload['user'] = USER_AGENT
payload['api_key'] = API_KEY
payload['format'] = 'json'
response = requests.get(url,headers=headers, params=payload)
data = response.json()
if pictureOrSTime == 'picture':
return data['user']['image'][2]['#text']
else:
timestamp = data['user']['registered']['#text']
datetime = str(dt.fromtimestamp(timestamp))
return int(datetime[:4])
API_KEY = str(os.environ.get('LASTFM_API_KEY')) #Environmental Variable to protect my API key
USER_AGENT = 'DonnyDew' #My Username
def lastfm_weeklyChart(timeList,method):
payload = {'method' : method}
headers = {"user-agent": USER_AGENT}
url = 'http://ws.audioscrobbler.com/2.0/'
payload['user'] = USER_AGENT
payload['api_key'] = API_KEY
payload['format'] = 'json'
payload['from'] = timeList[0]
payload['to'] = timeList[1]
response = requests.get(url,headers=headers, params=payload)
return response.json()
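# Hedged usage sketch (illustrative): fetch the weekly track chart between two unix timestamps
# (passed as strings), e.g. for calendar year 2021:
#   lastfm_weeklyChart(['1609459200', '1640995199'], 'user.getWeeklyTrackChart')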
yearRegistered = getUserData('STime')
countYear = yearRegistered
today = dt.today()
dateM = str(dt(today.year, today.month, 1))
currYear = int(dateM[0:4])
yearList = []
while currYear > countYear:
yearList.append(countYear)
countYear += 1
timeList = getYearUniTime(yearList)
almost_year = time.ctime(int(timeList[0]))
year = int(almost_year[-4:])
doPlaylist = st.selectbox("Would you like to create a sorted playlist?",["No","Yes"])
if doPlaylist == 'Yes': #New playlist not created if user does not wish
newName = st.text_input("Enter new playlist name: ")
#-------------------------------------------------------------------------------------------------------------------
#Spotify Set up
SPOTIPY_CLIENT_ID = str(os.environ.get('SPOTIPY_CLIENT_ID'))
SPOTIPY_CLIENT_SECRET = str(os.environ.get('SPOTIPY_CLIENT_SECRET'))
SPOTIPY_REDIRECT_URI = str(os.environ.get('SPOTIPY_REDIRECT_URI'))
os.environ['SPOTIPY_CLIENT_ID'] = SPOTIPY_CLIENT_ID
os.environ['SPOTIPY_CLIENT_SECRET'] = SPOTIPY_CLIENT_SECRET
os.environ['SPOTIPY_REDIRECT_URI'] = SPOTIPY_REDIRECT_URI
scope = "playlist-modify-public"
user = os.environ.get('SPOT_USER')
token = util.prompt_for_user_token(user, scope)
auth_manager = SpotifyClientCredentials()
sp = spotipy.Spotify(auth=token,auth_manager=auth_manager)
myPlaylistData = sp.current_user_playlists()
total_playlists = len(myPlaylistData['items'])
def createNameIDDic(data):
total_playlists = len(data['items'])
dictionary = {}
for i in range(0,total_playlists):
dictionary[data['items'][i]['name']] = data['items'][i]['id']
return dictionary
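#Illustrative sketch with made-up playlists (shape mirrors how
#sp.current_user_playlists() is indexed above):
# >>> createNameIDDic({'items': [{'name': 'January 2021', 'id': 'abc123'},
# ...                            {'name': 'February 2021', 'id': 'def456'}]})
# {'January 2021': 'abc123', 'February 2021': 'def456'}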
playlistNameIDDict = createNameIDDic(myPlaylistData)
def getPlaylistId():
my_playlists = []
for k,v in playlistNameIDDict.items():
my_playlists.append(k)
playlist = st.selectbox("Select a yearly playlist ",my_playlists)
return playlistNameIDDict[playlist]
playlist_id = getPlaylistId()
continue_button = st.button("Press when ready")
if continue_button == True:
st.write("Loading... (clicking the button again won't make it go faster)")
#-------------------------------------------------------------------------------------------------------------------
data = lastfm_weeklyChart(timeList,'user.getWeeklyTrackChart')
totalSongs = len(data['weeklytrackchart']['track'])
songEverythingDict = {}
songDict = {}
for i in range(0,totalSongs):
songEverythingDict[data['weeklytrackchart']['track'][i]['name'].lower()+ data['weeklytrackchart']['track'][i]['artist']['#text'].lower()] = \
{"Track":data['weeklytrackchart']['track'][i]['name'],"PlayCount":int(data['weeklytrackchart']['track'][i]['playcount']),
"Image":data['weeklytrackchart']['track'][i]['image'][2]['#text'],"Artist":data['weeklytrackchart']['track'][i]['artist']['#text']}
songDict[i] = {"Track":data['weeklytrackchart']['track'][i]['name'],"PlayCount":int(data['weeklytrackchart']['track'][i]['playcount'])
,"Artist":data['weeklytrackchart']['track'][i]['artist']['#text']}
SongList = []
ArtistList = []
SongFreqList = []
for k,v in songEverythingDict.items():
SongList.append(songEverythingDict[k]["Track"])
ArtistList.append(songEverythingDict[k]["Artist"])
SongFreqList.append(songEverythingDict[k]["PlayCount"])
data = {'Song Name' : SongList,'Artist':ArtistList,'PlayCount':SongFreqList}
df = pd.DataFrame(data=data)
#Artist Chart
artistData = lastfm_weeklyChart(timeList,'user.getWeeklyArtistChart')
totalArtists = len(artistData['weeklyartistchart']['artist'])
artArtistList = []
artistFreqList = []
for i in range(0,totalArtists):
artArtistList.append(artistData['weeklyartistchart']['artist'][i]['name'])
artistFreqList.append(artistData['weeklyartistchart']['artist'][i]['playcount'])
arData = {"Artist Name":artArtistList,"Freq":artistFreqList}
ar = pd.DataFrame(data=arData)
#Album Chart
albumData = lastfm_weeklyChart(timeList,'user.getWeeklyAlbumChart')
totalAlbums = len(albumData['weeklyalbumchart']['album'])
alAlbumList = []
albumFreqList = []
for i in range(0,totalAlbums):
alAlbumList.append(albumData['weeklyalbumchart']['album'][i]['name'])
albumFreqList.append(albumData['weeklyalbumchart']['album'][i]['playcount'])
alData = {"Album Name":alAlbumList,"Freq":albumFreqList}
al = pd.DataFrame(data=alData)
#-------------------------------------------------------------------------------------------------
#Genre Feature
def lastfm_artistGetTag(artist):
headers = {"user-agent": USER_AGENT}
url = 'http://ws.audioscrobbler.com/2.0/'
payload = {'method' : 'artist.getTopTags'}
payload['user'] = USER_AGENT
payload['api_key'] = API_KEY
payload['format'] = 'json'
payload["autocorrect"] = 1
payload["artist"] = artist
response = requests.get(url,headers=headers, params=payload)
return response.json()
genreDic = {}
genreCounter = 0
for k,v in songEverythingDict.items():
if genreCounter >= 50: #Cap tag lookups at 50 artists to limit API calls
break
aData = lastfm_artistGetTag(songEverythingDict[k]["Artist"])
genreCounter += 1
for e in range(0,5):
try:
count = aData['toptags']['tag'][e]['count']
tag = aData['toptags']['tag'][e]['name']
if tag in genreDic:
genreDic[tag] += count
else:
genreDic[tag] = count
except IndexError:
break
def sortDictbyValue2(dictionary):
sorted_keys = sorted(dictionary,reverse = True,key=lambda x: (dictionary[x]))
tempDict = {}
for i in sorted_keys:
tempDict[i] = ""
tempDict2 = {}
for (k,v),(k2,v2) in zip(dictionary.items(),tempDict.items()):
tempDict2[k2] = dictionary[k2]
return tempDict2
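#Illustrative sketch: sortDictbyValue2 re-keys the dict in descending order of
#its values, which is what ranks the genre tags below, e.g.
# >>> sortDictbyValue2({"rock": 5, "pop": 9, "jazz": 2})
# {'pop': 9, 'rock': 5, 'jazz': 2}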
genreDic = sortDictbyValue2(genreDic)
genreList = []
genreCountList = []
count = 0
for k,v in genreDic.items():
genreList.append(k)
genreCountList.append(v)
count += 1
if count > 4:
break
genrePie = go.Figure(data=[go.Pie(labels=genreList,values=genreCountList)])
#-------------------------------------------------------------------------------------------------
#Picture Data
def lastfm_trackGetInfo(artist,track):
headers = {"user-agent": USER_AGENT}
url = 'http://ws.audioscrobbler.com/2.0/'
payload = {'method' : 'track.getInfo'}
payload['user'] = USER_AGENT
payload['api_key'] = API_KEY
payload['format'] = 'json'
payload["autocorrect"] = 1
payload["artist"] = artist
payload["track"] = track
payload["username"] = USER_AGENT
response = requests.get(url,headers=headers, params=payload)
return response.json()
def getpicUrl(num):
data = lastfm_trackGetInfo(songDict[num]["Artist"],songDict[num]["Track"])
picUrl = data["track"]['album']['image'][3]["#text"]
return picUrl
#Image 1
count = 0
try:
image1 = getpicUrl(0)
except KeyError:
image1 = ""
while len(image1) < 1 :
try:
image1 = getpicUrl(count+1)
count += 1
except KeyError:
count += 1
#Image 2
try:
image2 = getpicUrl(count+1)
except KeyError:
image2 = ""
while len(image2) < 1 :
try:
image2 = getpicUrl(count+1)
count += 1
except KeyError:
count += 1
while image1 == image2: #Case for when album pic is the same
try:
image2 = getpicUrl(count+1)
count += 1
except KeyError:
count += 1
#-------------------------------------------------------------------------------------------------
#Opening and Yearly printouts
with header:
st.title("Welcome to your Yearly Song Report")
image = Image.open("C:\\Users\\<NAME>\\Pictures\\It's Good.jpg")
st.image(image)
st.subheader("Created by <NAME>")
with yearlyData:
st.header("Yearly Statistics")
picol1,picol2 = st.beta_columns(2)
picol1.image(image1)
picol2.image(image2)
st.dataframe(df)
tabcol1,tabcol2 = st.beta_columns(2)
tabcol1.dataframe(ar)
tabcol2.dataframe(al)
st.subheader("Genre Pie Chart")
st.write(genrePie)
#-----------------------------------------------------------------------------------------------------------
#Spotify Stats
for k,v in playlistNameIDDict.items(): #Get playlist name for print statement later
if v == playlist_id:
playlist = k
for i in range(0,total_playlists):
if myPlaylistData["items"][i]['id'] == playlist_id:
playlistImage = myPlaylistData["items"][i]["images"][0]["url"]
playListItemData = sp.playlist_items(playlist_id=playlist_id,fields="items(track,artist(name,images,trackuri(name)))",market="ES")
totalPlayListSongs = len(playListItemData["items"])
playlistSongs = []
ArtistNames = []
AlbumNames = []
playListURIS = []
playlistEverythingDict = {}
for i in range(0,totalPlayListSongs):
playlistSongs.append(playListItemData["items"][i]["track"]["name"])
ArtistNames.append(playListItemData["items"][i]["track"]["artists"][0]['name'])
AlbumNames.append(playListItemData["items"][i]["track"]["album"]['name'])
playListURIS.append(playListItemData["items"][i]["track"]["uri"])
playlistEverythingDict[playListItemData["items"][i]["track"]["name"].lower()+playListItemData["items"][i]["track"]["artists"][0]['name'].lower()] = \
{"track":playListItemData["items"][i]["track"]["name"],"artist":playListItemData["items"][i]["track"]["artists"][0]['name'],
"album":playListItemData["items"][i]["track"]["album"]['name'],"uri":playListItemData["items"][i]["track"]["uri"],"freq":""}
try:
for (k,v),(k2,v2) in zip(songEverythingDict.items(),playlistEverythingDict.items()):
playlistEverythingDict[k2]['freq'] = songEverythingDict[k2]['PlayCount']
except KeyError as error:
st.write(error)
for i in range(len(playlistSongs)): #Lower case because Spotify and LastFm capitalization is not consistent
playlistSongs[i] = playlistSongs[i].lower()
ArtistNames[i] = ArtistNames[i].lower()
AlbumNames[i] = AlbumNames[i].lower()
totalTracksPlayed = 0
for k,v in songEverythingDict.items():
totalTracksPlayed += songEverythingDict[k]['PlayCount']
songsInMonthPlayList = 0
for k,v in playlistEverythingDict.items():
songsInMonthPlayList += int(playlistEverythingDict[k]['freq'])
percentInMonthPlayList = format(songsInMonthPlayList/totalTracksPlayed,'.2%')
monthSongList = []
monthArtistList = []
monthAlbumList = []
monthSongFreqList = []
for k,v in playlistEverythingDict.items():
monthSongList.append(playlistEverythingDict[k]["track"])
monthArtistList.append(playlistEverythingDict[k]['artist'])
monthSongFreqList.append(playlistEverythingDict[k]['freq'])
monthData = {'Song Name' : monthSongList,'Artist':monthArtistList,'Freq':monthSongFreqList}
md = pd.DataFrame(data=monthData)
#-----------------------------------------------------------------------------------------------------------
#Sort Playlist setup
def sortDictbyValue(dictionary,value):
sorted_keys = sorted(dictionary,reverse = True,key=lambda x: (dictionary[x][value]))
tempDict = {}
for i in sorted_keys:
tempDict[i] = ""
tempDict2 = {}
for (k,v),(k2,v2) in zip(dictionary.items(),tempDict.items()):
tempDict2[k2] = dictionary[k2]
return tempDict2
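#Illustrative sketch: sortDictbyValue orders the nested dict by the given inner
#key (here the listen count), e.g.
# >>> sortDictbyValue({"a": {"freq": 2}, "b": {"freq": 9}}, "freq")
# {'b': {'freq': 9}, 'a': {'freq': 2}}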
playlistEverythingDict = sortDictbyValue(playlistEverythingDict,"freq")
#Set up Track uris for sort playlist
track_uris = []
for k,v in playlistEverythingDict.items():
track_uris.append(playlistEverythingDict[k]["uri"])
def createPlaylist(name):
sp.user_playlist_create(user=user,name=name,public=True,collaborative=False,description="Created by DonnyDew's code ;)")
return name
def getPlaylistId2(name):
return playlistNameIDDict[name]
def sortedPlaylist(user,id,tracks):
sp.user_playlist_replace_tracks(user=user,playlist_id=id,tracks=tracks)
#-----------------------------------------------------------------------------------------------------------
with playlistData:
st.header("Playlist Data")
st.image(playlistImage)
st.write("Image from " + playlist)
st.dataframe(md)
if doPlaylist == "Yes":
newPlaylistName = createPlaylist(newName) #Creates Blank Playlist
#Re-load Data with addition of new playlist
myPlaylistData = sp.current_user_playlists()
playlistNameIDDict = createNameIDDic(myPlaylistData)
playlist_id2 = getPlaylistId2(newPlaylistName) #Getting id from playist just created
sortedPlaylist(user,playlist_id2,track_uris)
st.write("New sorted playlist created")
#-----------------------------------------------------------------------------------------------------------
#Each Month Setup
def getMonthUniTime(year,month):
monthDayList = monthConvert(month,year)
timeStart = time.mktime(datetime.datetime(year,monthDayList[0],1,0,0,0).timetuple())
timeEnd = time.mktime(datetime.datetime(year,monthDayList[0],monthDayList[1],23,59,59).timetuple())
timeStart = format(timeStart, ".0f")
timeEnd = format(timeEnd, ".0f")
floatTime = [timeStart,timeEnd]
return [str(t) for t in floatTime]
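#Illustrative sketch: like getYearUniTime, this returns a [start, end] pair of
#epoch-second strings for the Last.fm 'from'/'to' parameters (exact values
#depend on the local timezone), e.g.
# >>> getMonthUniTime(2021, "January")
# ['<start_epoch>', '<end_epoch>']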
def getMonthlyTables(year,month):
timeList = getMonthUniTime(year,month)
#Tracks
trackData = lastfm_weeklyChart(timeList,'user.getWeeklyTrackChart')
totalSongs = len(trackData['weeklytrackchart']['track'])
songDict = {}
for i in range(0,totalSongs):
songDict[i] = {"Track":trackData['weeklytrackchart']['track'][i]['name'],"PlayCount":int(trackData['weeklytrackchart']['track'][i]['playcount'])
,"Artist":trackData['weeklytrackchart']['track'][i]['artist']['#text']}
totalTracks = 0
for i in range(0,totalSongs):
totalTracks += songDict[i]["PlayCount"]
SongList = []
ArtistList = []
SongFreqList = []
for i in range(0,totalSongs):
SongList.append(songDict[i]["Track"])
ArtistList.append(songDict[i]["Artist"])
SongFreqList.append(songDict[i]["PlayCount"])
tData = {'Song Name' : SongList,'Artist':ArtistList,'PlayCount':SongFreqList}
td = pd.DataFrame(data=tData)
#Artists
artistData = lastfm_weeklyChart(timeList,'user.getWeeklyArtistChart')
totalArtists = len(artistData['weeklyartistchart']['artist'])
artArtistList = []
artistFreqList = []
for i in range(0,totalArtists):
artArtistList.append(artistData['weeklyartistchart']['artist'][i]['name'])
artistFreqList.append(artistData['weeklyartistchart']['artist'][i]['playcount'])
arData = {"Artist Name":artArtistList,"PlayCount":artistFreqList}
ar = pd.DataFrame(data=arData)
#Albums
albumData = lastfm_weeklyChart(timeList,'user.getWeeklyAlbumChart')
totalAlbums = len(albumData['weeklyalbumchart']['album'])
alAlbumList = []
albumFreqList = []
for i in range(0,totalAlbums):
alAlbumList.append(albumData['weeklyalbumchart']['album'][i]['name'])
albumFreqList.append(albumData['weeklyalbumchart']['album'][i]['playcount'])
alData = {"Album Name":alAlbumList,"Freq":albumFreqList}
al = pd.DataFrame(data=alData)
#Return the track, artist and album tables plus the raw track and artist dicts
return [td,ar,al,tData,arData]
def getTotalSongs(year,month):
timeList = getMonthUniTime(year,month)
#Tracks
trackData = lastfm_weeklyChart(timeList,'user.getWeeklyTrackChart')
totalSongs = len(trackData['weeklytrackchart']['track'])
songDict = {}
for i in range(0,totalSongs):
songDict[i] = {"Track":trackData['weeklytrackchart']['track'][i]['name'],
"PlayCount":int(trackData['weeklytrackchart']['track'][i]['playcount'])}
totalTracks = 0
for i in range(0,totalSongs):
totalTracks += songDict[i]["PlayCount"]
return totalTracks
def getPercentinPlaylist(year,month):
timeList = getMonthUniTime(year,month)
#Tracks
data = lastfm_weeklyChart(timeList,'user.getWeeklyTrackChart')
playlist_id = getPlaylistId2(f"{month} {year}")
totalSongs = len(data['weeklytrackchart']['track'])
songDict = {}
for i in range(0,totalSongs):
songDict[data['weeklytrackchart']['track'][i]['name'].lower()+ data['weeklytrackchart']['track'][i]['artist']['#text'].lower()] = \
{"Track":data['weeklytrackchart']['track'][i]['name'],"PlayCount":int(data['weeklytrackchart']['track'][i]['playcount']),
"Artist":data['weeklytrackchart']['track'][i]['artist']['#text']}
songDict = removeComma(songDict)
playListItemData = sp.playlist_items(playlist_id=playlist_id,fields="items(track,artist(name,images,trackuri(name)))",market="ES")
totalPlayListSongs = len(playListItemData["items"])
playlistDict = {}
for i in range(0,totalPlayListSongs):
playlistDict[playListItemData["items"][i]["track"]["name"].lower()+playListItemData["items"][i]["track"]["artists"][0]['name'].lower()] = \
{"track":playListItemData["items"][i]["track"]["name"],"artist":playListItemData["items"][i]["track"]["artists"][0]['name'],"freq":""}
playlistDict = removeComma(playlistDict)
playlistDict = removeContractions(playlistDict)
songDict = removeContractions(songDict)
for (k,v),(k2,v2) in zip(songDict.items(),playlistDict.items()):
playlistDict[k2]['freq'] = songDict[k2]['PlayCount']
totalTracksPlayed = 0
for k,v in songDict.items():
totalTracksPlayed += songDict[k]['PlayCount']
songsInMonthPlayList = 0
for k,v in playlistDict.items():
songsInMonthPlayList += int(playlistDict[k]['freq'])
percentInMonthPlayList = format(songsInMonthPlayList/totalTracksPlayed,'.2%')
return percentInMonthPlayList
theMonths = ["January","February","March","April","May","June","July","August","September","October","November","December"]
monthTables = []
totalSongsList = []
percentInPlaylistList = []
for month in theMonths:
monthTables.append(getMonthlyTables(year,month))
totalSongsList.append(getTotalSongs(year,month))
for month in theMonths:
try:
percentInPlaylistList.append(float(getPercentinPlaylist(year,month)[:-1]))
except KeyError:
percentInPlaylistList.append(0)
pp = pd.DataFrame(data=percentInPlaylistList,index=[1,2,3,4,5,6,7,8,9,10,11,12])
ts = | pd.DataFrame(data=totalSongsList,index=[1,2,3,4,5,6,7,8,9,10,11,12]) | pandas.DataFrame |
"""
This module contains a set of functions that parse the training data set and
compute the centers for the data clusters.
Here you will also find dictionaries containing Sentinel-2 and Landsat 7/8
bands, as well as dictionaries containing the mean values for each class.
"""
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
HCRF_FILE = os.path.join(os.getcwd(), 'TrainingData', 'TrainingData.csv')
SAVEFIG_PATH = os.getcwd()
HA = {}
LA = {}
CI = {}
CC = {}
WAT = {}
SN = {}
HA_L8 = {}
LA_L8 = {}
CI_L8 = {}
CC_L8 = {}
WAT_L8 = {}
SN_L8 = {}
HA_L7 = {}
LA_L7 = {}
CI_L7 = {}
CC_L7 = {}
WAT_L7 = {}
SN_L7 = {}
BANDS = {
1: [433, 453],
2: [457, 522],
3: [542, 578],
4: [650, 680],
5: [697, 712],
6: [732, 747],
7: [776, 796],
8: [855, 875], # 8a
9: [935, 955],
10: [1365, 1385],
11: [1565, 1655],
12: [2100, 2280]
}
BANDS_LANDSAT_8 = {
1: [430, 450],
2: [450, 510],
3: [530, 590],
4: [640, 670],
5: [850, 880],
6: [1570, 1650],
7: [2110, 2290],
8: [500, 680],
9: [1360, 1380]
}
BANDS_LANDSAT_7 = {
1: [450, 520],
2: [520, 600],
3: [630, 690],
4: [770, 900],
5: [1550, 1750],
7: [2064, 2354],
8: [520, 900]
}
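# Illustrative sketch: every entry maps a band number to its [min, max]
# wavelength range in nanometres, so a band centre can be derived on the fly
# (this one-liner is an illustration, not part of the module's API):
# >>> {band: sum(rng) / 2 for band, rng in BANDS.items()}[4]
# 665.0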
def plot_training_spectra(BANDS, HA, LA, CI, CC, WAT, SN, mission="Sentinel2"):
ax = plt.subplot(1, 1, 1)
xpoints = BANDS.keys()
plt.plot(xpoints, HA.values(), 'o:g', label="High Algae")
plt.plot(xpoints, LA.values(), 'o:y', label="Low Algae")
plt.plot(xpoints, CI.values(), 'o:b', label="Clean Ice")
plt.plot(xpoints, CC.values(), 'o:m', label="Cryoconite")
plt.plot(xpoints, WAT.values(), 'o:k', label="Water")
plt.plot(xpoints, SN.values(), 'o:c', label="Snow")
handles, labels = ax.get_legend_handles_labels()
ax.legend(labels)
plt.grid()
plt.xlabel("{} bands".format(mission))
plt.ylabel("Albedo")
plt.title("Spectra of training data")
plt.savefig(os.path.join(SAVEFIG_PATH, 'TrainingSpectra{}.png'.format(mission)))
plt.close()
def create_dataset(file=HCRF_FILE, savefig=True):
hcrf_master = pd.read_csv(file)
HA_hcrf = pd.DataFrame()
LA_hcrf = pd.DataFrame()
HA_hcrf_S2 = pd.DataFrame()
LA_hcrf_S2 = pd.DataFrame()
CI_hcrf = pd.DataFrame()
CC_hcrf = pd.DataFrame()
WAT_hcrf = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
import json
import base64
import datetime
import io
import os
import glob
import pandas as pd
import numpy as np
import dash_table
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from app import app, indicator
from core import define
from core import prepare
from core import fselect
from core import evaluate
current_path = os.getcwd()
MARKET_PATH = os.path.join(current_path, 'market')
# @functools.lru_cache(maxsize=32)
def parse_contents(contents, filename, date):
content_type, content_string = contents.split(',')
decoded = base64.b64decode(content_string)
try:
if 'csv' in filename:
# Assume that the user uploaded a CSV file
df = pd.read_csv(
io.StringIO(decoded.decode('utf-8')))
elif 'xls' in filename:
# Assume that the user uploaded an excel file
df = pd.read_excel(io.BytesIO(decoded))
except Exception as e:
print(e)
return html.Div([
'There was an error processing this file.'
])
return df, filename, datetime.datetime.fromtimestamp(date)
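# Illustrative usage sketch (the upload payload below is synthetic):
# contents = 'data:text/csv;base64,' + base64.b64encode(b'a,b\n1,2').decode()
# df, name, uploaded_at = parse_contents(contents, 'demo.csv', 1609459200)
# -> df is a 1x2 DataFrame, name == 'demo.csv', uploaded_at is a datetime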
def list_files_market():
files_path = glob.glob(os.path.join(MARKET_PATH, '*', '*.csv'))
# folders = [folder for folder in os.listdir(MARKET_PATH)]
files = [os.path.basename(file) for file in files_path]
files_dict = [
{"label": file, "value": file} for file in files
]
return files_dict
def list_models(problem_type='classification'):
# models = ['GradientBoostingClassifier', 'ExtraTreesClassifier',
# 'RandomForestClassifier', 'DecisionTreeClassifier',
# 'LinearDiscriminantAnalysis', 'SVC', 'KNeighborsClassifier',
# 'LogisticRegression', 'AdaBoostClassifier', 'VotingClassifier',
# 'GaussianNB', 'MLPClassifier']
models = []
if problem_type == 'classification':
models = ['AdaBoostClassifier', 'GradientBoostingClassifier',
'BaggingClassifier', 'RandomForestClassifier',
'KNeighborsClassifier', 'DecisionTreeClassifier',
'MLPClassifier', 'ExtraTreesClassifier', 'SVC',
'LinearDiscriminantAnalysis', 'GaussianNB',
'LogisticRegression', 'VotingClassifier',
'XGBoostClassifier', 'LGBMClassifier']
elif problem_type == 'regression':
models = ['AdaBoostRegressor', 'GradientBoostingRegressor',
'BaggingRegressor', 'RandomForestRegressor',
'KNeighborsRegressor', 'DecisionTreeRegressor',
'MLPRegressor', 'ExtraTreesRegressor', 'SVR',
'LinearRegression', 'BayesianRidge',
'XGBoostRegressor', 'LGBMRegressor']
# files_dict = [
# {"label": m, "value": m} for m in models
# ]
return models
def list_prepare():
preparers = ['MinMaxScaler', 'Normalizer',
'StandardScaler', 'RobustScaler']
files_dict = [
{"label": m, "value": m} for m in preparers
]
return files_dict
def list_select():
selectors = ['SelectKBest', 'PrincipalComponentAnalysis',
'ExtraTrees', ]
files_dict = [
{"label": m, "value": m} for m in selectors
]
return files_dict
layout = [
html.Div([
########################### Indicators I ##################################
html.Div(
[
indicator(
"#119DFF", "Type of Problem", "problem_type_model_indicator"
),
indicator(
"#119DFF", "Filename", "filename_model_indicator"
),
html.Div(
[
html.P(
'Uploaded files',
className="twelve columns indicator_text"
),
dcc.Dropdown(
id="files_uploaded_model_dropdown",
options=list_files_market(),
value="",
clearable=False,
searchable=False,
className='indicator_value'
),
],
className="four columns indicator",
),
# indicatorii(
# "#EF553B",
# "Size",
# "right_leads_indicator",
# ),
],
className="row",
),
# dash_table.DataTable(id='datatable-upload-container'),
# dcc.Graph(id='datatable-upload-graph')
],
className="row",
style={"marginBottom": "10"},
),
########################### Indicators II ##################################
html.Div(
[
indicator(
"#00cc96", "Number of samples", "n_samples_model_indicator"
),
indicator(
"#119DFF", "Number of features", "n_features_model_indicator"
),
indicator(
"#EF553B",
"Size in memory",
"size_model_indicator",
),
],
className="row",
style={"marginBottom": "10"},
),
########################### Indicators III ##################################
html.Div(
[
html.Div(
[
html.P(
'Categorical features',
className="twelve columns indicator_text"
),
dcc.Dropdown(
id="cat_features_model_dropdown",
options=[],
value=[],
multi=True,
clearable=False,
searchable=False,
disabled=True,
# className='indicator_value'
),
],
className="four columns indicator",
),
html.Div(
[
html.P(
'Numerical features',
className="twelve columns indicator_text"
),
dcc.Dropdown(
id="num_features_model_dropdown",
options=[],
value=[],
multi=True,
clearable=False,
searchable=False,
disabled=True,
# className='indicator_value'
),
],
className="four columns indicator",
),
html.Div(
[
html.P(
'Response variable',
className="twelve columns indicator_text"
),
dcc.Dropdown(
id="response_model_dropdown",
options=[],
value="",
clearable=False,
searchable=False,
disabled=True,
# className='indicator_value'
),
],
className="four columns indicator",
),
],
className="row",
),
########################### Table ##################################
# html.Div(
# [
#
# html.Div(
# [
# # html.P("Agrupacion por cantidad de CPE"),
#
# # html.Div(id='output-data-upload'),
# dash_table.DataTable(
# id='table-paging-and-sorting',
# # data=dff.to_dict('rows'),
# # columns=[
# # {'name': i, 'id': i, 'deletable': True} for i in sorted(dff.columns)
# # ],
# style_header={
# # 'backgroundColor': 'white',
# 'backgroundColor': '#2a3f5f',
# 'color': 'white',
# 'fontWeight': 'bold'
# },
# style_cell_conditional=[{
# 'if': {'row_index': 'odd'},
# 'backgroundColor': 'rgb(248, 248, 248)'
# }],
# pagination_settings={
# 'current_page': 0,
# 'page_size': 10
# },
# pagination_mode='be',
#
# sorting='be',
# sorting_type='single',
# sorting_settings=[]
# )
#
# ],
# className="twelve columns",
# ),
# html.Div(id='intermediate-value', style={'display': 'none'})
#
# ],
# className="row",
# style={"marginBottom": "10", "marginTop": "10"},
# ),
########################### Models ##################################
html.Div([
html.Div(
[
html.P(
'Preparers',
className="twelve columns indicator_text"
),
html.Hr(),
dcc.Dropdown(
id="prepare_dropdown",
options=list_prepare(),
# options = [
# {"label": 'model'+str(file), "value": file} for file in range(20)
# ],
value=list_prepare(),
# value=[],
clearable=False,
searchable=False,
disabled=True,
className="twelve columns indicator_text",
multi=True,
# className="four columns chart_div"
# className='indicator_value'
),
# dcc.Checklist(
# id='models_checklist',
# options=[
# {'label': 'New York City', 'value': 'NYC'},
# {'label': 'Montréal', 'value': 'MTL'},
# {'label': 'San Francisco', 'value': 'SF'}
# ],
# values=['MTL', 'SF']
# )
],
className="four columns chart_div",
),
html.Div(
[
html.P(
'Selectors',
className="twelve columns indicator_text"
),
html.Hr(),
dcc.Dropdown(
id="selector_dropdown",
options=list_select(),
value=list_select(),
# value=[],
clearable=False,
searchable=False,
disabled=True,
className="twelve columns indicator_text",
multi=True,
# className="four columns chart_div"
# className='indicator_value'
),
# dcc.Checklist(
# id='models_checklist',
# options=[
# {'label': 'New York City', 'value': 'NYC'},
# {'label': 'Montréal', 'value': 'MTL'},
# {'label': 'San Francisco', 'value': 'SF'}
# ],
# values=['MTL', 'SF']
# )
],
className="four columns chart_div",
),
html.Div(
[
html.P(
'Modelers',
className="twelve columns indicator_text"
),
html.Hr(),
dcc.Dropdown(
id="models_model_dropdown",
# options=[],
options=[],
# options = [
# {"label": 'model'+str(file), "value": file} for file in range(20)
# ],
value=[],
# value=[],
clearable=False,
searchable=False,
# disabled=True,
className="twelve columns indicator_text",
multi=True,
# className="four columns chart_div"
# className='indicator_value'
),
# dcc.Checklist(
# id='models_checklist',
# options=[
# {'label': 'New York City', 'value': 'NYC'},
# {'label': 'Montréal', 'value': 'MTL'},
# {'label': 'San Francisco', 'value': 'SF'}
# ],
# values=['MTL', 'SF']
# )
html.Div(
[
# submit button
html.Span(
"All models",
id="autocomplete_models_model_button",
n_clicks=0,
# className="btn btn-primary"
className="button button--primary add"
),
# dcc.Input(
# id="output_chatbot",
# placeholder="Respuesta de Adaline: ",
# type="text",
# value="",
# disabled=True,
# style={"width": "100%"},
# ),
],
# className="six columns",
className="two columns",
# style={"paddingRight": "15"},
),
],
className="four columns chart_div",
),
],
className="row",
style={"marginBottom": "10", "marginTop": "10"},
),
########################### Plots ##################################
html.Div(
[
# html.Div(
# [
# html.P("Agrupacion por monto de CPE" ),
# dcc.Graph(
# id="monto_cpe",
# style={"height": "90%", "width": "98%"},
# config=dict(displayModeBar=False),
# ),
# ],
# className="four columns chart_div"
# ),
html.Div(
[
# html.P("Agrupacion por cantidad de CPE"),
dcc.Graph(
id="out_model_graph",
# figure=grafo_3d_cpe(1,2),
style={"height": "200%", "width": "100%"},
config=dict(displayModeBar=True,
showLink=False),
),
],
className="twelve columns",
),
],
className="row",
style={"marginBottom": "10", "marginTop": "10"},
),
html.Hr(),
########################### Table results ##################################
html.Div(
[
html.Div(
[
# html.P("Agrupacion por cantidad de CPE"),
html.P(
'Ranking Models - CV=10 - Train dataset',
className="twelve columns indicator_text"
),
# html.Div(id='output-data-upload'),
dash_table.DataTable(
id='results_model_table',
# data=dff.to_dict('rows'),
# columns=[
# {'name': i, 'id': i, 'deletable': True} for i in sorted(dff.columns)
# ],
style_header={
# 'backgroundColor': 'white',
'backgroundColor': '#248f24',
'color': 'white',
'fontWeight': 'bold'
},
style_cell_conditional=[{
'if': {'row_index': 'odd'},
'backgroundColor': 'rgb(248, 248, 248)'
}],
row_selectable="multi",
row_deletable=True,
selected_rows=[0],
# pagination_settings={
# 'current_page': 0,
# 'page_size': 10
# },
# pagination_mode='be',
# sorting='be',
# sorting_type='single',
# sorting_settings=[]
)
],
className="four columns",
),
html.Div(id='metrics_model_graph',
className="four columns",
),
html.Div(id='fi_model_graph',
className="four columns"),
# style={"paddingRight": "15"},
# html.Div(
# [
# html.P(
# 'Metrics',
# className="twelve columns indicator_text"
# ),
# dcc.Graph(
# id="metrics_model_graph",
# style={"height": "200%", "width": "100%"},
# config=dict(displayModeBar=True,
# showLink=False),
# ),
#
# ],
# className="four columns",
# ),
# html.Div(
# [
# html.P(
# 'Feature Importance',
# className="twelve columns indicator_text"
# ),
# dcc.Graph(
# id="importance_model_graph",
# style={"height": "200%", "width": "100%"},
# config=dict(displayModeBar=True,
# showLink=False),
# ),
#
# ],
# className="four columns",
# ),
],
className="row",
style={"marginBottom": "10", "marginTop": "10"},
),
########################### Save file ##################################
html.Div(id='hidden_model_div', style={'display': 'none'}),
# html.Div(id='hidden_model_div'),
html.Div(
[
html.Div(
[
# submit button
html.Span(
"RUN",
id="run_model_button",
n_clicks=0,
# className="btn btn-primary"
className="button button--primary add"
),
# dcc.Input(
# id="output_chatbot",
# placeholder="Respuesta de Adaline: ",
# type="text",
# value="",
# disabled=True,
# style={"width": "100%"},
# ),
],
# className="six columns",
className="two columns",
# style={"paddingRight": "15"},
),
html.Div(
id='save_file_model_div',
className="two columns",
# style={"paddingRight": "15"},
),
],
className="row",
style={"marginBottom": "10", "marginTop": "10"},
),
]
@app.callback(
[Output('filename_model_indicator', 'children'),
Output('n_samples_model_indicator', 'children'),
Output('n_features_model_indicator', 'children'),
Output('size_model_indicator', 'children'),
Output('cat_features_model_dropdown', 'options'),
Output('cat_features_model_dropdown', 'value'),
Output('num_features_model_dropdown', 'options'),
Output('num_features_model_dropdown', 'value'),
Output('response_model_dropdown', 'options'),
Output('response_model_dropdown', 'value'),
Output('problem_type_model_indicator', 'children'),
Output('models_model_dropdown', 'options'),
Output('models_model_dropdown', 'value')],
[Input('files_uploaded_model_dropdown', 'value')])
def update_metadata_model(uploaded_file):
if uploaded_file != '':
metadata_folder = os.path.join(MARKET_PATH, uploaded_file.replace('.csv', ''))
metadata_filename = uploaded_file.replace('.csv', '') + '_meta.json'
metadata_path = os.path.join(metadata_folder, metadata_filename)
with open(metadata_path, 'r') as f:
metadata = json.load(f)
filename = uploaded_file
n_samples = metadata['n_samples']
n_features = metadata['n_features']
num_features = metadata['num_features']
cat_features = metadata['cat_features']
response = metadata['response']
problem_type = metadata['problem_type']
size = metadata['size']
else:
filename = ''
n_samples = ''
n_features = ''
size = ''
cat_features = []
num_features = []
response = ''
problem_type = ''
num_options = [
{"label": file, "value": file} for file in num_features
]
cat_options = [
{"label": file, "value": file} for file in cat_features
]
response_options = [
{"label": file, "value": file} for file in [response]
]
models_options = [
{"label": file, "value": file} for file in list_models(problem_type)
]
models_value = np.random.choice(list_models(problem_type), 3, replace=False)
out = tuple([filename, n_samples, n_features, size,
cat_options, cat_features[:10], num_options, num_features[:10],
response_options, response, problem_type,
models_options, models_value])
return out
@app.callback([Output('out_model_graph', 'figure'),
Output('results_model_table', 'data'),
Output('results_model_table', 'columns')],
[Input('run_model_button', 'n_clicks')],
[State('files_uploaded_model_dropdown', 'value'),
State('models_model_dropdown', 'value')])
def show_out_models_model(n_clicks, uploaded_file, models):
if n_clicks > 0 :
# info = json.loads(uploaded_file)
# uploaded_file = info['file']
# df = pd.read_json(info['df'], orient='split')
if uploaded_file != '':
metadata_folder = os.path.join(MARKET_PATH, uploaded_file.replace('.csv', ''))
metadata_filename = uploaded_file.replace('.csv', '') + '_meta.json'
metadata_path = os.path.join(metadata_folder, metadata_filename)
with open(metadata_path, 'r') as f:
metadata = json.load(f)
filename_path = os.path.join(metadata_folder, uploaded_file)
df = pd.read_csv(filename_path)
definer = define.Define(df=df, num_features=metadata['num_features'],
cat_features=metadata['cat_features'],
response=metadata['response']).pipeline()
folder_path = os.path.join(metadata_folder, 'model')
path_report = os.path.join(folder_path, 'report.csv')
path_raw_report = os.path.join(folder_path, 'raw_report.json')
path_metrics = os.path.join(folder_path, 'metrics.json')
path_fi = os.path.join(folder_path, 'feature_importance.json')
# report = None
# raw_report = None
# plot = None
preparer = prepare.Prepare(definer).pipeline()
selector = fselect.Select(definer).pipeline()
if os.path.exists(folder_path):
print(f'Models already exist.')
report_uploaded = pd.read_csv(path_report)
report = report_uploaded[report_uploaded.Model.isin(models)].copy()
report.sort_values(['Mean'], ascending=False, inplace=True)
with open(path_raw_report, 'r') as f:
raw_report_uploaded = json.load(f)
raw_report = [d for d in raw_report_uploaded if d['name'] in models]
evaluator = evaluate.Evaluate(definer, preparer, selector)
if len(raw_report) != 0:
evaluator.raw_report = raw_report
plot = evaluator.plot_models()
uploaded_models = list(report['Model'].values)
diff_models = set(models) - set(uploaded_models)
if len(diff_models) > 0:
evaluator = evaluator.pipeline(diff_models)
# plot = evaluator.plot_models()
report_diff = evaluator.report
report_raw_diff = evaluator.raw_report
# Save reports
total_report = pd.concat([report, report_diff])
report_to_save = pd.concat([report_uploaded, report_diff])
total_raw_report = raw_report + report_raw_diff
raw_report_to_save = raw_report_uploaded + report_raw_diff
evaluator.raw_report = total_raw_report
plot = evaluator.plot_models()
evaluator.report = report_to_save
evaluator.raw_report = raw_report_to_save
evaluator.save_report(path_report)
evaluator.save_raw_report(path_raw_report)
evaluator.get_metrics()
evaluator.get_feature_importance()
# Save metrics
with open(path_metrics, 'r') as f:
metrics = json.load(f)
total_metrics = {**metrics, **evaluator.metrics}
evaluator.metrics = total_metrics
evaluator.save_metrics(path_metrics)
# Save feature importance
with open(path_fi, 'r') as f:
fi = json.load(f)
for k,v in fi.items():
fi[k] = pd.DataFrame(v)
total_fi = {**fi, **evaluator.feature_importance}
evaluator.feature_importance = total_fi
evaluator.save_feature_importance(path_fi)
for name_model, model in evaluator.estimators.items():
path_model = os.path.join(folder_path, name_model+'.model')
evaluator.save_model(model, path_model)
report = total_report
report.sort_values(['Mean'], ascending=False, inplace=True)
report['Mean'] = np.round(report['Mean'], 3)
report['STD'] = np.round(report['STD'], 3)
else:
# to save the figures
os.makedirs(folder_path)
evaluator = evaluate.Evaluate(definer, preparer, selector).pipeline(models)
report = evaluator.report
report['Mean'] = np.round(report['Mean'], 3)
report['STD'] = np.round(report['STD'], 3)
plot = evaluator.plot_models()
evaluator.save_report(path_report)
evaluator.save_raw_report(path_raw_report)
evaluator.get_metrics()
evaluator.get_feature_importance()
evaluator.save_metrics(path_metrics)
evaluator.save_feature_importance(path_fi)
for name_model, model in evaluator.estimators.items():
path_model = os.path.join(folder_path, name_model+'.model')
evaluator.save_model(model, path_model)
# models = list_models(metadata['problem_type'])
# start = time.time()
# evaluator = evaluate.Evaluate(definer, preparer, selector).pipeline(models)
# end = time.time()
# duration = end - start
# print(round(duration, 3))
# folder_path = os.path.join(metadata_folder, 'model')
# evaluator.save_model(filename_path)
# report = evaluator.report
# print(evaluator.best_pipelines)
# plot = evaluator.plot_models()
columns_table=[
{'name': i, 'id': i, 'deletable': True} for i in report.columns
]
# name = 'AdaBoostClassifier'
# return tuple([plot, report.to_dict('rows'), columns_table, evaluator.plot_metrics(name)])
return tuple([plot, report.to_dict('rows'), columns_table])
return tuple([None for _ in range(3)])
# else:
# return tuple([None for _ in range(3)])
# return tuple([None for _ in range(3)]) #causes error
# if selected_rows is None:
# selected_rows = []
#
# if len(selected_rows) > 0:
# print('selected>>>', selected_rows)
# for index in selected_rows:
# name = rows[index]['Model']
# print(name)
# return tuple([plot, report.to_dict('rows'), columns_table, evaluator.plot_metrics(name)])
@app.callback(Output('models_model_dropdown', 'value'),
[Input('autocomplete_models_model_button', 'n_clicks')],
[State('problem_type_model_indicator', 'children')])
def autocomplete_models_model(n_clicks, problem_type):
if n_clicks > 0:
models_value = list_models(problem_type)
print(models_value)
return models_value
# return tuple([None for _ in range(3)])
@app.callback(
[Output('metrics_model_graph', "children"),
Output('fi_model_graph', "children")],
[Input('results_model_table', "derived_virtual_data"),
Input('results_model_table', "derived_virtual_selected_rows")],
[State('files_uploaded_model_dropdown', 'value')])
def show_confmatrix_featimportance(rows, selected_rows, uploaded_file):
# print('selected>>>', selected_rows)
if selected_rows is None:
selected_rows = []
if len(selected_rows) > 0:
metadata_folder = os.path.join(MARKET_PATH, uploaded_file.replace('.csv', ''))
folder_path = os.path.join(metadata_folder, 'model')
path_metrics = os.path.join(folder_path, 'metrics.json')
path_fi = os.path.join(folder_path, 'feature_importance.json')
with open(path_metrics, 'r') as f:
metrics = json.load(f)
with open(path_fi, 'r') as f:
fi = json.load(f)
for k, v in fi.items():
fi[k] = | pd.DataFrame(v) | pandas.DataFrame |
from IMLearn import BaseEstimator
from challenge.agoda_cancellation_estimator import AgodaCancellationEstimator
from IMLearn.utils import split_train_test
from datetime import datetime as dt
import plotly.graph_objects as go
import numpy as np
import pandas as pd
SATURDAY = 5
def load_data(filename: str):
"""
Load Agoda booking cancellation dataset
Parameters
----------
filename: str
Path to Agoda booking cancellation dataset
Returns
-------
Design matrix and response vector in either of the following formats:
1) Single dataframe with last column representing the response
2) Tuple of pandas.DataFrame and Series
3) Tuple of ndarray of shape (n_samples, n_features) and ndarray of shape (n_samples,)
"""
# TODO - replace below code with any desired preprocessing
full_data = pd.read_csv(filename).drop_duplicates()
full_data = full_data.drop(
full_data[full_data["cancellation_policy_code"] == "UNKNOWN"].index)
full_data["cancellation_datetime"].fillna(0, inplace=True)
return full_data.dropna()
def str_to_time(x):
return dt.strptime(x, r"%Y-%m-%d %H:%M:%S")
def preprocess(full_data: pd.DataFrame):
full_data.dropna(inplace=True)
features = full_data[["h_booking_id",
"hotel_star_rating",
"guest_is_not_the_customer",
"no_of_adults",
"no_of_children",
"no_of_extra_bed",
"no_of_room",
"original_selling_amount",
"is_user_logged_in",
"is_first_booking",
"request_nonesmoke",
"request_latecheckin",
"request_highfloor",
"request_largebed",
"request_twinbeds",
"request_airport",
"request_earlycheckin"]].copy()
to_days = lambda x: x.days
booking_date = full_data["booking_datetime"].apply(str_to_time)
checkin_date = full_data["checkin_date"].apply(str_to_time)
checkout_date = full_data["checkout_date"].apply(str_to_time)
features["hotel_live_time"] = (pd.Timestamp.now() - pd.to_datetime(
full_data.hotel_live_date)).dt.days
features["booking_checkin_difference"] = (
checkin_date - booking_date).apply(to_days)
features["length_of_stay"] = (checkout_date - checkin_date).apply(to_days)
arrival_day = checkin_date.apply(lambda x: x.weekday())
features["stay_over_weekend"] = (features["length_of_stay"] > 6) | (
(arrival_day <= SATURDAY) & (SATURDAY <= (arrival_day + features[
"length_of_stay"])))
features = pd.concat([features,
pd.get_dummies(full_data.accommadation_type_name,
drop_first=True),
pd.get_dummies(full_data.charge_option,
drop_first=True)], axis=1)
features["has_cancellation_history"] = full_data["h_customer_id"].apply(
number_of_times_cancelled)
def current_policy(days_from_checkin, length_of_stay, penalty_code):
penalties = []
for penalty in penalty_code.split("_"):
if "D" not in penalty:
continue
penalty_days, penalty_calculation = penalty.split("D")
if penalty_calculation[-1] == "N":
percentage = int(penalty_calculation[:-1]) / length_of_stay
else:
percentage = float(penalty_calculation[:-1])
penalties.append((float(penalty_days), percentage))
penalties.sort(key=lambda x: x[0], reverse=True)
current_penalty = 0
for days, penalty in penalties:
if days < days_from_checkin:
break
current_penalty = penalty
return current_penalty
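# Illustrative sketch of the parser above (codes are made up but follow the
# same "<days>D<amount><P|N>" grammar it expects):
# >>> current_policy(10, 3, "7D50P_1D100P")  # more than 7 days before check-in
# 0
# >>> current_policy(5, 3, "7D50P_1D100P")   # inside the 7-day window
# 50.0
# >>> current_policy(0, 3, "7D50P_1D100P")   # inside the 1-day window
# 100.0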
features["cancellation_policy_at_time_of_order"] = pd.concat(
[features[["booking_checkin_difference", "length_of_stay"]],
full_data["cancellation_policy_code"]], axis=1).apply(
lambda x: current_policy(x["booking_checkin_difference"],
x["length_of_stay"],
x["cancellation_policy_code"]), axis=1)
cancellation_window_start_diff = features.booking_checkin_difference - 7
cancellation_window_start_diff.name = "cancellation_window_start"
features[
"cancellation_policy_at_start_of_cancellation_window"] = pd.concat(
[cancellation_window_start_diff, features["length_of_stay"],
full_data["cancellation_policy_code"]], axis=1).apply(
lambda x: current_policy(x["cancellation_window_start"],
x["length_of_stay"],
x["cancellation_policy_code"]), axis=1)
cancellation_window_end_diff = features.booking_checkin_difference - 35
cancellation_window_end_diff.name = "cancellation_window_end"
features[
"cancellation_policy_at_end_of_cancellation_window"] = pd.concat(
[cancellation_window_end_diff, features["length_of_stay"],
full_data["cancellation_policy_code"]], axis=1).apply(
lambda x: current_policy(x["cancellation_window_end"],
x["length_of_stay"],
x["cancellation_policy_code"]), axis=1)
features["cancellation_ploicy_change_during_window"] = \
features.cancellation_policy_at_end_of_cancellation_window - \
features.cancellation_policy_at_start_of_cancellation_window
return features.dropna()
def evaluate_and_export(estimator: BaseEstimator, X: np.ndarray,
filename: str):
"""
Export to specified file the prediction results of given estimator on given testset.
File saved is in csv format with a single column named 'predicted_values' and n_samples rows containing
predicted values.
Parameters
----------
estimator: BaseEstimator or any object implementing predict() method as in BaseEstimator (for example sklearn)
Fitted estimator to use for prediction
X: ndarray of shape (n_samples, n_features)
Test design matrix to predict its responses
filename:
path to store file at
"""
pd.DataFrame(estimator.predict(X), columns=["predicted_values"]).to_csv(
filename, index=False)
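# Illustrative sketch (names are placeholders): once the estimator is fitted,
# the weekly test set would be preprocessed like the training frame and then
# exported, e.g.
# evaluate_and_export(model, test_design_matrix, "predictions_week_1.csv")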
def preprocess_labels(cancellation_date: pd.Series,
booking_datetime: pd.Series):
def str_to_date(x):
return dt.strptime(x, r"%Y-%m-%d")
cancellation = cancellation_date.apply(
lambda x: dt.now() if x == 0 else str_to_date(x))
booking = booking_datetime.apply(str_to_time)
diff = (pd.to_datetime(cancellation, unit="s") - pd.to_datetime(booking,
unit="s")).dt.days
return (diff >= 7) & (diff < 35)
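# Illustrative note: the label is True only when the cancellation happened 7-34
# days (inclusive) after booking; uncancelled rows get a synthetic now() stamp,
# which normally lands outside that window, e.g. booked 2018-08-01 and
# cancelled 2018-08-20 -> diff = 19 days -> True.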
number_of_times_customer_canceled = dict()
def number_of_times_cancelled(id):
if id in number_of_times_customer_canceled:
return number_of_times_customer_canceled[id]
return 0
if __name__ == '__main__':
np.random.seed(0)
# Load data
df = load_data(
"../datasets/agoda_cancellation_train.csv")
design_matrix = preprocess(df)
for id, cancellation in df[
["h_customer_id", "cancellation_datetime"]].itertuples(index=False):
if cancellation == 0:
if id in number_of_times_customer_canceled:
number_of_times_customer_canceled[id] += 1
else:
number_of_times_customer_canceled[id] = 1
cancellation_labels = preprocess_labels(df.cancellation_datetime,
df.booking_datetime)
# Fit model over data
model = AgodaCancellationEstimator().fit(design_matrix, cancellation_labels)
print( | pd.read_csv("test_set_week_1.csv") | pandas.read_csv |
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
# nothing to iterate over so nothing defined values should remain
# unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
repl = lambda: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x, y=None: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
# test regex named groups
values = Series(['Foo Bar Baz', NA])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group('middle').swapcase()
result = values.str.replace(pat, repl)
exp = Series(['bAR', NA])
tm.assert_series_equal(result, exp)
def test_replace_compiled_regex(self):
# GH 15446
values = Series(['fooBAD__barBAD', NA])
# test with compiled regex
pat = re.compile(r'BAD[_]*')
result = values.str.replace(pat, '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace(pat, '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace(pat, '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace(pat, '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
tm.assert_series_equal(result, exp)
# case and flags provided to str.replace will have no effect
# and will produce warnings
values = Series(['fooBAD__barBAD__bad', NA])
pat = re.compile(r'BAD[_]*')
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', flags=re.IGNORECASE)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=False)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=True)
# test with callable
values = Series(['fooBAD__barBAD', NA])
repl = lambda m: m.group(0).swapcase()
pat = re.compile('[a-z][A-Z]{2}')
result = values.str.replace(pat, repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
result = values.str.repeat(3)
exp = Series(['aaa', 'bbb', NA, 'ccc', NA, 'ddd'])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series(['a', 'bb', NA, 'cccc', NA, 'dddddd'])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
rs = Series(mixed).str.repeat(3)
xp = Series(['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('d')])
result = values.str.repeat(3)
exp = Series([u('aaa'), u('bbb'), NA, u('ccc'), NA, u('ddd')])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series([u('a'), u('bb'), NA, u('cccc'), NA, u('dddddd')])
tm.assert_series_equal(result, exp)
def test_match(self):
# New match behavior introduced in 0.13
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*BAD[_]+.*BAD')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# test passing as_indexer still works but is ignored
values = Series(['fooBAD__barBAD', NA, 'foo'])
exp = Series([True, NA, False])
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=True)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=False)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
tm.assert_series_equal(result, exp)
pytest.raises(ValueError, values.str.match, '.*(BAD[_]+).*(BAD)',
as_indexer=False)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)')
xp = Series([True, NA, True, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# na GH #6609
res = Series(['a', 0, np.nan]).str.match('a', na=False)
exp = Series([True, False, False])
assert_series_equal(exp, res)
res = Series(['a', 0, np.nan]).str.match('a')
exp = Series([True, np.nan, np.nan])
assert_series_equal(exp, res)
def test_extract_expand_None(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)', expand=None)
def test_extract_expand_unspecified(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)')
def test_extract_expand_False(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er, er, er,
er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# GH9980
# Index only works with one regex group since
# multi-group would expand to a frame
idx = Index(['A1', 'A2', 'A3', 'A4', 'B5'])
with tm.assert_raises_regex(ValueError, "supported"):
idx.str.extract('([AB])([123])', expand=False)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=False)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=False)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=False)
assert result.name == 'uno'
exp = klass(['A', 'A'], name='uno')
if klass == Series:
tm.assert_series_equal(result, exp)
else:
tm.assert_index_equal(result, exp)
s = Series(['A1', 'B2', 'C3'])
# one group, no matches
result = s.str.extract('(_)', expand=False)
exp = Series([NA, NA, NA], dtype=object)
tm.assert_series_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=False)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=False)
exp = Series(['A', 'B', NA], name='letter')
tm.assert_series_equal(result, exp)
# two named groups
result = s.str.extract('(?P<letter>[AB])(?P<number>[123])',
expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, '3']],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], ['C', NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
s = Series(data, index=index)
result = s.str.extract(r'(\d)', expand=False)
exp = Series(['1', '2', NA], index=index)
tm.assert_series_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=False)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
# single_series_name_is_preserved.
s = Series(['a3', 'b3', 'c2'], name='bob')
r = s.str.extract(r'(?P<sue>[a-z])', expand=False)
e = Series(['a', 'b', 'c'], name='sue')
tm.assert_series_equal(r, e)
assert r.name == e.name
def test_extract_expand_True(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er,
er, er, er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=True)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=True)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result_df = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=True)
assert isinstance(result_df, DataFrame)
result_series = result_df['uno']
assert_series_equal(result_series, Series(['A', 'A'], name='uno'))
def test_extract_series(self):
# extract should give the same result whether or not the
# series has a name.
for series_name in None, "series_name":
s = Series(['A1', 'B2', 'C3'], name=series_name)
# one group, no matches
result = s.str.extract('(_)', expand=True)
exp = DataFrame([NA, NA, NA], dtype=object)
tm.assert_frame_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=True)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=True)
exp = DataFrame({"letter": ['A', 'B', NA]})
tm.assert_frame_equal(result, exp)
# two named groups
result = s.str.extract(
'(?P<letter>[AB])(?P<number>[123])',
expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=True)
exp = DataFrame(e_list, columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
def test_extract_optional_groups(self):
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, '3']
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
result = Series(data, index=index).str.extract(
r'(\d)', expand=True)
exp = DataFrame(['1', '2', NA], index=index)
tm.assert_frame_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
def test_extract_single_group_returns_frame(self):
# GH11386 extract should always return DataFrame, even when
# there is only one group. Prior to v0.18.0, extract returned
# Series when there was only one group in the regex.
s = Series(['a3', 'b3', 'c2'], name='series_name')
r = s.str.extract(r'(?P<letter>[a-z])', expand=True)
e = DataFrame({"letter": ['a', 'b', 'c']})
tm.assert_frame_equal(r, e)
def test_extractall(self):
subject_list = [
'dave@google.com',
'tdhock5@gmail.com',
'maudelaperriere@gmail.com',
'rob@gmail.com some text steve@gmail.com',
'a@b.com some text c@d.com and e@f.com',
np.nan,
"",
]
expected_tuples = [
("dave", "google", "com"),
("tdhock5", "gmail", "com"),
("maudelaperriere", "gmail", "com"),
("rob", "gmail", "com"), ("steve", "gmail", "com"),
("a", "b", "com"), ("c", "d", "com"), ("e", "f", "com"),
]
named_pattern = r"""
(?P<user>[a-z0-9]+)
@
(?P<domain>[a-z]+)
\.
(?P<tld>[a-z]{2,4})
"""
expected_columns = ["user", "domain", "tld"]
S = Series(subject_list)
# extractall should return a DataFrame with one row for each
# match, indexed by the subject from which the match came.
expected_index = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(3, 0),
(3, 1),
(4, 0),
(4, 1),
(4, 2),
], names=(None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = S.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# The index of the input Series should be used to construct
# the index of the output DataFrame:
series_index = MultiIndex.from_tuples([
("single", "Dave"),
("single", "Toby"),
("single", "Maude"),
("multiple", "robAndSteve"),
("multiple", "abcdef"),
("none", "missing"),
("none", "empty"),
])
Si = Series(subject_list, series_index)
expected_index = MultiIndex.from_tuples([
("single", "Dave", 0),
("single", "Toby", 0),
("single", "Maude", 0),
("multiple", "robAndSteve", 0),
("multiple", "robAndSteve", 1),
("multiple", "abcdef", 0),
("multiple", "abcdef", 1),
("multiple", "abcdef", 2),
], names=(None, None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Si.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# MultiIndexed subject with names.
Sn = Series(subject_list, series_index)
Sn.index.names = ("matches", "description")
expected_index.names = ("matches", "description", "match")
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Sn.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# optional groups.
subject_list = ['', 'A1', '32']
named_pattern = '(?P<letter>[AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(named_pattern)
expected_index = MultiIndex.from_tuples([
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=['letter', 'number'])
tm.assert_frame_equal(computed_df, expected_df)
# only one of two groups has a name.
pattern = '([AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(pattern)
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=[0, 'number'])
tm.assert_frame_equal(computed_df, expected_df)
def test_extractall_single_group(self):
# extractall(one named group) returns DataFrame with one named
# column.
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
r = s.str.extractall(r'(?P<letter>[a-z])')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame({"letter": ['a', 'b', 'd', 'c']}, i)
tm.assert_frame_equal(r, e)
# extractall(one un-named group) returns DataFrame with one
# un-named column.
r = s.str.extractall(r'([a-z])')
e = DataFrame(['a', 'b', 'd', 'c'], i)
tm.assert_frame_equal(r, e)
def test_extractall_single_group_with_quantifier(self):
# extractall(one un-named group with quantifier) returns
# DataFrame with one un-named column (GH13382).
s = Series(['ab3', 'abc3', 'd4cd2'], name='series_name')
r = s.str.extractall(r'([a-z]+)')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame(['ab', 'abc', 'd', 'cd'], i)
tm.assert_frame_equal(r, e)
def test_extractall_no_matches(self):
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
# one un-named group.
r = s.str.extractall('(z)')
e = DataFrame(columns=[0])
tm.assert_frame_equal(r, e)
# two un-named groups.
r = s.str.extractall('(z)(z)')
e = DataFrame(columns=[0, 1])
tm.assert_frame_equal(r, e)
# one named group.
r = s.str.extractall('(?P<first>z)')
e = DataFrame(columns=["first"])
tm.assert_frame_equal(r, e)
# two named groups.
r = s.str.extractall('(?P<first>z)(?P<second>z)')
e = DataFrame(columns=["first", "second"])
tm.assert_frame_equal(r, e)
# one named, one un-named.
r = s.str.extractall('(z)(?P<second>z)')
e = DataFrame(columns=[0,
"second"])
tm.assert_frame_equal(r, e)
def test_extractall_stringindex(self):
s = Series(["a1a2", "b1", "c1"], name='xxx')
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)],
names=[None, 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
# index should return the same result as the default index without name,
# thus index.name doesn't affect the result
for idx in [Index(["a1a2", "b1", "c1"]),
Index(["a1a2", "b1", "c1"], name='xxx')]:
res = idx.str.extractall(r"[ab](?P<digit>\d)")
tm.assert_frame_equal(res, exp)
import numpy as np
import pandas as pd
import LDA
from sklearn import preprocessing
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
# read data from csv
df = pd.read_csv("student-por.csv", sep=";")
# -*- coding: utf-8 -*-
import csv
import os
import platform
import codecs
import re
import sys
from datetime import datetime
import pytest
import numpy as np
from pandas._libs.lib import Timestamp
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex
from pandas import compat
from pandas.compat import (StringIO, BytesIO, PY3,
range, lrange, u)
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas.io.common import URLError
from pandas.io.parsers import TextFileReader, TextParser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = 'Only length-1 decimal markers supported'
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(data), decimal='')
def test_bad_stream_exception(self):
# Issue 13652:
# This test validates that both the Python and C engines raise
# UnicodeDecodeError instead of the C engine raising ParserError
# and swallowing the exception that caused the read to fail.
handle = open(self.csv_shiftjis, "rb")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup('utf-8')
# stream must be binary UTF8
stream = codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader,
codec.streamwriter)
if compat.PY3:
msg = "'utf-8' codec can't decode byte"
else:
msg = "'utf8' codec can't decode byte"
with tm.assert_raises_regex(UnicodeDecodeError, msg):
self.read_csv(stream)
stream.close()
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
self.read_csv(fname, index_col=0, parse_dates=True)
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
assert isinstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# see gh-8217
# Series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
assert not result._is_view
def test_malformed(self):
# see gh-6607
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#')
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
it.read(5)
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read(3)
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read()
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# skipfooter
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#',
skipfooter=1)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>""" # noqa
pytest.raises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
assert len(df) == 3
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]], dtype=np.int64)
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
tm.assert_index_equal(df.columns,
Index(['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4']))
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
expected = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]})
out = self.read_csv(StringIO(data))
tm.assert_frame_equal(out, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns, pd.Index(['A', 'B', 'C', 'D']))
assert df.index.name == 'index'
assert isinstance(
df.index[0], (datetime, np.datetime64, Timestamp))
assert df.values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns,
pd.Index(['A', 'B', 'C', 'D', 'E']))
assert isinstance(df.index[0], (datetime, np.datetime64, Timestamp))
assert df.loc[:, ['A', 'B', 'C', 'D']].values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = self.read_table(fin, sep=";", encoding="utf-8", header=None)
assert isinstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
pytest.raises(ValueError, self.read_csv, StringIO(data))
def test_read_duplicate_index_explicit(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
result = self.read_table(StringIO(data), sep=',', index_col=0)
expected = self.read_table(StringIO(data), sep=',', ).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# make sure an error isn't thrown
self.read_csv(StringIO(data))
self.read_table(StringIO(data), sep=',')
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
assert data['A'].dtype == np.bool_
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.float64
assert data['B'].dtype == np.int64
def test_read_nrows(self):
expected = self.read_csv(StringIO(self.data1))[:3]
df = self.read_csv(StringIO(self.data1), nrows=3)
tm.assert_frame_equal(df, expected)
# see gh-10476
df = self.read_csv(StringIO(self.data1), nrows=3.0)
tm.assert_frame_equal(df, expected)
msg = r"'nrows' must be an integer >=0"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=1.2)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=-1)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# with invalid chunksize value:
msg = r"'chunksize' must be an integer >=1"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=1.3)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=0)
def test_read_chunksize_and_nrows(self):
# gh-15755
# With nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=2, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# chunksize > nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# with changing "size":
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(reader.get_chunk(size=2), df.iloc[:2])
tm.assert_frame_equal(reader.get_chunk(size=4), df.iloc[2:5])
with pytest.raises(StopIteration):
reader.get_chunk(size=3)
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
assert len(piece) == 2
def test_read_chunksize_generated_index(self):
# GH 12185
reader = self.read_csv(StringIO(self.data1), chunksize=2)
df = self.read_csv(StringIO(self.data1))
tm.assert_frame_equal(pd.concat(reader), df)
reader = self.read_csv(StringIO(self.data1), chunksize=2, index_col=0)
df = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(pd.concat(reader), df)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# See gh-6607
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
assert isinstance(treader, TextFileReader)
# gh-3967: stopping iteration when chunksize is specified
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
assert len(result) == 3
tm.assert_frame_equal(pd.concat(result), expected)
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# test bad parameter (skipfooter)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skipfooter=1)
pytest.raises(ValueError, reader.read, 3)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_blank_df(self):
# GH 14545
data = """a,b
"""
df = self.read_csv(StringIO(data), header=[0])
expected = DataFrame(columns=['a', 'b'])
tm.assert_frame_equal(df, expected)
round_trip = self.read_csv(StringIO(
expected.to_csv(index=False)), header=[0])
tm.assert_frame_equal(round_trip, expected)
data_multiline = """a,b
c,d
"""
df2 = self.read_csv(StringIO(data_multiline), header=[0, 1])
cols = MultiIndex.from_tuples([('a', 'c'), ('b', 'd')])
expected2 = DataFrame(columns=cols)
tm.assert_frame_equal(df2, expected2)
round_trip = self.read_csv(StringIO(
expected2.to_csv(index=False)), header=[0, 1])
tm.assert_frame_equal(round_trip, expected2)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
assert df.index.name is None
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = self.read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pandas-dev/pandas/master/'
'pandas/tests/io/parser/data/salaries.csv')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salaries.csv')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@pytest.mark.slow
def test_file(self):
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salaries.csv')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
pytest.skip("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_path_pathlib(self):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(df.to_csv,
lambda p: self.read_csv(p, index_col=0))
tm.assert_frame_equal(df, result)
def test_path_localpath(self):
df = tm.makeDataFrame()
result = tm.round_trip_localpath(
df.to_csv,
lambda p: self.read_csv(p, index_col=0))
tm.assert_frame_equal(df, result)
def test_nonexistent_path(self):
# gh-2428: pls no segfault
# gh-14086: raise more helpful FileNotFoundError
path = '%s.csv' % tm.rands(10)
pytest.raises(compat.FileNotFoundError, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
assert result['D'].isna()[1:].all()
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
assert pd.isna(result.iloc[0, 29])
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
s.close()
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
assert len(result) == 50
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
assert len(result) == 50
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
assert got == expected
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"''' # noqa
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
assert result['SEARCH_TERM'][2] == ('SLAGBORD, "Bergslagen", '
'IKEA:s 1700-tals serie')
tm.assert_index_equal(result.columns,
Index(['SEARCH_TERM', 'ACTUAL_URL']))
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
tm.assert_series_equal(result['Numbers'], expected['Numbers'])
def test_chunks_have_consistent_numerical_type(self):
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
assert type(df.a[0]) is np.float64
assert df.a.dtype == np.float
def test_warn_if_chunks_have_mismatched_type(self):
warning_type = False
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
# see gh-3866: if chunks are different types and can't
# be coerced using numerical types, then issue warning.
if self.engine == 'c' and self.low_memory:
warning_type = DtypeWarning
with tm.assert_produces_warning(warning_type):
df = self.read_csv(StringIO(data))
assert df.a.dtype == np.object
def test_integer_overflow_bug(self):
# see gh-2601
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
assert result[0].dtype == np.float64
result = self.read_csv(StringIO(data), header=None, sep=r'\s+')
assert result[0].dtype == np.float64
def test_catch_too_many_names(self):
# see gh-5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
pytest.raises(ValueError, self.read_csv, StringIO(data),
header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
# see gh-3374, gh-6607
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep=r'\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(self):
# see gh-10022
data = '\n hello\nworld\n'
result = self.read_csv(StringIO(data), header=None)
assert len(result) == 2
# see gh-9735: this issue is C parser-specific (bug when
# parsing whitespace and characters at chunk boundary)
if self.engine == 'c':
chunk1 = 'a' * (1024 * 256 - 2) + '\na'
chunk2 = '\n a'
result = self.read_csv(StringIO(chunk1 + chunk2), header=None)
expected = DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
# see gh-10184
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=0)
expected = DataFrame([], columns=['y'], index=Index([], name='x'))
tm.assert_frame_equal(result, expected)
def test_empty_with_multiindex(self):
# see gh-10467
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=['x', 'y'])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_reversed_multiindex(self):
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_float_parser(self):
# see gh-9565
data = '45e-1,4.5,45.,inf,-inf'
result = self.read_csv(StringIO(data), header=None)
expected = DataFrame([[float(s) for s in data.split(',')]])
tm.assert_frame_equal(result, expected)
def test_scientific_no_exponent(self):
# see gh-12215
df = DataFrame.from_items([('w', ['2e']), ('x', ['3E']),
('y', ['42e']), ('z', ['632E'])])
data = df.to_csv(index=False)
for prec in self.float_precision_choices:
df_roundtrip = self.read_csv(
StringIO(data), float_precision=prec)
tm.assert_frame_equal(df_roundtrip, df)
def test_int64_overflow(self):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
# 13007854817840016671868 > UINT64_MAX, so this
# will overflow and return object as the dtype.
result = self.read_csv(StringIO(data))
assert result['ID'].dtype == object
# 13007854817840016671868 > UINT64_MAX, so attempts
# to cast to either int64 or uint64 will result in
# an OverflowError being raised.
for conv in (np.int64, np.uint64):
pytest.raises(OverflowError, self.read_csv,
StringIO(data), converters={'ID': conv})
# These numbers fall right inside the int64-uint64 range,
# so they should be parsed as string.
ui_max = np.iinfo(np.uint64).max
i_max = np.iinfo(np.int64).max
i_min = np.iinfo(np.int64).min
for x in [i_max, i_min, ui_max]:
result = self.read_csv(StringIO(str(x)), header=None)
expected = DataFrame([x])
tm.assert_frame_equal(result, expected)
# These numbers fall just outside the int64-uint64 range,
# so they should be parsed as string.
too_big = ui_max + 1
too_small = i_min - 1
for x in [too_big, too_small]:
result = self.read_csv(StringIO(str(x)), header=None)
expected = DataFrame([str(x)])
tm.assert_frame_equal(result, expected)
# No numerical dtype can hold both negative and uint64 values,
# so they should be cast as string.
data = '-1\n' + str(2**63)
expected = DataFrame([str(-1), str(2**63)])
result = self.read_csv(StringIO(data), header=None)
tm.assert_frame_equal(result, expected)
data = str(2**63) + '\n-1'
expected = DataFrame([str(2**63), str(-1)])
result = self.read_csv(StringIO(data), header=None)
tm.assert_frame_equal(result, expected)
def test_empty_with_nrows_chunksize(self):
# see gh-9535
expected = DataFrame([], columns=['foo', 'bar'])
result = self.read_csv(StringIO('foo,bar\n'), nrows=10)
tm.assert_frame_equal(result, expected)
result = next(iter(self.read_csv(
StringIO('foo,bar\n'), chunksize=10)))
tm.assert_frame_equal(result, expected)
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
result = self.read_csv(StringIO('foo,bar\n'),
nrows=10, as_recarray=True)
result = DataFrame(result[2], columns=result[1],
index=result[0])
tm.assert_frame_equal(DataFrame.from_records(
result), expected, check_index_type=False)
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
result = next(iter(self.read_csv(StringIO('foo,bar\n'),
chunksize=10, as_recarray=True)))
result = DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(DataFrame.from_records(result), expected,
check_index_type=False)
def test_eof_states(self):
# see gh-10728, gh-10548
# With skip_blank_lines = True
expected = DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
# gh-10728: WHITESPACE_LINE
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# gh-10548: EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# EAT_CRNL_NOP
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# EAT_COMMENT
data = 'a,b,c\n4,5,6#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# SKIP_LINE
data = 'a,b,c\n4,5,6\nskipme'
result = self.read_csv(StringIO(data), skiprows=[2])
tm.assert_frame_equal(result, expected)
# With skip_blank_lines = False
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(
StringIO(data), comment='#', skip_blank_lines=False)
expected = DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# IN_FIELD
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = DataFrame(
[['4', 5, 6], [' ', None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# EAT_CRNL
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = DataFrame(
[[4, 5, 6], [None, None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# Should produce exceptions
# ESCAPED_CHAR
data = "a,b,c\n4,5,6\n\\"
pytest.raises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# ESCAPE_IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"\\'
pytest.raises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"'
pytest.raises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
def test_uneven_lines_with_usecols(self):
# See gh-12203
csv = r"""a,b,c
0,1,2
3,4,5,6,7
8,9,10
"""
# make sure that an error is still thrown
# when the 'usecols' parameter is not provided
msg = r"Expected \d+ fields in line \d+, saw \d+"
with tm.assert_raises_regex(ValueError, msg):
df = self.read_csv(StringIO(csv))
expected = DataFrame({
'a': [0, 3, 8],
'b': [1, 4, 9]
})
usecols = [0, 1]
df = self.read_csv(StringIO(csv), usecols=usecols)
tm.assert_frame_equal(df, expected)
usecols = ['a', 'b']
df = self.read_csv(StringIO(csv), usecols=usecols)
tm.assert_frame_equal(df, expected)
def test_read_empty_with_usecols(self):
# See gh-12493
names = ['Dummy', 'X', 'Dummy_2']
usecols = names[1:2] # ['X']
# first, check that the parser raises the correct
# error when faced with no columns to parse,
# with or without usecols
errmsg = "No columns to parse from file"
with tm.assert_raises_regex(EmptyDataError, errmsg):
self.read_csv(StringIO(''))
with tm.assert_raises_regex(EmptyDataError, errmsg):
self.read_csv(StringIO(''), usecols=usecols)
expected = DataFrame(columns=usecols, index=[0], dtype=np.float64)
df = self.read_csv(StringIO(',,'), names=names, usecols=usecols)
tm.assert_frame_equal(df, expected)
expected = DataFrame(columns=usecols)
df = self.read_csv(StringIO(''), names=names, usecols=usecols)
tm.assert_frame_equal(df, expected)
def test_trailing_spaces(self):
data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n" # noqa
expected = DataFrame([[1., 2., 4.],
[5.1, np.nan, 10.]])
# gh-8661, gh-8679: this should ignore six lines including
# lines with trailing whitespace and blank lines
df = self.read_csv(StringIO(data.replace(',', ' ')),
header=None, delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data.replace(',', ' ')),
header=None, delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6],
skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
# gh-8983: test skipping set of rows after a row with trailing spaces
expected = DataFrame({"A": [1., 5.1], "B": [2., np.nan],
"C": [4., 10]})
df = self.read_table(StringIO(data.replace(',', ' ')),
delim_whitespace=True,
skiprows=[1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
def test_raise_on_sep_with_delim_whitespace(self):
# see gh-6607
data = 'a b c\n1 2 3'
with tm.assert_raises_regex(ValueError,
'you can only specify one'):
self.read_table(StringIO(data), sep=r'\s', delim_whitespace=True)
def test_single_char_leading_whitespace(self):
# see gh-9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), delim_whitespace=True,
skipinitialspace=True)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), skipinitialspace=True)
tm.assert_frame_equal(result, expected)
def test_empty_lines(self):
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
expected = np.array([[1., 2., 4.],
[5., np.nan, 10.],
[-70., .4, 1.]])
df = self.read_csv(StringIO(data))
tm.assert_numpy_array_equal(df.values, expected)
df = self.read_csv(StringIO(data.replace(',', ' ')), sep=r'\s+')
tm.assert_numpy_array_equal(df.values, expected)
expected = np.array([[1., 2., 4.],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[5., np.nan, 10.],
[np.nan, np.nan, np.nan],
[-70., .4, 1.]])
df = self.read_csv(StringIO(data), skip_blank_lines=False)
tm.assert_numpy_array_equal(df.values, expected)
def test_whitespace_lines(self):
data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
expected = np.array([[1, 2., 4.],
[5., np.nan, 10.]])
df = self.read_csv(StringIO(data))
tm.assert_numpy_array_equal(df.values, expected)
def test_regex_separator(self):
# see gh-6607
data = """ A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
"""
df = self.read_table(StringIO(data), sep=r'\s+')
expected = self.read_csv(StringIO(re.sub('[ ]+', ',', data)),
index_col=0)
assert expected.index.name is None
tm.assert_frame_equal(df, expected)
data = ' a b c\n1 2 3 \n4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep=r'\s+')
expected = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
@tm.capture_stdout
def test_verbose_import(self):
text = """a,b,c,d
one,1,2,3
one,1,2,3
,1,2,3
one,1,2,3
,1,2,3
,1,2,3
one,1,2,3
two,1,2,3"""
# Engines are verbose in different ways.
self.read_csv(StringIO(text), verbose=True)
output = sys.stdout.getvalue()
if self.engine == 'c':
assert 'Tokenization took:' in output
assert 'Parser memory cleanup took:' in output
else: # Python engine
assert output == 'Filled 3 NA values in column a\n'
# Reset the stdout buffer.
sys.stdout = StringIO()
text = """a,b,c,d
one,1,2,3
two,1,2,3
three,1,2,3
four,1,2,3
five,1,2,3
,1,2,3
seven,1,2,3
eight,1,2,3"""
self.read_csv(StringIO(text), verbose=True, index_col=0)
output = sys.stdout.getvalue()
# Engines are verbose in different ways.
if self.engine == 'c':
assert 'Tokenization took:' in output
assert 'Parser memory cleanup took:' in output
else: # Python engine
assert output == 'Filled 1 NA values in column a\n'
def test_iteration_open_handle(self):
if PY3:
pytest.skip(
"won't work in Python 3 {0}".format(sys.version_info))
with tm.ensure_clean() as path:
with open(path, 'wb') as f:
f.write('AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG')
with open(path, 'rb') as f:
for line in f:
if 'CCC' in line:
break
if self.engine == 'c':
pytest.raises(Exception, self.read_table,
f, squeeze=True, header=None)
else:
result = self.read_table(f, squeeze=True, header=None)
expected = Series(['DDD', 'EEE', 'FFF', 'GGG'], name=0)
tm.assert_series_equal(result, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
assert expected.A.dtype == 'int64'
assert expected.B.dtype == 'float'
assert expected.C.dtype == 'float'
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
df2 = self.read_csv(StringIO(data), sep=';', decimal=',')
assert df2['Number1'].dtype == float
assert df2['Number2'].dtype == float
assert df2['Number3'].dtype == float
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,+Inf
d,-Inf
e,INF
f,-INF
g,+INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = self.read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = self.read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_raise_on_no_columns(self):
# single newline
data = "\n"
pytest.raises(EmptyDataError, self.read_csv, StringIO(data))
# test with more than a single newline
data = "\n\n\n"
        pytest.raises(EmptyDataError, self.read_csv, StringIO(data))
##############################################################
# #
# <NAME> and <NAME> (2017) #
# Machine Learning for the Quantified Self #
# Springer #
# Chapter 8 #
# #
##############################################################
import pandas as pd
import scipy.linalg
import copy
import random
import numpy as np
from scipy import linalg
import inspyred
from Chapter8.dynsys.Model import Model
from Chapter8.dynsys.Evaluator import Evaluator
from pybrain.structure import RecurrentNetwork
from pybrain.structure import LinearLayer, SigmoidLayer, FullConnection
from pybrain.datasets import SequentialDataSet
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.supervised.trainers import RPropMinusTrainer
from pybrain.tools.validation import testOnSequenceData
from pybrain.tools.shortcuts import buildNetwork
from Chapter7.Evaluation import ClassificationEvaluation
from Chapter7.Evaluation import RegressionEvaluation
import sys
import matplotlib.pyplot as plot
import pyflux as pf
from statsmodels.tsa.arima_model import ARIMA
# The class includes several algorithms that capture the temporal dimension explicitly for
# classification problems.
class TemporalClassificationAlgorithms:
# This function converts a single dataset (no test or train split up) with possibly
# categorical attributes to a numerical dataset, where categorical attributes are
# taken as dummy variables (i.e. binary columns for each possible value).
def create_numerical_single_dataset(self, dataset):
return copy.deepcopy(pd.get_dummies(pd.DataFrame(dataset), prefix='', prefix_sep=''))
# This function converts a train and test dataset with possibly
# categorical attributes to a numerical dataset, where categorical attributes are
# taken as dummy variables (i.e. binary columns for each possible value).
def create_numerical_multiple_dataset(self, train, test):
# Combine the two datasets as we want to include all possible values
# for the categorical attribute.
total_dataset = train.append(test)
# Convert and split up again.
total_dataset = pd.get_dummies(pd.DataFrame(total_dataset), prefix='', prefix_sep='')
new_train = copy.deepcopy(total_dataset.iloc[0:len(train.index),:])
new_test = copy.deepcopy(total_dataset.iloc[len(train.index):len(train.index)+len(test.index),:])
return new_train, new_test
# This function initializes an echo state network given the specified number of
# inputs, outputs, and nodes in the reservoir. It returns the weight matrices W_in,
# W, and W_back.
def initialize_echo_state_network(self, inputs, outputs, reservoir):
# http://minds.jacobs-university.de/mantas/code
# Create random matrices.
Win = (np.random.rand(reservoir,1+inputs)-0.5) * 1
W = np.random.rand(reservoir,reservoir)-0.5
Wback = (np.random.rand(reservoir,outputs)-0.5) * 1
# Adjust W to "guarantee" the echo state property.
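        # Note: this rescaling sets the spectral radius of W to exactly 1.25, the value used in the
        # referenced demo code; the echo state property is usually associated with an effective
        # spectral radius below 1, which here also depends on the leaking rate a used later.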
rhoW = max(abs(linalg.eig(W)[0]))
W *= 1.25 / rhoW
return Win, W, Wback
    # Predict the values of an echo state network given the matrices Win, W, Wback, Wout, the setting for a,
    # the reservoir size, and the dataset X. The cols are the target (class) columns used to label the
    # output. Finally, per_time_step=True means that we feed the correct output back into the network
    # instead of our own prediction (this requires a non-empty y_true). It returns the predicted class
    # and the probabilities per class in the form of a pandas dataframe with a column per class value.
    # http://minds.jacobs-university.de/sites/default/files/uploads/mantas/code/minimalESN.py.txt
def predict_values_echo_state_network(self, Win, W, Wback, Wout, a, reservoir_size, X, y_true, cols, per_time_step):
# http://minds.jacobs-university.de/mantas/code
# Set the initial activation to zero.
x = np.zeros((reservoir_size,1))
Y = []
# Predict all time points.
for t in range(0, len(X.index)):
# Set the input according to X.
u = X.iloc[t,:].values
# If we have a previous time point
if t > 0:
# If we predict per time step, set the previous value
# to the true previous value.
if per_time_step:
y_prev = y_true.iloc[t-1,:].values
# Otherwise set it to the predicted value.
else:
y_prev = y
# If we do not have a previous time point, set the values to 0.
else:
y_prev = np.array([0]*len(cols))
# Compute the activation of the reservoir.
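            # Leaky-integrator update applied below, with x the reservoir state:
            #   x_t = (1 - a) * x_{t-1} + a * tanh(Win @ [1; u_t] + W @ x_{t-1} + Wback @ y_{t-1})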
x = (1-a)*x + a*np.tanh( np.dot( Win, np.vstack(np.insert(u,0,1)) ) + np.dot( W, x ) + np.dot( Wback, np.vstack(y_prev) ))
# And the output.
y = np.tanh( np.dot( Wout, np.hstack(np.insert(np.insert(x, 0, u), 0, 1)) ))
Y.append(y)
y_result = pd.DataFrame(Y, columns=cols, index=X.index)
return y_result.idxmax(axis=1), y_result
# Given a dictionary with an ordered list of parameter values to try, return
# all possible combinations in the form of a list.
def generate_parameter_combinations(self, parameter_dict, params):
combinations = []
if len(params) == 1:
values = parameter_dict[params[0]]
for val in values:
combinations.append([val])
return combinations
else:
params_without_first_element = copy.deepcopy(list(params))
params_without_first_element.pop(0)
params_without_first_element_combinations = self.generate_parameter_combinations(parameter_dict, params_without_first_element)
values_first_element = parameter_dict[list(params)[0]]
for i in range(0, len(values_first_element)):
for j in range(0, len(params_without_first_element_combinations)):
list_obj = [values_first_element[i]]
list_obj.extend(params_without_first_element_combinations[j])
combinations.append(list_obj)
return combinations
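    # Illustrative example: generate_parameter_combinations({'a': [1, 2], 'b': [3]}, ['a', 'b'])
    # returns [[1, 3], [2, 3]], i.e. the cartesian product with one list per combination.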
def gridsearch_reservoir_computing(self, train_X, train_y, test_X, test_y, per_time_step=False, error = 'mse', gridsearch_training_frac=0.7):
tuned_parameters = {'a': [0.6, 0.8], 'reservoir_size':[400, 700, 1000]}
# tuned_parameters = {'a': [0.4], 'reservoir_size':[250]}
        params = list(tuned_parameters.keys())  # list() so that the keys can be indexed below
combinations = self.generate_parameter_combinations(tuned_parameters, params)
split_point = int(gridsearch_training_frac * len(train_X.index))
train_params_X = train_X.iloc[0:split_point,]
test_params_X = train_X.iloc[split_point:len(train_X.index),]
train_params_y = train_y.iloc[0:split_point,]
test_params_y = train_y.iloc[split_point:len(train_X.index),]
if error == 'mse':
best_error = sys.float_info.max
elif error == 'accuracy':
best_error = 0
best_combination = []
for comb in combinations:
print(comb)
# Order of the keys might have changed.
keys = list(tuned_parameters.keys())
pred_train_y, pred_test_y, pred_train_y_prob, pred_test_y_prob = self.reservoir_computing(train_params_X, train_params_y, test_params_X, test_params_y,
reservoir_size=comb[keys.index('reservoir_size')], a=comb[keys.index('a')], per_time_step=per_time_step,
gridsearch=False)
if error == 'mse':
eval = RegressionEvaluation()
mse = eval.mean_squared_error(test_params_y, pred_test_y_prob)
if mse < best_error:
best_error = mse
best_combination = comb
elif error == 'accuracy':
eval = ClassificationEvaluation()
acc = eval.accuracy(test_params_y, pred_test_y)
if acc > best_error:
best_error = acc
best_combination = comb
print('-------')
print(best_combination)
print('-------')
return best_combination[keys.index('reservoir_size')], best_combination[keys.index('a')]
def normalize(self, train, test, range_min, range_max):
total = copy.deepcopy(train).append(test, ignore_index=True)
max = total.max()
min = total.min()
difference = max - min
difference = difference.replace(0, 1)
new_train = (((train - min)/difference) * (range_max - range_min)) + range_min
new_test = (((test - min)/difference) * (range_max - range_min)) + range_min
return new_train, new_test, min, max
def denormalize(self, y, min, max, range_min, range_max):
difference = max - min
difference = difference.replace(0, 1)
y = (y - range_min)/(range_max - range_min)
return (y * difference) + min
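    # Example of the normalize/denormalize pair (illustrative values): for a column holding
    # [0, 5, 10] across train and test and range_min=0, range_max=1, min=0 and max=10, so the
    # values map to [0.0, 0.5, 1.0]; denormalize with the same min/max/range recovers [0, 5, 10].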
# Apply an echo state network for classification upon the training data (with the specified reservoir size),
# and use the created network to predict the outcome for both the
# test and training set. It returns the categorical predictions for the training and test set as well as the
# probabilities associated with each class, each class being represented as a column in the data frame.
def reservoir_computing(self, train_X, train_y, test_X, test_y, reservoir_size=100, a=0.8, per_time_step=False, gridsearch=True, gridsearch_training_frac=0.7, error='accuracy'):
# Inspired by http://minds.jacobs-university.de/mantas/code
if gridsearch:
reservoir_size, a = self.gridsearch_reservoir_computing(train_X, train_y, test_X, test_y, per_time_step=per_time_step, gridsearch_training_frac=gridsearch_training_frac, error=error)
# We assume these parameters as fixed, but feel free to change them as well.
washout_period = 10
# Create a numerical dataset without categorical attributes.
new_train_X, new_test_X = self.create_numerical_multiple_dataset(train_X, test_X)
if test_y is None:
new_train_y = self.create_numerical_single_dataset(train_y)
new_test_y = None
else:
new_train_y, new_test_y = self.create_numerical_multiple_dataset(train_y, test_y)
# We normalize the input.....
new_train_X, new_test_X, min_X, max_X = self.normalize(new_train_X, new_test_X, 0, 1)
new_train_y, new_test_y, min_y, max_y = self.normalize(new_train_y, new_test_y, -0.9, 0.9)
inputs = len(new_train_X.columns)
outputs = len(new_train_y.columns)
# Randomly initialize our weight vectors.
Win, W, Wback = self.initialize_echo_state_network(inputs, outputs, reservoir_size)
# Allocate memory for our result matrices.
X = np.zeros((len(train_X.index)-washout_period, 1+inputs+reservoir_size))
Yt = new_train_y.iloc[washout_period:len(new_train_y.index),:].values
Yt = np.arctanh( Yt )
x = np.zeros((reservoir_size,1))
# Train over all time points.
for t in range(0, len(new_train_X.index)):
# Set the inputs according to the values seen in the training set.
u = new_train_X.iloc[t,:].values
# Set the previous target value to the real value if available.
if t > 0:
y_prev= new_train_y.iloc[t-1,:].values
else:
y_prev = np.array([0]*outputs)
# Determine the activation of the reservoir.
x = (1-a)*x + a*np.tanh(np.dot(Win, np.vstack(np.insert(u,0,1)) ) + np.dot( W, x ) + np.dot( Wback, np.vstack(y_prev) ))
# And store the values obtained after the washout period.
if t >= washout_period:
X[t-washout_period,:] = np.hstack(np.insert(np.insert(x, 0, u), 0, 1))
# Train Wout.
X_p = linalg.pinv(X)
Wout = np.transpose(np.dot( X_p, Yt ))
# And predict for both training and test set.
pred_train_y, pred_train_y_prob = self.predict_values_echo_state_network(Win, W, Wback, Wout, a, reservoir_size, new_train_X, new_train_y, new_train_y.columns, per_time_step)
pred_test_y, pred_test_y_prob = self.predict_values_echo_state_network(Win, W, Wback, Wout, a, reservoir_size, new_test_X, new_test_y, new_train_y.columns, per_time_step)
pred_train_y_prob = self.denormalize(pred_train_y_prob, min_y, max_y, -0.9, 0.9)
pred_test_y_prob = self.denormalize(pred_test_y_prob, min_y, max_y, -0.9, 0.9)
return pred_train_y, pred_test_y, pred_train_y_prob, pred_test_y_prob
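    # Illustrative usage sketch (argument values are assumptions, not from the original code):
    #   algo = TemporalClassificationAlgorithms()
    #   class_train, class_test, prob_train, prob_test = algo.reservoir_computing(
    #       train_X, train_y, test_X, test_y, reservoir_size=400, a=0.8,
    #       per_time_step=True, gridsearch=False)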
# Creates a recurrent neural network dataset according to the pybrain specification.
# Returns this new format.
def rnn_dataset(self, X, y):
# Create an empty dataset.
ds = SequentialDataSet(len(X.columns), len(y.columns))
# And add all rows...
for i in range(0, len(X.index)):
ds.addSample(tuple(X.iloc[i,:].values), tuple(y.iloc[i,:].values))
return ds
# Do a gridsearch for the recurrent neural network...
def gridsearch_recurrent_neural_network(self, train_X, train_y, test_X, test_y, error='accuracy', gridsearch_training_frac=0.7):
tuned_parameters = {'n_hidden_neurons': [50, 100], 'iterations':[250, 500], 'outputbias': [True]}
params = list(tuned_parameters.keys())
combinations = self.generate_parameter_combinations(tuned_parameters, params)
split_point = int(gridsearch_training_frac * len(train_X.index))
train_params_X = train_X.iloc[0:split_point,]
test_params_X = train_X.iloc[split_point:len(train_X.index),]
train_params_y = train_y.iloc[0:split_point,]
test_params_y = train_y.iloc[split_point:len(train_X.index),]
if error == 'mse':
best_error = sys.float_info.max
elif error == 'accuracy':
best_error = 0
best_combination = []
for comb in combinations:
print(comb)
# Order of the keys might have changed.
keys = list(tuned_parameters.keys())
# print(keys)
pred_train_y, pred_test_y, pred_train_y_prob, pred_test_y_prob = self.recurrent_neural_network(
train_params_X, train_params_y, test_params_X, test_params_y,
n_hidden_neurons=comb[keys.index('n_hidden_neurons')],
iterations=comb[keys.index('iterations')],
outputbias=comb[keys.index('outputbias')], gridsearch=False
)
if error == 'mse':
eval = RegressionEvaluation()
mse = eval.mean_squared_error(test_params_y, pred_test_y_prob)
if mse < best_error:
best_error = mse
best_combination = comb
elif error == 'accuracy':
eval = ClassificationEvaluation()
acc = eval.accuracy(test_params_y, pred_test_y)
if acc > best_error:
best_error = acc
best_combination = comb
print ('-------')
print (best_combination)
print ('-------')
return best_combination[params.index('n_hidden_neurons')], best_combination[params.index('iterations')], best_combination[params.index('outputbias')]
# Apply a recurrent neural network for classification upon the training data (with the specified number of
# hidden neurons and iterations), and use the created network to predict the outcome for both the
# test and training set. It returns the categorical predictions for the training and test set as well as the
# probabilities associated with each class, each class being represented as a column in the data frame.
def recurrent_neural_network(self, train_X, train_y, test_X, test_y, n_hidden_neurons=50, iterations=100, gridsearch=False, gridsearch_training_frac=0.7, outputbias=False, error='accuracy'):
if gridsearch:
n_hidden_neurons, iterations, outputbias = self.gridsearch_recurrent_neural_network(train_X, train_y, test_X, test_y, gridsearch_training_frac=gridsearch_training_frac, error=error)
# Create numerical datasets first.
new_train_X, new_test_X = self.create_numerical_multiple_dataset(train_X, test_X)
new_train_y, new_test_y = self.create_numerical_multiple_dataset(train_y, test_y)
# We normalize the input.....
new_train_X, new_test_X, min_X, max_X = self.normalize(new_train_X, new_test_X, 0, 1)
new_train_y, new_test_y, min_y, max_y = self.normalize(new_train_y, new_test_y, 0.1, 0.9)
# Create the proper pybrain datasets.
ds_training = self.rnn_dataset(new_train_X, new_train_y)
ds_test = self.rnn_dataset(new_test_X, new_test_y)
inputs = len(new_train_X.columns)
outputs = len(new_train_y.columns)
# Build the network with the proper parameters.
n = buildNetwork(inputs, n_hidden_neurons, outputs, hiddenclass=SigmoidLayer, outclass=SigmoidLayer, outputbias=outputbias, recurrent=True)
# Train using back propagation through time.
#trainer = BackpropTrainer(n, dataset=ds_training, verbose=False, momentum=0.9, learningrate=0.01)
trainer = RPropMinusTrainer(n, dataset=ds_training, verbose=False)
for i in range(0, iterations):
trainer.train()
# for mod in n.modules:
# for conn in n.connections[mod]:
# print conn
# for cc in range(len(conn.params)):
# print conn.whichBuffers(cc), conn.params[cc]
# Determine performance on the training and test set....
# Y_train = []
# for i in range(0, len(new_train_X.index)):
# input = tuple(new_train_X.iloc[i,:].values)
# output = n.activate(input)
# Y_train.append(output)
# Y_test = []
# for i in range(0, len(new_test_X.index)):
# Y_test.append(n.activate(tuple(new_test_X.iloc[i,:].values)))
Y_train = []
Y_test = []
for sample, target in ds_training.getSequenceIterator(0):
Y_train.append(n.activate(sample).tolist())
for sample, target in ds_test.getSequenceIterator(0):
Y_test.append(n.activate(sample).tolist())
y_train_result = pd.DataFrame(Y_train, columns=new_train_y.columns, index=train_y.index)
        y_test_result = pd.DataFrame(Y_test, columns=new_test_y.columns, index=test_y.index)
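        # The original snippet is truncated here; a minimal completion that mirrors the
        # denormalize/argmax steps of reservoir_computing above (assumed, not taken verbatim
        # from the source file):
        y_train_result = self.denormalize(y_train_result, min_y, max_y, 0.1, 0.9)
        y_test_result = self.denormalize(y_test_result, min_y, max_y, 0.1, 0.9)
        return y_train_result.idxmax(axis=1), y_test_result.idxmax(axis=1), y_train_result, y_test_result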
'''
Tests for bipartitepandas
DATE: March 2021
'''
import pytest
import numpy as np
import pandas as pd
import bipartitepandas as bpd
import pickle
###################################
##### Tests for BipartiteBase #####
###################################
def test_refactor_1():
# 2 movers between firms 0 and 1, and 1 stayer at firm 2.
worker_data = []
# Firm 0 -> 1
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Firm 1 -> 0
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 0, 'y': 1., 't': 2})
# Firm 2 -> 2
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 2, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
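    # clean_data() cleans the frame and makes worker/firm ids contiguous, get_collapsed_long()
    # collapses consecutive observations at the same firm into spells, and get_es() converts the
    # data to event-study format with paired columns (j1/j2, y1/y2) per transition.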
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
def test_refactor_2():
# 2 movers between firms 0 and 1, and 1 stayer at firm 2. Time has jumps.
worker_data = []
# Firm 0 -> 1
# Time 1 -> 3
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 3})
# Firm 1 -> 0
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 0, 'y': 1., 't': 2})
# Firm 2 -> 2
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 2, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
def test_refactor_3():
# 1 mover between firms 0 and 1, and 2 between firms 1 and 2.
worker_data = []
# Firm 0 -> 1
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Firm 2 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 1, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 2
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[2]['i'] == 2
assert movers.iloc[2]['j1'] == 2
assert movers.iloc[2]['j2'] == 1
assert movers.iloc[2]['y1'] == 1
assert movers.iloc[2]['y2'] == 2
def test_refactor_4():
# 1 mover between firms 0 and 1, and 2 between firms 1 and 2.
worker_data = []
# Firm 0 -> 1 -> 0
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
worker_data.append({'i': 0, 'j': 0, 'y': 1., 't': 3})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Firm 2 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 1, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j1'] == 1
assert movers.iloc[2]['j2'] == 2
assert movers.iloc[2]['y1'] == 1
assert movers.iloc[2]['y2'] == 1
assert movers.iloc[3]['i'] == 2
assert movers.iloc[3]['j1'] == 2
assert movers.iloc[3]['j2'] == 1
assert movers.iloc[3]['y1'] == 1
assert movers.iloc[3]['y2'] == 2
def test_refactor_5():
# 1 mover between firms 0 and 1, and 2 between firms 1 and 2. Time has jumps.
worker_data = []
# Firm 0 -> 1 -> 0
# Time 1 -> 2 -> 4
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
worker_data.append({'i': 0, 'j': 0, 'y': 1., 't': 4})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Firm 2 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 1, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j1'] == 1
assert movers.iloc[2]['j2'] == 2
assert movers.iloc[2]['y1'] == 1
assert movers.iloc[2]['y2'] == 1
assert movers.iloc[3]['i'] == 2
assert movers.iloc[3]['j1'] == 2
assert movers.iloc[3]['j2'] == 1
assert movers.iloc[3]['y1'] == 1
assert movers.iloc[3]['y2'] == 2
def test_refactor_6():
# 1 mover between firms 0 and 1, and 2 between firms 1 and 2. Time has jumps.
worker_data = []
# Firm 0 -> 0 -> 1 -> 0
# Time 1 -> 2 -> 3 -> 5
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 2})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 3})
worker_data.append({'i': 0, 'j': 0, 'y': 1., 't': 5})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Firm 2 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 1, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j1'] == 1
assert movers.iloc[2]['j2'] == 2
assert movers.iloc[2]['y1'] == 1
assert movers.iloc[2]['y2'] == 1
assert movers.iloc[3]['i'] == 2
assert movers.iloc[3]['j1'] == 2
assert movers.iloc[3]['j2'] == 1
assert movers.iloc[3]['y1'] == 1
assert movers.iloc[3]['y2'] == 2
def test_refactor_7():
# 1 mover between firms 0 and 1, and 2 between firms 1 and 2. Time has jumps.
worker_data = []
# Firm 0 -> 0 -> 1 -> 0
# Time 1 -> 3 -> 4 -> 6
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 3})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 4})
worker_data.append({'i': 0, 'j': 0, 'y': 1., 't': 6})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Firm 2 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 1, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j1'] == 1
assert movers.iloc[2]['j2'] == 2
assert movers.iloc[2]['y1'] == 1
assert movers.iloc[2]['y2'] == 1
assert movers.iloc[3]['i'] == 2
assert movers.iloc[3]['j1'] == 2
assert movers.iloc[3]['j2'] == 1
assert movers.iloc[3]['y1'] == 1
assert movers.iloc[3]['y2'] == 2
def test_refactor_8():
# 2 movers between firms 0 and 1, and 1 between firms 1 and 2. Time has jumps.
worker_data = []
# Firm 0 -> 0 -> 1 -> 0
# Time 1 -> 3 -> 4 -> 6
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 3})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 4})
worker_data.append({'i': 0, 'j': 0, 'y': 1., 't': 6})
# Firm 0 -> 1
worker_data.append({'i': 1, 'j': 0, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 2})
# Firm 2 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 1, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j1'] == 0
assert movers.iloc[2]['j2'] == 1
assert movers.iloc[2]['y1'] == 1
assert movers.iloc[2]['y2'] == 1
assert movers.iloc[3]['i'] == 2
assert movers.iloc[3]['j1'] == 2
assert movers.iloc[3]['j2'] == 1
assert movers.iloc[3]['y1'] == 1
assert movers.iloc[3]['y2'] == 2
def test_refactor_9():
# 2 movers between firms 0 and 1, and 1 between firms 1 and 2. Time has jumps.
worker_data = []
# Firm 0 -> 0 -> 1 -> 0
# Time 1 -> 3 -> 4 -> 6
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 3})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 4})
worker_data.append({'i': 0, 'j': 0, 'y': 1., 't': 6})
# Firm 1 -> 0
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 0, 'y': 1., 't': 2})
# Firm 2 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 1, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j1'] == 1
assert movers.iloc[2]['j2'] == 0
assert movers.iloc[2]['y1'] == 1
assert movers.iloc[2]['y2'] == 1
assert movers.iloc[3]['i'] == 2
assert movers.iloc[3]['j1'] == 2
assert movers.iloc[3]['j2'] == 1
assert movers.iloc[3]['y1'] == 1
assert movers.iloc[3]['y2'] == 2
def test_refactor_10():
# 1 mover between firms 0 and 1, 1 between firms 1 and 2, and 1 stayer at firm 2.
worker_data = []
# Firm 0 -> 1
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Firm 2 -> 2
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j1'] == 2
assert stayers.iloc[0]['j2'] == 2
assert stayers.iloc[0]['y1'] == 1
assert stayers.iloc[0]['y2'] == 1
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 2
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
def test_refactor_11():
    # 1 mover who visits firms 0, 1, 2 and 3; 1 mover between firms 1 and 2; and 1 stayer at firm 2.
# Check going to event study and back to long, for data where movers have extended periods where they stay at the same firm
worker_data = []
# Firm 0 -> 1 -> 2 -> 3
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
worker_data.append({'i': 0, 'j': 2, 'y': 0.5, 't': 3})
worker_data.append({'i': 0, 'j': 2, 'y': 0.5, 't': 4})
worker_data.append({'i': 0, 'j': 2, 'y': 0.75, 't': 5})
worker_data.append({'i': 0, 'j': 3, 'y': 1.5, 't': 6})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Firm 2 -> 2
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df).clean_data().get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert stayers.iloc[0]['i'] == 0
assert stayers.iloc[0]['j1'] == 2
assert stayers.iloc[0]['j2'] == 2
assert stayers.iloc[0]['y1'] == 0.5
assert stayers.iloc[0]['y2'] == 0.5
assert stayers.iloc[0]['t1'] == 4
assert stayers.iloc[0]['t2'] == 4
assert stayers.iloc[1]['i'] == 2
assert stayers.iloc[1]['j1'] == 2
assert stayers.iloc[1]['j2'] == 2
assert stayers.iloc[1]['y1'] == 1.
assert stayers.iloc[1]['y2'] == 1.
assert stayers.iloc[1]['t1'] == 1
assert stayers.iloc[1]['t2'] == 1
assert stayers.iloc[2]['i'] == 2
assert stayers.iloc[2]['j1'] == 2
assert stayers.iloc[2]['j2'] == 2
assert stayers.iloc[2]['y1'] == 1.
assert stayers.iloc[2]['y2'] == 1.
assert stayers.iloc[2]['t1'] == 2
assert stayers.iloc[2]['t2'] == 2
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2.
assert movers.iloc[0]['y2'] == 1.
assert movers.iloc[0]['t1'] == 1
assert movers.iloc[0]['t2'] == 2
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 2
assert movers.iloc[1]['y1'] == 1.
assert movers.iloc[1]['y2'] == 0.5
assert movers.iloc[1]['t1'] == 2
assert movers.iloc[1]['t2'] == 3
assert movers.iloc[2]['i'] == 0
assert movers.iloc[2]['j1'] == 2
assert movers.iloc[2]['j2'] == 3
assert movers.iloc[2]['y1'] == 0.75
assert movers.iloc[2]['y2'] == 1.5
assert movers.iloc[2]['t1'] == 5
assert movers.iloc[2]['t2'] == 6
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['j1'] == 1
assert movers.iloc[3]['j2'] == 2
assert movers.iloc[3]['y1'] == 1.
assert movers.iloc[3]['y2'] == 1.
assert movers.iloc[3]['t1'] == 1
assert movers.iloc[3]['t2'] == 2
bdf = bdf.get_long()
for row in range(len(bdf)):
df_row = df.iloc[row]
bdf_row = bdf.iloc[row]
for col in ['i', 'j', 'y', 't']:
assert df_row[col] == bdf_row[col]
def test_refactor_12():
# Check going to event study and back to long
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data()
assert len(bdf) == len(bdf.get_es().get_long())
def test_contiguous_fids_11():
# Check contiguous_ids() with firm ids.
worker_data = []
# Firm 0 -> 1
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Firm 1 -> 3
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 3, 'y': 1., 't': 2})
# Firm 3 -> 3
worker_data.append({'i': 2, 'j': 3, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 3, 'y': 1., 't': 2})
    df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
"""Helper functions for the import functionality"""
import pathlib
from datetime import datetime
import typing
from json import JSONDecodeError
import httpx
import numpy as np
import pandas as pd
from castoredc_api.client.castoredc_api_client import CastorException
if typing.TYPE_CHECKING:
from castoredc_api import CastorStudy
from castoredc_api.study.castor_objects import CastorField
def read_excel(path: str) -> pd.DataFrame:
"""Opens an xls(x) file as a pandas dataframe."""
dataframe = pd.read_excel(path, dtype=str)
    dataframe = dataframe.where(pd.notnull(dataframe), None)  # replace NaN with None
    return dataframe
import covid.util as util
import pandas as pd
import matplotlib.pyplot as plt
import sys
start = sys.argv[1]
forecast_start = sys.argv[2]
samples_directory = sys.argv[3]
import numpy as np
from epiweeks import Week, Year
num_weeks = 8
data = util.load_state_data()
places = sorted(list(data.keys()))
#places = ['AK', 'AL']
allQuantiles = [0.01,0.025]+list(np.arange(0.05,0.95+0.05,0.05)) + [0.975,0.99]
forecast_date = pd.to_datetime(forecast_start)
currentEpiWeek = Week.fromdate(forecast_date)
forecast = {'quantile':[],'target_end_date':[], 'value':[], 'type':[], 'location':[], 'target':[]}
for place in places:
prior_samples, mcmc_samples, post_pred_samples, forecast_samples = util.load_samples(place, path=samples_directory)
forecast_samples = forecast_samples['mean_z_future']
t = pd.date_range(start=forecast_start, periods=forecast_samples.shape[1], freq='D')
weekly_df = pd.DataFrame(index=t, data=np.transpose(forecast_samples)).resample("1w",label='left').last()
weekly_df[weekly_df<0.] = 0.
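    # weekly_df: one row per weekly bin (labelled by its left edge), one column per posterior
    # sample; .last() keeps the final daily draw within each week and negatives are clipped to 0.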
for time, samples in weekly_df.iterrows():
for q in allQuantiles:
deathPrediction = np.percentile(samples,q*100)
forecast["quantile"].append("{:.3f}".format(q))
forecast["value"].append(deathPrediction)
forecast["type"].append("quantile")
forecast["location"].append(place)
horizon_date = Week.fromdate(time)
week_ahead = horizon_date.week - currentEpiWeek.week + 1
forecast["target"].append("{:d} wk ahead cum death".format(week_ahead))
currentEpiWeek_datetime = currentEpiWeek.startdate()
forecast["forecast_date"] = "{:4d}-{:02d}-{:02d}".format(currentEpiWeek_datetime.year,currentEpiWeek_datetime.month,currentEpiWeek_datetime.day)
next_saturday = pd.Timedelta('6 days')
import unittest
import numpy as np
import pandas as pd
from pydatview.Tables import Table
import os
class TestTable(unittest.TestCase):
@classmethod
def setUpClass(cls):
d ={'ColA': np.linspace(0,1,100)+1,'ColB': np.random.normal(0,1,100)+0}
cls.df1 = pd.DataFrame(data=d)
d ={'ColA': np.linspace(0,1,100)+1,'ColB': np.random.normal(0,1,100)+0}
        cls.df2 = pd.DataFrame(data=d)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import torch
from easydict import EasyDict as edict
import json
import pandas as pd
import numpy as np
import cv2
from PIL import Image, ImageFile
from torchvision import transforms
import torchvision.datasets.folder
from torch.utils.data import TensorDataset, Dataset
from torchvision.datasets import MNIST, ImageFolder
from torchvision.transforms.functional import rotate
from .utils import transform, GetTransforms
ImageFile.LOAD_TRUNCATED_IMAGES = True
DATASETS = [
# Debug
"Debug28",
"Debug224",
# Small images
"ColoredMNIST",
"RotatedMNIST",
# Big images
"VLCS",
"PACS",
"OfficeHome",
"TerraIncognita",
"DomainNet",
"SVIRO",
'chestXR'
]
diseases = ['Atelectasis', 'Cardiomegaly', 'Consolidation', 'Edema', 'Pneumonia']
# diseases = ['Pneumonia']
class MultipleDomainDataset:
N_STEPS = 5001 # Default, subclasses may override
CHECKPOINT_FREQ = 100 # Default, subclasses may override
N_WORKERS = 8 # Default, subclasses may override
ENVIRONMENTS = None # Subclasses should override
INPUT_SHAPE = None # Subclasses should override
def __getitem__(self, index):
return self.datasets[index]
def __len__(self):
return len(self.datasets)
class ChestDataset(Dataset):
def __len__(self):
return self._num_image
def __getitem__(self, idx):
image = cv2.imread(self._image_paths[idx], 0)
try:
image = Image.fromarray(image)
except:
raise Exception('None image path: {}'.format(self._image_paths[idx]))
if self._mode == 'train':
image = GetTransforms(image, type=self.cfg.use_transforms_type)
image = np.array(image)
image = transform(image, self.cfg)
labels = np.array([self._labels[idx][-1]]).astype(np.float32)
path = self._image_paths[idx]
if self._mode == 'train' or self._mode == 'dev':
return (image, labels)
elif self._mode == 'test':
return (image, path)
else:
raise Exception('Unknown mode : {}'.format(self._mode))
class CheXpertDataset(ChestDataset):
def __init__(self, label_path, cfg='configs/chexpert_config.json', mode='train'):
with open(cfg) as f:
self.cfg = edict(json.load(f))
self._label_header = None
self._image_paths = []
self._labels = []
self._mode = mode
self.dict = [{'1.0': True, '': False, '0.0': False, '-1.0': False},
{'1.0': True, '': False, '0.0': False, '-1.0': True}, ]
self._data_path = label_path.rsplit('/', 2)[0]
with open(label_path) as f:
header = f.readline().strip('\n').split(',')
self._label_header = np.array([
header[7],
header[10],
header[11],
header[13],
header[12]])
for _, line in zip(range(6), f):
labels = []
fields = line.strip('\n').split(',')
image_path = self._data_path + '/' + fields[0]
for index, value in enumerate(fields[5:]):
if index == 5 or index == 8:
labels.append(self.dict[1].get(value))
elif index == 2 or index == 6 or index == 7:
labels.append(self.dict[0].get(value))
labels = np.array(list(map(int, labels)))[np.argsort(self._label_header)]
self._image_paths.append(image_path)
assert os.path.exists(image_path), image_path
self._labels.append(labels)
if self._mode == 'train' and labels[-1] == 1:
for i in range(40):
self._image_paths.append(image_path)
self._labels.append(labels)
self._num_image = len(self._image_paths)
class MimicCXRDataset(ChestDataset):
def __init__(self, label_path, cfg='configs/mimic_config.json', mode='train'):
with open(cfg) as f:
self.cfg = edict(json.load(f))
self._label_header = None
self._image_paths = []
self._labels = []
self._mode = mode
self.dict = [{'1.0': True, '': False, '0.0': False, '-1.0': False},
{'1.0': True, '': False, '0.0': False, '-1.0': True}, ]
self._data_path = ""
with open(label_path) as f:
header = f.readline().strip('\n').split(',')
self._label_header = np.array([
header[3],
header[5],
header[4],
header[2],
header[13]])
for _, line in zip(range(6), f):
labels = []
fields = line.strip('\n').split(',')
subject_id, study_id, dicom_id, split = fields[0], fields[1], fields[-3], fields[-1]
if split != mode:
continue
if int(subject_id[:2]) < 15:
self._data_path = '/mimic-cxr_1'
else:
self._data_path = '/mimic-cxr_2'
image_path = self._data_path + '/p' + subject_id[:2] + '/p' + subject_id + \
'/s' + study_id + '/' + dicom_id + '.jpg'
for index, value in enumerate(fields[2:]):
if index == 3 or index == 0:
labels.append(self.dict[1].get(value))
elif index == 1 or index == 2 or index == 11:
labels.append(self.dict[0].get(value))
labels = np.array(list(map(int, labels)))[np.argsort(self._label_header)]
self._image_paths.append(image_path)
assert os.path.exists(image_path), image_path
self._labels.append(labels)
if self._mode == 'train' and labels[-1] == 1:
for i in range(8):
self._image_paths.append(image_path)
self._labels.append(labels)
self._num_image = len(self._image_paths)
class ChestXR8Dataset(ChestDataset):
def __init__(self, label_path, cfg='configs/chestxray8_config.json', mode='train'):
def get_labels(label_strs):
all_labels = []
for label in label_strs:
labels_split = label.split('|')
label_final = [d in labels_split for d in diseases]
all_labels.append(label_final)
return all_labels
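        # Example (illustrative): with diseases as defined at module level,
        # get_labels(['Cardiomegaly|Edema']) returns [[False, True, False, True, False]].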
self._data_path = label_path.rsplit('/', 1)[0]
self._mode = mode
with open(cfg) as f:
self.cfg = edict(json.load(f))
labels = pd.read_csv(label_path)
labels = labels[labels['Finding Labels'].str.contains('|'.join(diseases + ['No Finding']))]
labels = labels.head(6)
if self._mode == 'train':
labels_neg = labels[labels['Finding Labels'].str.contains('No Finding')]
labels_pos = labels[~labels['Finding Labels'].str.contains('No Finding')]
upweight_ratio = len(labels_neg)/len(labels_pos)
labels_pos = labels_pos.loc[labels_pos.index.repeat(upweight_ratio)]
            labels = pd.concat([labels_neg, labels_pos])
import pymongo
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
import numpy as np
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import collections
from mlxtend.frequent_patterns import fpgrowth
import plotly
import plotly.express as px
from sklearn import metrics
# MongoDB
uri = "mongodb://localhost:27017/"
# Mongo client
client = pymongo.MongoClient(uri)
# Import database by name
db = client.ININ
# Get collection from DB
CollectionName = 'myLeaderboardsNew'
# set collection
collection = db[CollectionName]
#def hashtags_distribution():
def get_top_features_cluster(tf_idf_array, prediction, n_feats):
labels = np.unique(prediction)
dfs = []
for label in labels:
id_temp = np.where(prediction==label) # indices for each cluster
x_means = np.mean(tf_idf_array[id_temp], axis = 0) # returns average score across cluster
        sorted_means = np.argsort(x_means)[::-1][:n_feats]  # indices of the n_feats highest scores
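        # Note: `tfidf` is assumed to be a fitted TfidfVectorizer defined at module level;
        # it is not created anywhere in this snippet.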
features = tfidf.get_feature_names()
best_features = [(features[i], x_means[i]) for i in sorted_means]
df = pd.DataFrame(best_features, columns = ['features', 'score'])
dfs.append(df)
return dfs
def get_post_hashtags():
"""
    Gets a dataframe with the hashtags of posts grouped by influencer category
    :param: -
    :return posts_description_df: dataframe with one row per category and the lists of post hashtags
    """
    print('\nLoading post hashtags from MongoDB..')
cursor = collection.aggregate([
{'$group': {'_id': '$category',
'hashtags': {'$push': '$Posts.Hashtags'}}}
])
posts_description_df = pd.DataFrame(list(cursor))
posts_description_df.columns = ['category', 'hashtags']
return posts_description_df
def get_top_keywords(data, clusters, labels, n_terms):
df = pd.DataFrame(data.todense()).groupby(clusters).mean()
for i, r in df.iterrows():
print('\nCluster {}'.format(i))
print(','.join([labels[t] for t in np.argsort(r)[-n_terms:]]))
def plot_2d():
# Tf idf vectorize
vectorizer = TfidfVectorizer()
tfidf_hashtags = vectorizer.fit_transform(all_hashtags)
#print(tfidf_hashtags.shape)
# PCA to 2 dimensions
pca = PCA(n_components=2)
pca_hashtags = pca.fit_transform(tfidf_hashtags.toarray())
#print(pca_hashtags.shape)
dataset = pd.DataFrame(pca_hashtags, columns=['x', 'y'])
merged_df = pd.concat([df.reset_index(drop=True), dataset], axis=1)
merged_df = merged_df.dropna()
print(merged_df)
#dataset['hashtags'] = df['hashtags']
#merged_df.to_csv('web_app/data_csv/hashtags/hashtags_2d_2.csv', index=False)
plt.scatter(pca_hashtags[:, 0], pca_hashtags[:, 1], s=50, cmap='viridis')
plt.show()
def kmeans():
vectorizer = TfidfVectorizer()
tfidf_hashtags = vectorizer.fit_transform(all_hashtags)
df = pd.DataFrame(tfidf_hashtags.toarray(), columns=vectorizer.get_feature_names())
# PCA to 2 dimensions
pca = PCA(n_components=2)
pca_hashtags = pca.fit_transform(tfidf_hashtags.toarray())
print(pca_hashtags.shape)
kmeans = KMeans(n_clusters=3, random_state=0).fit(pca_hashtags)
print(len(kmeans.labels_))
df = get_post_hashtags()
#print(df)
all_hashtags = []
categories_list = []
list_for_df = []
# Create a list of strings (each string consists of each post's hashtags)
for hashtag_list in df['hashtags']:
categories_list.append(df['category'])
for influencer in hashtag_list:
for post in influencer:
if post:
hashtag_string = ''
for hashtag in post:
list_for_df.append(hashtag)
if hashtag_string == '':
hashtag_string = hashtag
else:
hashtag_string = hashtag_string + ' ' + hashtag
all_hashtags.append(hashtag_string)
df = pd.DataFrame({'hashtags': list_for_df})
import numpy as np
import operator
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
from matplotlib.colors import LinearSegmentedColormap
import seaborn as sns
import math
from tkinter import *
# Functions age_encode, race_encode, state_encode, and self_core_dict used to create the core dict.
def age_encode(age):
# Returns the utf-8 object of an arbitrary integer age input.
if age < 1:
return '1'.encode('utf-8')
elif age < 5:
return '1-4'.encode('utf-8')
elif age < 10:
return '5-9'.encode('utf-8')
elif age < 15:
return '10-14'.encode('utf-8')
elif age < 20:
return '15-19'.encode('utf-8')
elif age < 25:
return '20-24'.encode('utf-8')
elif age < 30:
return '25-29'.encode('utf-8')
elif age < 35:
return '30-34'.encode('utf-8')
elif age < 40:
return '35-39'.encode('utf-8')
elif age < 45:
return '40-44'.encode('utf-8')
elif age < 50:
return '45-49'.encode('utf-8')
elif age < 55:
return '50-54'.encode('utf-8')
elif age < 60:
return '55-59'.encode('utf-8')
elif age < 65:
return '60-64'.encode('utf-8')
elif age < 70:
return '65-69'.encode('utf-8')
elif age < 75:
return '70-74'.encode('utf-8')
elif age < 80:
return '75-79'.encode('utf-8')
elif age < 85:
return '80-84'.encode('utf-8')
elif age < 90:
return '85-89'.encode('utf-8')
elif age < 95:
return '90-94'.encode('utf-8')
elif age < 100:
return '95-99'.encode('utf-8')
elif age >= 100:
return '100+'.encode('utf-8')
else:
print('Insert age between 1-85+.')
return
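# Examples: age_encode(27) returns b'25-29'; age_encode(102) returns b'100+'.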
def race_encode(race):
# Insert full name string, return utf-8 object of race code.
race_key = {'White': '2106-3'.encode('utf-8'),
'Asian or Pacific Islander': 'A-PI'.encode('utf-8'),
'Black or African American': '2054-5'.encode('utf-8'),
'American Indian or Alaska Native': '1002-5'.encode('utf-8')}
if race not in race_key.keys():
raise KeyError("%s not present" %race)
else:
return race_key[race]
def state_encode(state):
state_dict = {'Alabama': 1,
'Alaska': 2,
'Arizona': 4,
'Arkansas': 5,
'California': 6,
'Colorado': 8,
'Connecticut': 9,
'Delaware': 10,
'District of Columbia': 11,
'Florida': 12,
'Georgia': 13,
'Hawaii': 15,
'Idaho': 16,
'Illinois': 17,
'Indiana': 18,
'Iowa': 19,
'Kansas': 20,
'Kentucky': 21,
'Louisiana': 22,
'Maine': 23,
'Maryland': 24,
'Massachusetts': 25,
'Michigan': 26,
'Minnesota': 27,
'Mississippi': 28,
'Missouri': 29,
'Montana': 30,
'Nebraska': 31,
'Nevada': 32,
'New Hampshire': 33,
'New Jersey': 34,
'New Mexico': 35,
'New York': 36,
'North Carolina': 37,
'North Dakota': 38,
'Ohio': 39,
'Oklahoma': 40,
'Oregon': 41,
'Pennsylvania': 42,
'Rhode Island': 44,
'South Carolina': 45,
'South Dakota': 46,
'Tennessee': 47,
'Texas': 48,
'Utah': 49,
'Vermont': 50,
'Virginia': 51,
'Washington': 53,
'West Virginia': 54,
'Wisconsin': 55,
'Wyoming': 56}
if state not in state_dict.keys():
raise KeyError('%s not in states' % state)
else:
return state_dict[state]
def hispanic_encode(hispanic):
hispanic_key = {'Not Hispanic': '2186-2'.encode('utf-8'),
'Hispanic': '2135-2'.encode('utf-8'),
'Unspecific': 'NS'.encode('utf-8')}
if hispanic not in hispanic_key.keys():
raise KeyError("%s not present" % hispanic)
else:
return hispanic_key[hispanic]
def self_core_dict(age, race, gender, hispanic, state):
# Produces a dictionary of the person's stats for numpy manipulation.
tester = {}
tester.update({'age': age_encode(age)})
tester.update({'race': race_encode(race)})
tester.update({'gender': gender.encode('utf-8')})
tester.update({'hispanic': hispanic_encode(hispanic)})
tester.update({'state': str(state_encode(state)).encode('utf-8')})
return tester
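# Illustrative usage (argument values are examples only):
#   self_core_dict(30, 'White', 'F', 'Not Hispanic', 'Ohio')
#   -> {'age': b'30-34', 'race': b'2106-3', 'gender': b'F', 'hispanic': b'2186-2', 'state': b'39'}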
# Functions age_range_encode, mortality_core_raw used to create the total mortality matrix for the core.
def age_range_encode(age):
#ages = ['<1', '1-4', '5-9', '10-14', '15-19', '20-24', '25-29', '30-34', '35-39', '40-44', '45-49', '50-54',
# '55-59', '60-64', '65-69', '70-74', '75-79', '80-84']
ages = ['1', '1-4', '5-9', '10-14', '15-19', '20-24', '25-29', '30-34', '35-39', '40-44', '45-49', '50-54',
'55-59', '60-64', '65-69', '70-74', '75-79', '80-84', '85-89', '90-94', '95-99', '100+']
byte_ages = [x.encode('utf-8') for x in ages]
return byte_ages[byte_ages.index(age):]
def mortality_core_raw(person_dict, age_range):
# Imports CDC mortality and 85~100+ population data.
mortality_path = 'C:\\Users\Amy\Desktop\Research\data\\070617_113_causeofdeath_cancer.txt'
mortality_data = np.genfromtxt(mortality_path,
dtype=(object, object, object, object, object, object, object, '<i8', '<i8'),
delimiter='\t',
names=True)
pop_85_path = 'C:\\Users\Amy\Desktop\Research\data\85to100_estimates_final.txt'
pop_85_data = np.genfromtxt(pop_85_path,
dtype=(object, object, object, object, object, '<i8'),
delimiter='\t',
names=True)
pop_85_ages = ['85-89'.encode('utf-8'), '90-94'.encode('utf-8'), '95-99'.encode('utf-8'), '100+'.encode('utf-8')]
total_deaths_path = 'C:\\Users\Amy\Desktop\Research\data\\total_deaths.txt'
totald_data = np.genfromtxt(total_deaths_path,
dtype=(object, object, object, object, object, '<i8', '<i8'),
delimiter='\t',
names=True)
age_dict = {'85-89'.encode('utf-8'): 'A',
'90-94'.encode('utf-8'): 'B',
'95-99'.encode('utf-8'): 'C',
'100+'.encode('utf-8'): 'D'}
race_dict = {'2106-3'.encode('utf-8'): '1',
'1002-5'.encode('utf-8'): '2',
'2054-5'.encode('utf-8'): '3',
'A-PI'.encode('utf-8'): '4'}
ethnicity_dict = {'2186-2'.encode('utf-8'): '0',
'2135-2'.encode('utf-8'): '1'}
population_dict = dict()
for entry in pop_85_data:
age = entry[0]
state = entry[1]
gender = entry[2]
race = entry[3]
eth = entry[4]
population = entry[5]
label = age_dict[age] + state.decode('utf-8') + gender.decode('utf-8') + race_dict[race] + ethnicity_dict[eth]
population_dict.update({label: population})
for entry in mortality_data:
age = entry[0]
ethnicity = entry[2]
if age in pop_85_ages and ethnicity != 'NS'.encode('utf-8'):
race = entry[1]
ethnicity = entry[2]
state = entry[3]
gender = entry[4]
label = age_dict[age] + state.decode('utf-8') + gender.decode('utf-8') + race_dict[race] + ethnicity_dict[
ethnicity]
entry[8] = population_dict[label]
# Produces the set of the person for comparison to mortality entries.
person_set = {person_dict['race'], person_dict['gender'], person_dict['hispanic'], person_dict['state']}
# Produces the dictionary of all deaths associated with the core by age.
total_deaths_all = {age: 0 for age in age_range}
for entry in totald_data:
age = entry[0]
deaths = entry[5]
population = entry[6]
if person_set.issubset(set(entry)) and age in age_range:
total_deaths_all.update({age: total_deaths_all[age] + deaths})
# Produces the list of sets of all mortalities associated with the core and total count of all deaths.
mortalities = []
total_deaths_selected = {age: 0 for age in age_range}
total_population_by_age = {age: 0 for age in age_range}
for row in mortality_data:
age = row[0]
mortality_name = row[5]
if person_set.issubset(set(row)) and age in age_range:
mortality_code = row[6]
deaths = row[7]
population = row[8]
rate = row[7] / row[8] * 100000
mortalities.append((age, mortality_name, mortality_code, deaths, population, rate))
total_deaths_selected.update({age: total_deaths_selected[age] + deaths})
total_population_by_age.update({age: population})
# Converts the result from list of sets to a matrix.
mortality_matches = np.array([tuple(x) for x in mortalities], dtype='object, object, object, <i8, <i8, <i8')
mortality_matches.reshape((len(mortality_matches), 1))
# Obtains list of unique mortalities.
mortality_names = set([i[1] for i in mortality_matches])
print('There are', len(mortality_names), 'total unique mortalities.', '\n')
if len(mortality_names) == 0:
print('Congrats! Not enough of you are dying. Perhaps try another state.')
return mortality_matches, mortality_names, total_deaths_selected, total_deaths_all, total_population_by_age
# Function death_ramking used to create the top 12 mortality matrix.
def death_ranking(matches, names, cutoff_num):
scores = {name: 0 for name in names}
# Filters through all raw mortality rows to create a death score for each mortality.
for entry in matches:
current_disease = entry[1]
deaths = entry[3]
scores.update({current_disease: scores[current_disease] + deaths})
sorted_scores = sorted(scores.items(), key=operator.itemgetter(1), reverse=True)
# Returns top cutoff number mortality entries if there are >cutoff_num death scores listed.
if len(sorted_scores) >= cutoff_num:
# Top cutoff_num scores and mortality names obtained.
trim_scores = sorted_scores[0:cutoff_num]
names = [entry[0] for entry in trim_scores]
# Finds which rows are not in the top cutoff_num mortalities and removes them. Returns the trimmed matrix.
to_delete = [i for i in range(len(matches)) if matches[i][1] not in names]
trimmed_matches = np.delete(matches, to_delete, axis=0)
return trimmed_matches, names
else:
names = [entry[0] for entry in sorted_scores]
return matches, names
# Functions bar_chart, stacked_histogram, scatter_plot used for visualization.
def age_bracket_avg(person_dict, ages):
population_path = 'C:\\Users\Amy\Desktop\Research\data\\year_age_popestimate.txt'
population_data = np.genfromtxt(population_path,
dtype=('<i8', object, object, object, object, '<i8'),
delimiter='\t',
names=True)
population_dict = {age: np.array([0, 0, 0, 0, 0]) for age in ages}
person_set = {person_dict['race'], person_dict['gender'], person_dict['hispanic'], person_dict['state']}
ages = ['1', '1-4', '5-9', '10-14', '15-19', '20-24', '25-29', '30-34', '35-39', '40-44', '45-49', '50-54',
'55-59', '60-64', '65-69', '70-74', '75-79', '80-84', '85-89', '90-94', '95-99', '100+']
byte_ages = [x.encode('utf-8') for x in ages]
age_min = (byte_ages.index(person_dict['age'])-1) * 5
for entry in population_data:
current_age = entry[0]
if person_set.issubset(entry) and current_age >= age_min:
age = entry[0]
age_bracket = byte_ages[age // 5 + 1]
age_bracket_year = age % 5
population = entry[5]
population_dict[age_bracket][age_bracket_year] = population
for age, counts in population_dict.items():
tens = (byte_ages.index(age) - 1) // 2 * 10 + (byte_ages.index(age) - 1) % 2 * 5
dists = counts/sum(counts)
avg = np.dot(dists, [0, 1, 2, 3, 4])
population_dict.update({age: round((tens + avg), 2)})
return population_dict
def age_of_death(matches, names, total_deaths_all, age_avgs, just_mortalities):
age_list = list(age_avgs.keys())
names_path = 'C:\\Users\Amy\Desktop\Research\data\\070617_113_listofdeaths.txt'
names_data = np.genfromtxt(names_path,
dtype=(object, object),
delimiter='\t',
names=True)
names_dict = {row[0]: row[1] for row in names_data}
if just_mortalities:
mortality_counts = {name: {age: 0 for age in age_list} for name in names}
mortality_results = {}
for entry in matches:
age = entry[0]
name = entry[1]
deaths = entry[3]
mortality_counts[name].update({age: deaths})
for name, ages in mortality_counts.items():
counts = np.array(list(ages.values()))
indices = list(range(len(list(ages.values()))))
avg_index = math.ceil(np.dot(counts/sum(counts), indices))
mortality_results.update({names_dict[name]: age_avgs[age_list[avg_index]]})
print('Average age of death from these mortalities:')
for key, val in mortality_results.items():
print(key.decode('utf-8'), ' - ', val, sep='')
return mortality_results
else:
counts = np.array(list(total_deaths_all.values()))
indices = list(range(len(list(total_deaths_all.values()))))
avg_index = math.ceil(np.dot(counts/sum(counts), indices))
avg_age = age_avgs[age_list[avg_index]]
print('Average age of death: ', avg_age, '\n', sep='')
return avg_age
def stacked_bar_chart(matches, names, total_deaths_all):
    # ABOUT: Takes the top-12 mortality data and creates a stacked bar chart of them.
    # Accumulates total deaths per mortality; converted below to a percentage of all deaths.
percentage = {name: 0 for name in names}
for entry in matches:
current_mortality = entry[1]
if current_mortality in names:
deaths = entry[3]
percentage.update({current_mortality: (percentage[current_mortality] + deaths)})
    names_path = r'C:\Users\Amy\Desktop\Research\data\070617_113_listofdeaths.txt'
names_data = np.genfromtxt(names_path,
dtype=(object, object),
delimiter='\t',
names=True)
names_dict = {row[0]: row[1] for row in names_data}
# Sums all the death rates then divides all individual rates by the sum to obtain each percentage of deaths.
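    # e.g. with 200 total deaths, {'A': 50, 'B': 150} becomes {'A': 25, 'B': 75} (percent).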
for disease, deaths in percentage.items():
percentage.update({disease: int(round(deaths/sum(total_deaths_all.values())*100))})
clean_percentage = {}
for disease, deaths in percentage.items():
new_key = names_dict[disease].decode('utf-8')
clean_percentage.update({new_key: deaths})
# Creates the stacked bar chart.
df = pd.Series(clean_percentage, name=' ').to_frame()
df = df.sort_values(by=' ', ascending=False).T
matplotlib.style.use('ggplot')
my_colors = ['#8dd3c7', '#91818c', '#bebada', '#fb8072', '#80b1d3',
'#fdb462', '#b3de69', '#fccde5', '#2ecc71',
'#abf99a', '#ffed6f', "#9b59b6"]
colors = sns.color_palette(my_colors, n_colors=df.shape[1])
cmap1 = LinearSegmentedColormap.from_list('my_colormap', colors)
df.plot(kind='barh', stacked=True, colormap=cmap1)
lgd = plt.legend(loc=9, bbox_to_anchor=(0.5, -0.2), ncol=2)
plt.subplots_adjust(top=0.94, bottom=0.70, left=0.07, right=0.92)
plt.xlim(0, 100)
ax = plt.gca()
# ax.set_facecolor('#ededed')
ax.yaxis.grid(False)
plt.title('Percentage of deaths (ignoring age) in 2015')
plt.savefig('stacked_barplot.svg', format='svg',
                bbox_extra_artists=[lgd], bbox_inches='tight',  # bbox_extra_artists keeps the legend inside the saved figure
dpi=1200) # add , transparent=True if you want a clear background.
plt.show()
return percentage
def bar_plot(total_deaths_all, total_population_by_age, age_range):
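    # ABOUT: Plots the overall death rate per 100,000 population for each age bracket.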
bar_dict = {age.decode('utf-8'): 0 for age in age_range}
for age, rate in bar_dict.items():
age_e = age.encode('utf-8')
if total_population_by_age[age_e] != 0:
bar_dict.update({age: total_deaths_all[age_e] / total_population_by_age[age_e] * 100000})
X = np.arange(len(bar_dict))
plt.bar(X, bar_dict.values(), align='center', width=0.9)
plt.xticks(X, bar_dict.keys(), rotation='vertical')
plt.subplots_adjust(top=0.94, bottom=0.56, left=0.12, right=0.60)
ax = plt.gca()
ax.xaxis.grid(False)
plt.title('Death Rates Across Age')
plt.xlabel('Age')
plt.ylabel('Deaths per 100k')
plt.savefig('barplot.svg',
format='svg', bbox_inches='tight',
dpi=1200)
plt.show()
def stacked_histogram(matches, names, age_range, show_rate, stacked100):
# ABOUT: Creates a fill chart of the top 12 mortalities over the age range.
# Reference: https://stackoverflow.com/questions/40960437/using-a-custom-color-palette-in-stacked-bar-chart-python
    # Creates the age x mortality array; each entry holds the death rate per 100,000
    # (or the raw death count when show_rate is False).
bar_data = np.zeros((len(age_range), len(names)))
for entry in matches:
current_age = entry[0]
current_mortality = entry[1]
deaths = entry[3]
population = entry[4]
if show_rate:
bar_data[age_range.index(current_age), names.index(current_mortality)] = deaths/population*100000
else:
bar_data[age_range.index(current_age), names.index(current_mortality)] = deaths
if stacked100:
# Rescales to sum to 100% across each age.
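        # e.g. an age row of [1, 1, 2] deaths is rescaled to [25., 25., 50.] percent;
        # rows that sum to zero are left unchanged.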
sum_t = bar_data.sum(axis=1)
for row_age in range(bar_data.shape[0]):
# Checks if there are any at all.
if sum_t[row_age] != 0:
bar_data[row_age, :] = bar_data[row_age, :]/sum_t[row_age]*100
# X-axis values based on age.
age_labels = [age.decode('utf-8') for age in age_range]
# Stacked histogram values of mortality names.
    names_path = r'C:\Users\Amy\Desktop\Research\data\070617_113_listofdeaths.txt'
names_data = np.genfromtxt(names_path,
dtype=(object, object),
delimiter='\t',
names=True)
names_dict = {row[0]: row[1] for row in names_data}
name_labels = [names_dict[name].decode('utf-8') for name in names]
# Labels for concatenated mortality name + data matrix for the histogram.
bar_columns = ['Age'] + name_labels
# Creating the stacked histogram.
matplotlib.style.use('ggplot')
my_colors = ['#8dd3c7', '#91818c', '#bebada', '#fb8072', '#80b1d3',
'#fdb462', '#b3de69', '#fccde5', '#2ecc71',
'#abf99a', '#ffed6f', "#9b59b6"]
colors = sns.color_palette(my_colors, n_colors=len(name_labels))
cmap1 = LinearSegmentedColormap.from_list('my_colormap', colors)
bar_data = np.hstack((np.array(age_labels)[np.newaxis].T, bar_data))
df = | pd.DataFrame(data=bar_data) | pandas.DataFrame |
import pytest
import logging
import datetime
import json
import pandas as pd
from astropy.table import Table
from b_to_zooniverse import upload_decals
# logging.basicConfig(
# format='%(asctime)s %(message)s',
# level=logging.DEBUG)
@pytest.fixture
def calibration_dir(tmpdir):
return tmpdir.mkdir('calibration_dir').strpath
@pytest.fixture
def fits_dir(tmpdir):
return tmpdir.mkdir('fits_dir').strpath
@pytest.fixture
def png_dir(tmpdir):
return tmpdir.mkdir('png_dir').strpath
@pytest.fixture
def fits_loc(fits_dir):
return fits_dir + '/' + 'test_image.fits'
@pytest.fixture
def png_loc(png_dir):
return png_dir + '/' + 'test_image.png'
@pytest.fixture()
def nsa_catalog():
return Table([
{'iauname': 'gal_a',
'ra': 146.,
'dec': -1.,
'petroth50': 2.,
'petroth90': 5.},
# adversarial example identical to gal_a
{'iauname': 'gal_dr1',
'ra': 14.,
'dec': -1.,
'petroth50': 2.,
'petroth90': 5.},
{'iauname': 'gal_dr2',
'ra': 1.,
'dec': -1.,
'petroth50': 2.,
'petroth90': 5.}
])
@pytest.fixture()
def fake_metadata():
# manifest expects many columns from joint catalog
return {
'petrotheta': 4.,
'petroflux': [20., 21., 22., 23., 24., 25., 26.],
'nsa_version': '1_0_0',
'z': 0.1,
'mag': [0., 1., 2., 3., 4., 5., 6.],
'absmag': [10., 11., 12., 13., 14., 15., 16.],
'nmgy': [30., 31., 32., 33., 34., 35., 36.],
'another_column': 'sadness'}
@pytest.fixture()
def joint_catalog(fits_dir, png_dir, fake_metadata):
# saved from downloader, which adds fits_loc, png_loc and png_ready to nsa_catalog + decals bricks
gal_a = {
'iauname': 'gal_a',
'nsa_id': 0,
'fits_loc': '{}/gal_a.fits'.format(fits_dir),
'png_loc': '{}/gal_a.png'.format(png_dir),
'png_ready': True,
'ra': 146.,
'dec': -1.,
'petroth50': 2.,
'petroth90': 5.}
gal_a.update(fake_metadata)
gal_dr1 = {
'iauname': 'gal_dr1',
'nsa_id': 1,
'fits_loc': '{}/gal_b.fits'.format(fits_dir),
'png_loc': '{}/gal_b.png'.format(png_dir),
'png_ready': True,
'ra': 14.,
'dec': -1.,
'petroth50': 2.,
'petroth90': 5.}
gal_dr1.update(fake_metadata)
gal_dr2 = {
'iauname': 'gal_dr2',
'nsa_id': 2,
'fits_loc': '{}/gal_c.fits'.format(fits_dir),
'png_loc': '{}/gal_c.png'.format(png_dir),
'png_ready': True,
'ra': 1.,
'dec': -1.,
'petroth50': 2.,
'petroth90': 5.}
gal_dr2.update(fake_metadata)
return Table([gal_a, gal_dr1, gal_dr2])
@pytest.fixture()
def previous_subjects():
# loaded from GZ data dump. Metadata already corrected in setup (name = main stage).
return Table([
# DR1 entries have provided_image_id filled with iau name, nsa_id blank, dr blank
{'_id': 'ObjectId(0)',
'zooniverse_id': 'gz_dr1',
'iauname': 'gal_dr1',
'nsa_id': 1,
'dr': 'DR1',
'ra': 14.,
'dec': -1.,
'petroth50': 2.,
'petroth90': 5.
},
# DR2 entries have provided_image_id blank, nsa_id filled with NSA_[number], dr filled with 'DR2'
{'_id': 'ObjectId(1)',
'zooniverse_id': 'gz_dr2',
'iauname': 'gal_dr2',
'nsa_id': 2,
'dr': 'DR2',
'ra': 1.,
'dec': -1.,
'petroth50': 2.,
'petroth90': 5.
}
])
@pytest.fixture()
def expert_catalog():
return Table([
{
# gal a is both a bar and ring galaxy, and so should be included in the calibration set
'iauname': 'gz_a',
'ra': 146.,
'dec': -1.,
'bar': 2 ** 5,
'ring': 2 ** 3,
}
])
#
# def test_upload_decals_to_panoptes(joint_catalog, previous_subjects, expert_catalog, calibration_dir):
# # TODO mock the uploader here
# main_subjects, calibration_subjects = upload_decals_to_panoptes(
# joint_catalog, previous_subjects, expert_catalog, calibration_dir)
#
# print(main_subjects)
# print(calibration_subjects)
#
# assert len(main_subjects) == 1
# assert len(calibration_subjects) == len(main_subjects) * 2
#
# first_main_subject = main_subjects[0]
# assert first_main_subject['png_loc'][-17:] == 'png_dir/gal_a.png'
# assert first_main_subject['key_data']['ra'] == 146.0
# assert first_main_subject['key_data']['dec'] == -1.
# assert first_main_subject['key_data']['nsa_id'] == 0
# assert first_main_subject['key_data']['petroth50'] == 2.0
# assert first_main_subject['key_data']['mag_abs_r'] == 14.0
# TODO better unit tests for calibration image manifest
# wrong, should have 1 of each version not two
# assert calibration_subjects[0]['png_loc'][-29:] == 'calibration_dir/gal_a_dr2.png'
# assert calibration_subjects[0]['key_data']['selected_image'] == 'dr2_png_loc'
# assert calibration_subjects[1]['png_loc'][-32:] == 'calibration_dir/gal_a_colour.png'
@pytest.fixture()
def subject_extract():
return pd.DataFrame([
{
'subject_id': 'classified', # gal_dr2 should be removed from joint catalog - has been uploaded/classified
'workflow_id': '6122',
'metadata': json.dumps({ # read by subject loader
                'ra': 1.,  # in joint catalog as 'gal_dr2'
'dec': -1,
'locations': json.dumps({'0': 'url.png'}) # expected by subject loader. Value is itself a json.
})
},
{
'subject_id': 'used_twice',
'workflow_id': '6122',
'metadata': json.dumps({
'ra': 146., # should still exclude gal_a
'dec': -1,
'locations': json.dumps({'0': 'url.png'})
})
},
{
'subject_id': 'used_twice',
'workflow_id': '9999', # duplicate subject due to being attached to another workflow
'metadata': json.dumps({
'ra': 146., # should still exclude gal_a
'dec': -1,
'locations': json.dumps({'0': 'url.png'})
})
},
{
'subject_id': 'different_workflow',
'workflow_id': '9999',
'metadata': json.dumps({
'ra': 14., # should NOT exclude gal_dr1, classified elsewhere
'dec': -1,
'locations': json.dumps({'0': 'url.png'})
})
},
{
'subject_id': 'early',
'workflow_id': '6122',
'metadata': json.dumps({
'ra': 146., # should NOT exclude gal_dr1, classified early
'dec': -1,
'locations': json.dumps({'0': 'url.png'})
})
},
])
@pytest.fixture()
def classification_extract(): # note: subject_ids, with an s, from Panoptes
return pd.DataFrame([
{
'subject_ids': 'classified',
'created_at': '2018-01-01', # should ensure gal_dr2 is removed for being classified
'workflow_id': '6122'
},
{
'subject_ids': 'used_twice',
'created_at': | pd.to_datetime('2018-01-01') | pandas.to_datetime |
import toml
import logging
import numpy as np
import pandas as pd
import os
from wann_genetic import Individual, RecurrentIndividual
from wann_genetic.tasks import select_task
from wann_genetic import GeneticAlgorithm
from .util import get_version, TimeStore
from .evaluation_util import (get_objective_values, update_hall_of_fame,
make_measurements)
from wann_genetic.util import ParamTree
from wann_genetic.postopt import Report
class Environment(ParamTree):
"""Environment for executing training and post training evaluations.
Takes care of process pool, reporting, and experiment parameters.
Parameters
----------
params : dict or str
Dictionary containing the parameters or a path to a parameters spec
file.
"""
from .util import (default_params, setup_params, open_data,
store_gen, store_gen_metrics, load_gen_metrics,
stored_populations, stored_indiv_measurements,
store_hof, load_hof, load_pop,
load_indiv_measurements,
env_path)
def __init__(self, params):
"""Initialize an environment for training or post training analysis."""
super().__init__()
self.setup_params(params)
self.metrics = list()
self.pool = None
self.data_file = None
self.hall_of_fame = list()
# choose task
self.task = select_task(self['task', 'name'])
        # choose the appropriate individual class (backend and recurrent vs. feed-forward)
if self['config', 'backend'].lower() == 'torch':
import wann_genetic.individual.torch as backend
else:
import wann_genetic.individual.numpy as backend
if self.task.is_recurrent:
self.ind_class = backend.RecurrentIndividual
else:
self.ind_class = backend.Individual
        # only use the enabled activation functions
available_funcs = self.ind_class.Phenotype.available_act_functions
enabled_acts = self['population', 'enabled_activation_funcs']
if self['population', 'enabled_activation_funcs'] != 'all':
self.ind_class.Phenotype.enabled_act_functions = [
available_funcs[i] for i in enabled_acts
]
def seed(self, seed):
"""Set seed to `seed` or from parameters.
Parameters
----------
seed : int
Seed to use.
"""
np.random.seed(seed)
@property
def elite_size(self):
"""Size of the elite (:math:`population\\ size * elite\\ ratio`)."""
return int(np.floor(self['selection', 'elite_ratio']
* self['population', 'size']))
def sample_weights(self, n=None):
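        """Sample the shared weight value(s) for this iteration.

        The sample count and the distribution (one, uniform, linspace,
        lognormal, normal) come from the 'sampling' parameters; the drawn
        values are also stored under ('sampling', 'current_weight').
        """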
if n is None:
n = self['sampling']['num_weights_per_iteration']
dist = self['sampling', 'distribution'].lower()
if dist == 'one':
w = 1
elif dist == 'uniform':
lower = self['sampling', 'lower_bound']
upper = self['sampling', 'upper_bound']
assert lower is not None and upper is not None
w = np.random.uniform(lower, upper, size=n)
elif dist == 'linspace':
lower = self['sampling', 'lower_bound']
upper = self['sampling', 'upper_bound']
assert lower is not None and upper is not None
w = np.linspace(lower, upper, num=n)
elif dist == 'lognormal':
mu = self['sampling', 'mean']
sigma = self['sampling', 'sigma']
assert mu is not None and sigma is not None
w = np.random.lognormal(mu, sigma, size=n)
elif dist == 'normal':
mu = self['sampling', 'mean']
sigma = self['sampling', 'sigma']
assert mu is not None and sigma is not None
w = np.random.normal(mu, sigma, size=n)
else:
raise RuntimeError(f'Distribution {dist} not implemented.')
self['sampling', 'current_weight'] = w
return w
def setup_pool(self, n=None):
"""Setup process pool."""
if n is None:
n = self['config', 'num_workers']
if n == 1:
self.pool = None
else:
if self['config', 'backend'].lower() == 'torch':
logging.info('Using torch multiprocessing')
from torch.multiprocessing import Pool
self.pool = Pool(n)
else:
logging.info('Using usual multiprocessing')
from multiprocessing import Pool
self.pool = Pool(n)
def pool_map(self, func, iter):
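        """Map ``func`` over ``iter`` with the worker pool if one was set up, otherwise the builtin ``map``."""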
if self.pool is None:
return map(func, iter)
else:
return self.pool.imap(func, iter)
def setup_optimization(self):
"""Setup everything that is required for training (eg. loading test
samples).
"""
log_path = self.env_path(self['storage', 'log_filename'])
logging.info(f"Check log ('{log_path}') for details.")
logger = logging.getLogger()
fh = logging.FileHandler(log_path)
fh.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
logger.addHandler(fh)
if self['config', 'debug']:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
logging.info(f"Package version {get_version()}")
p = os.path.abspath(self['experiment_path'])
logging.info(f'Saving data at {p}.')
logging.debug('Loading training dataset')
self.task.load_training(env=self)
# log used parameters
params_toml = toml.dumps(self.params)
logging.debug(f"Running experiments with the following parameters:\n"
f"{params_toml}")
with open(self.env_path('params.toml'), 'w') as f:
params = dict(self.params)
# mark stored params as part of a report
params['is_report'] = True
toml.dump(params, f)
def run(self):
"""Run optization and post optimization (if enabled)."""
# set up logging, write params
self.setup_optimization()
# set up pool of workers
self.setup_pool()
with self.open_data('w'):
# run optimization
self.optimize()
if self['postopt', 'run_postopt']:
with self.open_data('r'):
# evaluate individuals in hall of fame
self.post_optimization()
if self.pool is not None:
logging.info('Closing pool')
self.pool.close()
def optimize(self):
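        """Run the evolutionary loop: ask for a population, sample shared
        weights, evaluate objectives, update the hall of fame, and store
        per-generation data."""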
logging.info("Starting evolutionary algorithm")
ts = TimeStore()
alg = GeneticAlgorithm(self)
first_generation = True
self.seed(self['sampling', 'seed'])
ts.start()
for gen in np.arange(self['population', 'num_generations']) + 1:
ts.start()
pop = alg.ask()
seed = self['sampling', 'post_init_seed']
            if first_generation and not isinstance(seed, bool):
                self.seed(seed)
            first_generation = False
# evaluate indivs
weights = self.sample_weights()
logging.debug(f'Sampled weight {weights}')
make_measurements(self, pop, weights=weights)
obj_values = np.array([
get_objective_values(ind, self.objectives)
for ind in pop
])
alg.tell(obj_values)
logging.debug('Updating hall of fame')
self.hall_of_fame = update_hall_of_fame(self, pop)
ts.stop()
avg = (ts.total / gen)
expected_time = (self['population', 'num_generations'] - gen) * avg
logging.info(f'Completed generation {gen}; {ts.dt:.02}s elapsed, {avg:.02}s avg, {ts.total:.02}s total. '
f'Expected time remaining: {expected_time:.02}s')
self.store_data(gen, pop, dt=ts.dt)
self.last_population = pop
self.store_hof()
def post_optimization(self):
r = Report(self).run_evaluations( # run evaluations on test data
num_weights=self['postopt', 'num_weights'],
num_samples=self['postopt', 'num_samples'] # all
)
if self['postopt', 'compile_report']:
r.compile() # plot metrics, derive stats
else:
r.compile_stats() # at least derive and store stats
def store_data(self, gen, pop, dt=-1):
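        """Log generation metrics and periodically commit the elite and the
        metric history according to the storage commit frequencies."""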
gen_metrics, indiv_metrics = self.population_metrics(
gen=gen, population=pop, return_indiv_measurements=True, dt=dt)
metric, metric_sign = self.hof_metric
p = ("MAX" if metric_sign > 0 else "MIN")
metric_value = gen_metrics[f"{p}:{metric}"]
logging.info(f"#{gen} {p}:{metric}: {metric_value:.2}")
self.metrics.append(gen_metrics)
commit_freq = self['storage', 'commit_elite_freq']
if (commit_freq > 0 and gen % commit_freq == 0):
self.store_gen(
gen, population=pop[:self.elite_size],
indiv_metrics=indiv_metrics)
commit_freq = self['storage', 'commit_metrics_freq']
if (commit_freq > 0 and gen % commit_freq == 0):
self.store_gen_metrics( | pd.DataFrame(data=self.metrics) | pandas.DataFrame |
"""
This script adds the ACS 2019 population totals extracted from Redistricting Data Hub
(https://redistrictingdatahub.org/dataset/texas-block-group-acs5-data-2019/)
to the shape file provided by MGGG (https://www.dropbox.com/sh/k78n2hyixmv9xdg/AABmZG5ntMbXtX1VKThR7_t8a?dl=0)
for updated demographic info since this analysis is conducted prior to the release of the 2020 US Census Data.
Additionally, a seed plan is created using the 2019 ACS population info and 38 districts that
conforms to the 'ensemble_inclusion' constraint (minimum of 11 minority effective districts)
in 03_TX_model.py.
"""
import os
import sys
import geopandas as gpd
import pandas as pd
import maup
from gerrychain import (
Election,
Graph,
MarkovChain,
Partition,
GeographicPartition,
accept,
constraints,
updaters,
tree
)
from gerrychain.metrics import efficiency_gap, mean_median
from gerrychain.proposals import recom, propose_random_flip
from gerrychain.updaters import cut_edges
from gerrychain.tree import recursive_tree_part, bipartition_tree_random, PopulatedGraph, \
find_balanced_edge_cuts_memoization, random_spanning_tree
from run_functions import compute_final_dist, compute_W2, prob_conf_conversion, cand_pref_outcome_sum, \
cand_pref_all_draws_outcomes, precompute_state_weights, compute_district_weights
import warnings
warnings.filterwarnings('ignore', 'GeoSeries.isna', UserWarning)
# Read in Texas shapefile created by MGGG
df = gpd.read_file("Data/TX_VTDs/TX_VTDs.shp")
# ACS block group data from redistricting hub (TX has 15,811 block groups)
block_groups = gpd.read_file("Data/tx_acs5_2019_bg/tx_acs5_2019_bg.shp")
# maup.doctor(block_groups, df)
# update projection coordinate reference system to resolve area inaccuracy issues
df = df.to_crs('epsg:3083')
block_groups = block_groups.to_crs('epsg:3083')
with maup.progress():
block_groups['geometry'] = maup.autorepair(block_groups)
df['geometry'] = maup.autorepair(df)
# map block groups to VTDs based on geometric intersections
# Include area_cutoff=0 to ignore any intersections with no area,
# like boundary intersections, which we do not want to include in
# our proration.
pieces = maup.intersections(block_groups, df, area_cutoff=0)
# Option 1: Weight by prorated population from blocks
# block_proj = gpd.read_file("Data/tx_b_proj_P1_2020tiger/tx_b_proj_P1_2020tiger.shp")
# block_proj = block_proj.to_crs('epsg:3083')
# block_proj['geometry'] = maup.autorepair(block_proj)
#
# with maup.progress():
# bg2pieces = maup.assign(block_proj, pieces.reset_index())
# weights = block_proj['p20_total'].groupby(bg2pieces).sum()
# weights = maup.normalize(weights, level=0)
# Option 2 (used here): weight by relative area
weights = pieces.geometry.area
weights = maup.normalize(weights, level=0)
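# Normalizing at level=0 makes the area weights of each block group's pieces sum to 1,
# so prorate splits that block group's population across VTDs by area share
# (based on maup.normalize's documented grouping by the first index level).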
with maup.progress():
df['TOTPOP19'] = maup.prorate(pieces, block_groups['TOTPOP19'], weights=weights)
# sanity check for Harris County
print(len(df[df.TOTPOP19.isna()]))
print(df[df.CNTY_x == 201].sum()[['TOTPOP_x', 'TOTPOP19']])
# Create a random seed plan for 38 districts that has a minimum number of minority
# effective districts (enacted_distinct = 11) to satisfy 'ensemble_inclusion' constraint
# relevant code excerpts copied from '03_TX_model.py'
# user input parameters######################################
tot_pop = 'TOTPOP19'
num_districts = 38 #38 Congressional districts in 2020
pop_tol = .01 #U.S. Congress (deviation from ideal district population)
effectiveness_cutoff = .6
record_statewide_modes = True
record_district_mode = False
model_mode = 'statewide' # 'district', 'equal', 'statewide'
# fixed parameters#################################################
enacted_black = 4 # number of districts in enacted map with Black effectiveness> 60%
enacted_hisp = 8 # number of districts in enacted map with Latino effectiveness > 60%
enacted_distinct = 11 # number of districts in enacted map with B > 60% or L > 60% or both
##################################################################
# key column names from Texas VTD shapefile
white_pop = 'NH_WHITE'
CVAP = "1_2018"
WCVAP = "7_2018"
HCVAP = "13_2018"
BCVAP = "5_2018" # with new CVAP codes!
geo_id = 'CNTYVTD'
C_X = "C_X"
C_Y = "C_Y"
# read files###################################################################
elec_data = pd.read_csv("TX_elections.csv")
TX_columns = list(pd.read_csv("TX_columns.csv")["Columns"])
dropped_elecs = pd.read_csv("dropped_elecs.csv")["Dropped Elections"]
recency_weights = pd.read_csv("recency_weights.csv")
min_cand_weights = pd.read_csv("ingroup_weight.csv")
cand_race_table = pd.read_csv("Candidate_Race_Party.csv")
EI_statewide = | pd.read_csv("statewide_rxc_EI_preferences.csv") | pandas.read_csv |
import numpy as np
from numpy.testing import assert_equal, assert_, assert_raises
import pandas as pd
import pandas.util.testing as tm
import pytest
from statsmodels.base import data as sm_data
from statsmodels.formula import handle_formula_data
from statsmodels.regression.linear_model import OLS
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod import families
from statsmodels.discrete.discrete_model import Logit
# FIXME: do not leave commented-out, enable or move/remove
# class TestDates(object):
# @classmethod
# def setup_class(cls):
# nrows = 10
# cls.dates_result = cls.dates_results = np.random.random(nrows)
#
# def test_dates(self):
# np.testing.assert_equal(data.wrap_output(self.dates_input, 'dates'),
# self.dates_result)
class TestArrays(object):
@classmethod
def setup_class(cls):
cls.endog = np.random.random(10)
cls.exog = np.c_[np.ones(10), np.random.random((10, 2))]
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_result = cls.col_input = np.random.random(nvars)
cls.row_result = cls.row_input = np.random.random(nrows)
cls.cov_result = cls.cov_input = np.random.random((nvars, nvars))
cls.xnames = ['const', 'x1', 'x2']
cls.ynames = 'y'
cls.row_labels = None
def test_orig(self):
np.testing.assert_equal(self.data.orig_endog, self.endog)
np.testing.assert_equal(self.data.orig_exog, self.exog)
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog)
np.testing.assert_equal(self.data.exog, self.exog)
def test_attach(self):
data = self.data
# this makes sure what the wrappers need work but not the wrapped
# results themselves
np.testing.assert_equal(data.wrap_output(self.col_input, 'columns'),
self.col_result)
np.testing.assert_equal(data.wrap_output(self.row_input, 'rows'),
self.row_result)
np.testing.assert_equal(data.wrap_output(self.cov_input, 'cov'),
self.cov_result)
def test_names(self):
data = self.data
np.testing.assert_equal(data.xnames, self.xnames)
np.testing.assert_equal(data.ynames, self.ynames)
def test_labels(self):
# HACK: because numpy master after NA stuff assert_equal fails on
# pandas indices
# FIXME: see if this can be de-hacked
np.testing.assert_(np.all(self.data.row_labels == self.row_labels))
class TestArrays2dEndog(TestArrays):
@classmethod
def setup_class(cls):
super(TestArrays2dEndog, cls).setup_class()
cls.endog = np.random.random((10, 1))
cls.exog = np.c_[np.ones(10), np.random.random((10, 2))]
cls.data = sm_data.handle_data(cls.endog, cls.exog)
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog.squeeze())
np.testing.assert_equal(self.data.exog, self.exog)
class TestArrays1dExog(TestArrays):
@classmethod
def setup_class(cls):
super(TestArrays1dExog, cls).setup_class()
cls.endog = np.random.random(10)
exog = np.random.random(10)
cls.data = sm_data.handle_data(cls.endog, exog)
cls.exog = exog[:, None]
cls.xnames = ['x1']
cls.ynames = 'y'
def test_orig(self):
np.testing.assert_equal(self.data.orig_endog, self.endog)
np.testing.assert_equal(self.data.orig_exog, self.exog.squeeze())
class TestDataFrames(TestArrays):
@classmethod
def setup_class(cls):
cls.endog = pd.DataFrame(np.random.random(10), columns=['y_1'])
exog = pd.DataFrame(np.random.random((10, 2)),
columns=['x_1', 'x_2'])
exog.insert(0, 'const', 1)
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pd.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pd.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pd.DataFrame(cls.cov_input,
index=exog.columns,
columns=exog.columns)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y_1'
cls.row_labels = cls.exog.index
def test_orig(self):
tm.assert_frame_equal(self.data.orig_endog, self.endog)
tm.assert_frame_equal(self.data.orig_exog, self.exog)
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog.values.squeeze())
np.testing.assert_equal(self.data.exog, self.exog.values)
def test_attach(self):
data = self.data
# this makes sure what the wrappers need work but not the wrapped
# results themselves
tm.assert_series_equal(data.wrap_output(self.col_input, 'columns'),
self.col_result)
tm.assert_series_equal(data.wrap_output(self.row_input, 'rows'),
self.row_result)
tm.assert_frame_equal(data.wrap_output(self.cov_input, 'cov'),
self.cov_result)
class TestDataFramesWithMultiIndex(TestDataFrames):
@classmethod
def setup_class(cls):
cls.endog = pd.DataFrame(np.random.random(10), columns=['y_1'])
mi = pd.MultiIndex.from_product([['x'], ['1', '2']])
exog = pd.DataFrame(np.random.random((10, 2)), columns=mi)
exog_flattened_idx = pd.Index(['const', 'x_1', 'x_2'])
exog.insert(0, 'const', 1)
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pd.Series(cls.col_input, index=exog_flattened_idx)
cls.row_input = np.random.random(nrows)
cls.row_result = pd.Series(cls.row_input, index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pd.DataFrame(cls.cov_input,
index=exog_flattened_idx,
columns=exog_flattened_idx)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y_1'
cls.row_labels = cls.exog.index
class TestLists(TestArrays):
@classmethod
def setup_class(cls):
super(TestLists, cls).setup_class()
cls.endog = np.random.random(10).tolist()
cls.exog = np.c_[np.ones(10), np.random.random((10, 2))].tolist()
cls.data = sm_data.handle_data(cls.endog, cls.exog)
class TestRecarrays(TestArrays):
@classmethod
def setup_class(cls):
super(TestRecarrays, cls).setup_class()
cls.endog = np.random.random(9).view([('y_1', 'f8')]).view(np.recarray)
exog = np.random.random(9*3).view([('const', 'f8'), ('x_1', 'f8'),
('x_2', 'f8')]).view(np.recarray)
exog['const'] = 1
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y_1'
def test_endogexog(self):
np.testing.assert_equal(self.data.endog,
self.endog.view(float, type=np.ndarray))
np.testing.assert_equal(self.data.exog,
self.exog.view((float, 3), type=np.ndarray))
class TestStructarrays(TestArrays):
@classmethod
def setup_class(cls):
super(TestStructarrays, cls).setup_class()
cls.endog = np.random.random(9).view([('y_1', 'f8')]).view(np.recarray)
exog = np.random.random(9*3).view([('const', 'f8'), ('x_1', 'f8'),
('x_2', 'f8')]).view(np.recarray)
exog['const'] = 1
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y_1'
def test_endogexog(self):
np.testing.assert_equal(self.data.endog,
self.endog.view(float, type=np.ndarray))
np.testing.assert_equal(self.data.exog,
self.exog.view((float, 3), type=np.ndarray))
class TestListDataFrame(TestDataFrames):
@classmethod
def setup_class(cls):
cls.endog = np.random.random(10).tolist()
exog = pd.DataFrame(np.random.random((10, 2)),
columns=['x_1', 'x_2'])
exog.insert(0, 'const', 1)
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pd.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pd.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pd.DataFrame(cls.cov_input,
index=exog.columns,
columns=exog.columns)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y'
cls.row_labels = cls.exog.index
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog)
np.testing.assert_equal(self.data.exog, self.exog.values)
def test_orig(self):
np.testing.assert_equal(self.data.orig_endog, self.endog)
tm.assert_frame_equal(self.data.orig_exog, self.exog)
class TestDataFrameList(TestDataFrames):
@classmethod
def setup_class(cls):
cls.endog = pd.DataFrame(np.random.random(10), columns=['y_1'])
exog = pd.DataFrame(np.random.random((10, 2)),
columns=['x1', 'x2'])
exog.insert(0, 'const', 1)
cls.exog = exog.values.tolist()
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pd.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pd.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pd.DataFrame(cls.cov_input,
index=exog.columns,
columns=exog.columns)
cls.xnames = ['const', 'x1', 'x2']
cls.ynames = 'y_1'
cls.row_labels = cls.endog.index
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog.values.squeeze())
np.testing.assert_equal(self.data.exog, self.exog)
def test_orig(self):
tm.assert_frame_equal(self.data.orig_endog, self.endog)
np.testing.assert_equal(self.data.orig_exog, self.exog)
class TestArrayDataFrame(TestDataFrames):
@classmethod
def setup_class(cls):
cls.endog = np.random.random(10)
exog = pd.DataFrame(np.random.random((10, 2)),
columns=['x_1', 'x_2'])
exog.insert(0, 'const', 1)
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pd.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pd.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pd.DataFrame(cls.cov_input,
index=exog.columns,
columns=exog.columns)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y'
cls.row_labels = cls.exog.index
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog)
np.testing.assert_equal(self.data.exog, self.exog.values)
def test_orig(self):
np.testing.assert_equal(self.data.orig_endog, self.endog)
tm.assert_frame_equal(self.data.orig_exog, self.exog)
class TestDataFrameArray(TestDataFrames):
@classmethod
def setup_class(cls):
cls.endog = pd.DataFrame(np.random.random(10), columns=['y_1'])
exog = pd.DataFrame(np.random.random((10, 2)),
columns=['x1', 'x2']) # names mimic defaults
exog.insert(0, 'const', 1)
cls.exog = exog.values
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pd.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pd.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pd.DataFrame(cls.cov_input,
index=exog.columns,
columns=exog.columns)
cls.xnames = ['const', 'x1', 'x2']
cls.ynames = 'y_1'
cls.row_labels = cls.endog.index
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog.values.squeeze())
np.testing.assert_equal(self.data.exog, self.exog)
def test_orig(self):
tm.assert_frame_equal(self.data.orig_endog, self.endog)
np.testing.assert_equal(self.data.orig_exog, self.exog)
class TestSeriesDataFrame(TestDataFrames):
@classmethod
def setup_class(cls):
cls.endog = pd.Series(np.random.random(10), name='y_1')
exog = pd.DataFrame(np.random.random((10, 2)),
columns=['x_1', 'x_2'])
exog.insert(0, 'const', 1)
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pd.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pd.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pd.DataFrame(cls.cov_input,
index=exog.columns,
columns=exog.columns)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y_1'
cls.row_labels = cls.exog.index
def test_orig(self):
tm.assert_series_equal(self.data.orig_endog, self.endog)
tm.assert_frame_equal(self.data.orig_exog, self.exog)
class TestSeriesSeries(TestDataFrames):
@classmethod
def setup_class(cls):
cls.endog = pd.Series(np.random.random(10), name='y_1')
exog = pd.Series(np.random.random(10), name='x_1')
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 1
cls.col_input = np.random.random(nvars)
cls.col_result = pd.Series(cls.col_input,
index=[exog.name])
cls.row_input = np.random.random(nrows)
cls.row_result = pd.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pd.DataFrame(cls.cov_input,
index=[exog.name],
columns=[exog.name])
cls.xnames = ['x_1']
cls.ynames = 'y_1'
cls.row_labels = cls.exog.index
def test_orig(self):
tm.assert_series_equal(self.data.orig_endog, self.endog)
tm.assert_series_equal(self.data.orig_exog, self.exog)
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog.values.squeeze())
np.testing.assert_equal(self.data.exog, self.exog.values[:, None])
def test_alignment():
# Fix Issue GH#206
from statsmodels.datasets.macrodata import load_pandas
d = load_pandas().data
# growth rates
gs_l_realinv = 400 * np.log(d['realinv']).diff().dropna()
gs_l_realgdp = 400 * np.log(d['realgdp']).diff().dropna()
lint = d['realint'][:-1] # incorrect indexing for test purposes
endog = gs_l_realinv
# re-index because they will not conform to lint
realgdp = gs_l_realgdp.reindex(lint.index, method='bfill')
data = dict(const=np.ones_like(lint), lrealgdp=realgdp, lint=lint)
exog = pd.DataFrame(data)
# TODO: which index do we get??
np.testing.assert_raises(ValueError, OLS, *(endog, exog))
class TestMultipleEqsArrays(TestArrays):
@classmethod
def setup_class(cls):
cls.endog = np.random.random((10, 4))
cls.exog = np.c_[np.ones(10), np.random.random((10, 2))]
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
neqs = 4
cls.col_result = cls.col_input = np.random.random(nvars)
cls.row_result = cls.row_input = np.random.random(nrows)
cls.cov_result = cls.cov_input = np.random.random((nvars, nvars))
cls.cov_eq_result = cls.cov_eq_input = np.random.random((neqs, neqs))
cls.col_eq_result = cls.col_eq_input = np.array((neqs, nvars))
cls.xnames = ['const', 'x1', 'x2']
cls.ynames = ['y1', 'y2', 'y3', 'y4']
cls.row_labels = None
def test_attach(self):
data = self.data
# this makes sure what the wrappers need work but not the wrapped
# results themselves
np.testing.assert_equal(data.wrap_output(self.col_input, 'columns'),
self.col_result)
np.testing.assert_equal(data.wrap_output(self.row_input, 'rows'),
self.row_result)
np.testing.assert_equal(data.wrap_output(self.cov_input, 'cov'),
self.cov_result)
np.testing.assert_equal(data.wrap_output(self.cov_eq_input, 'cov_eq'),
self.cov_eq_result)
np.testing.assert_equal(data.wrap_output(self.col_eq_input,
'columns_eq'),
self.col_eq_result)
class TestMultipleEqsDataFrames(TestDataFrames):
@classmethod
def setup_class(cls):
cls.endog = endog = pd.DataFrame(np.random.random((10, 4)),
columns=['y_1', 'y_2', 'y_3', 'y_4'])
exog = pd.DataFrame(np.random.random((10, 2)),
columns=['x_1', 'x_2'])
exog.insert(0, 'const', 1)
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
neqs = 4
cls.col_input = np.random.random(nvars)
cls.col_result = pd.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pd.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pd.DataFrame(cls.cov_input,
index=exog.columns,
columns=exog.columns)
cls.cov_eq_input = np.random.random((neqs, neqs))
cls.cov_eq_result = pd.DataFrame(cls.cov_eq_input,
index=endog.columns,
columns=endog.columns)
cls.col_eq_input = np.random.random((nvars, neqs))
cls.col_eq_result = pd.DataFrame(cls.col_eq_input,
index=exog.columns,
columns=endog.columns)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = ['y_1', 'y_2', 'y_3', 'y_4']
cls.row_labels = cls.exog.index
def test_attach(self):
data = self.data
tm.assert_series_equal(data.wrap_output(self.col_input, 'columns'),
self.col_result)
tm.assert_series_equal(data.wrap_output(self.row_input, 'rows'),
self.row_result)
tm.assert_frame_equal(data.wrap_output(self.cov_input, 'cov'),
self.cov_result)
tm.assert_frame_equal(data.wrap_output(self.cov_eq_input, 'cov_eq'),
self.cov_eq_result)
tm.assert_frame_equal(data.wrap_output(self.col_eq_input,
'columns_eq'),
self.col_eq_result)
class TestMissingArray(object):
@classmethod
def setup_class(cls):
X = np.random.random((25, 4))
y = np.random.random(25)
y[10] = np.nan
X[2, 3] = np.nan
X[14, 2] = np.nan
cls.y, cls.X = y, X
@pytest.mark.smoke
def test_raise_no_missing(self):
# GH#1700
sm_data.handle_data(np.random.random(20), np.random.random((20, 2)),
'raise')
def test_raise(self):
with pytest.raises(Exception):
# TODO: be more specific about exception
sm_data.handle_data(self.y, self.X, 'raise')
def test_drop(self):
y = self.y
X = self.X
combined = np.c_[y, X]
idx = ~np.isnan(combined).any(axis=1)
y = y[idx]
X = X[idx]
data = sm_data.handle_data(self.y, self.X, 'drop')
np.testing.assert_array_equal(data.endog, y)
np.testing.assert_array_equal(data.exog, X)
def test_none(self):
data = sm_data.handle_data(self.y, self.X, 'none', hasconst=False)
np.testing.assert_array_equal(data.endog, self.y)
np.testing.assert_array_equal(data.exog, self.X)
assert data.k_constant == 0
def test_endog_only_raise(self):
with pytest.raises(Exception):
# TODO: be more specific about exception
sm_data.handle_data(self.y, None, 'raise')
def test_endog_only_drop(self):
y = self.y
y = y[~np.isnan(y)]
data = sm_data.handle_data(self.y, None, 'drop')
np.testing.assert_array_equal(data.endog, y)
def test_mv_endog(self):
y = self.X
y = y[~np.isnan(y).any(axis=1)]
data = sm_data.handle_data(self.X, None, 'drop')
np.testing.assert_array_equal(data.endog, y)
def test_extra_kwargs_2d(self):
sigma = np.random.random((25, 25))
sigma = sigma + sigma.T - np.diag(np.diag(sigma))
data = sm_data.handle_data(self.y, self.X, 'drop', sigma=sigma)
idx = ~np.isnan(np.c_[self.y, self.X]).any(axis=1)
sigma = sigma[idx][:, idx]
np.testing.assert_array_equal(data.sigma, sigma)
def test_extra_kwargs_1d(self):
weights = np.random.random(25)
data = sm_data.handle_data(self.y, self.X, 'drop', weights=weights)
idx = ~np.isnan(np.c_[self.y, self.X]).any(axis=1)
weights = weights[idx]
np.testing.assert_array_equal(data.weights, weights)
class TestMissingPandas(object):
@classmethod
def setup_class(cls):
X = np.random.random((25, 4))
y = np.random.random(25)
y[10] = np.nan
X[2, 3] = np.nan
X[14, 2] = np.nan
cls.y = pd.Series(y)
cls.X = pd.DataFrame(X)
@pytest.mark.smoke
def test_raise_no_missing(self):
# GH#1700
sm_data.handle_data(pd.Series(np.random.random(20)),
pd.DataFrame(np.random.random((20, 2))),
'raise')
def test_raise(self):
with pytest.raises(Exception):
# TODO: be more specific about exception
sm_data.handle_data(self.y, self.X, 'raise')
def test_drop(self):
y = self.y
X = self.X
combined = np.c_[y, X]
idx = ~np.isnan(combined).any(axis=1)
y = y.loc[idx]
X = X.loc[idx]
data = sm_data.handle_data(self.y, self.X, 'drop')
np.testing.assert_array_equal(data.endog, y.values)
tm.assert_series_equal(data.orig_endog, self.y.loc[idx])
np.testing.assert_array_equal(data.exog, X.values)
| tm.assert_frame_equal(data.orig_exog, self.X.loc[idx]) | pandas.util.testing.assert_frame_equal |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 31 15:16:47 2017
@author: wasifaahmed
"""
from flask import Flask, flash, render_template, request, Response, redirect, url_for, send_from_directory, jsonify, session
import json
from datetime import datetime, timedelta, date
from sklearn.cluster import KMeans
import numpy as np
from PIL import Image
from flask_sqlalchemy import SQLAlchemy  # the flask.ext namespace was removed in Flask 1.0
import matplotlib.image as mpimg
from io import StringIO
from skimage import data, exposure, img_as_float, io, color
import scipy
from scipy import ndimage
import time
import tensorflow as tf
import os, sys
import shutil
import numpy as np
import pandas as pd
from PIL import Image
from model import *
from sqlalchemy.sql import text
from sqlalchemy import *
from forms import *
import math
from io import StringIO
import csv
from sqlalchemy.orm import load_only
from datetime import datetime,date
from numpy import genfromtxt
from sqlalchemy.ext.serializer import loads, dumps
from sqlalchemy.orm import sessionmaker, scoped_session
from flask_bootstrap import Bootstrap
graph = tf.Graph()
with graph.as_default():
sess = tf.Session(graph=graph)
init_op = tf.global_variables_initializer()
pointsarray=[]
def load_model():
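    # Restores the trained TensorFlow model (meta graph + checkpoint weights) into the
    # module-level session so it can serve predictions.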
sess.run(init_op)
saver = tf.train.import_meta_graph('E:/FRAS Windows/FRAS_production/Simulation/FRAS_20170726/FRAS_20170727.meta')
#saver = tf.train.import_meta_graph('/Users/wasifaahmed/Documents/FRAS/Fras_production_v.0.1/FRAS Windows/FRAS Windows/FRAS_production/Simulation/FRAS_20170726/FRAS_20170727.meta')
print('The model is loading...')
#saver.restore(sess, "/Users/wasifaahmed/Documents/FRAS/Fras_production_v.0.1/FRAS Windows/FRAS Windows/FRAS_production/Simulation/FRAS_20170726/FRAS_20170727")
saver.restore(sess, 'E:/FRAS Windows/FRAS_production/Simulation/FRAS_20170726/FRAS_20170727')
print('loaded...')
pass
engine =create_engine('postgresql://postgres:user@localhost/postgres')
Session = scoped_session(sessionmaker(bind=engine))
mysession = Session()
app = Flask(__name__)
app.config.update(
DEBUG=True,
SECRET_KEY='\<KEY>')
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:user@localhost/fras_production'
db.init_app(app)
Bootstrap(app)
@app.after_request
def add_header(response):
response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'
response.headers['Cache-Control'] = 'public, max-age=0'
return response
@app.route('/',methods=['GET', 'POST'])
def login():
form = LoginForm()
return render_template('forms/login.html', form=form)
@app.route('/home',methods=['GET', 'POST'])
def index():
return render_template('pages/home.html')
@app.route('/detail_setup/')
def Detail_Setup():
curdate=time.strftime("%Y-%m-%d")
selection=Shooting_Session.query.filter(Shooting_Session.date>=curdate).order_by(Shooting_Session.datetimestamp.desc()).all()
firer_1 = [row.service_id for row in Shooter.query.all()]
return render_template('pages/detail_setup.html',
data=selection,
firer_1=firer_1)
@app.route('/auto_setup/')
def auto_setup():
drop=[]
curdate=time.strftime("%Y-%m-%d")
form=BulkRegistrationForm()
selection_2=Shooting_Session.query.filter(Shooting_Session.date>=curdate).order_by(Shooting_Session.datetimestamp.desc()).all()
selection=TGroup.query.distinct(TGroup.group_no).filter(TGroup.date==curdate).all()
return render_template('pages/auto_setup.html',
data=selection, data_2=selection_2,form=form)
@app.route('/auto_setup_1/')
def auto_setup_1():
drop=[]
curdate=time.strftime("%Y-%m-%d")
form=BulkRegistrationForm()
selection_2=Shooting_Session.query.filter(Shooting_Session.date>=curdate).order_by(Shooting_Session.datetimestamp.desc()).all()
selection=TGroup.query.distinct(TGroup.group_no).all()
return render_template('pages/auto_setup_1.html',
data=selection, data_2=selection_2,form=form)
@app.route('/group_gen/',methods=['GET', 'POST'])
def group_gen():
da_1=None
da_2=None
da_3=None
da_4=None
da_5=None
da_6=None
da_7=None
da_8=None
if request.method == "POST":
data = request.get_json()
group=data['data']
session['group']=group
data=TGroup.query.filter(TGroup.group_no==group).scalar()
da_1=data.target_1_no
da_2=data.target_2_no
da_3=data.target_3_no
da_4=data.target_4_no
da_5=data.target_5_no
da_6=data.target_6_no
da_7=data.target_7_no
da_8=data.target_8_no
return jsonify(data1=da_1,
data2=da_2,
data3=da_3,
data4=da_4,
data5=da_5,
data6=da_6,
data7=da_7,
data8=da_8
)
@app.route('/detail_exitence_1/',methods=['GET', 'POST'])
def detail_exitence_1():
ra_1=None
da_1=None
detail=None
service_id_1=None
session=None
paper=None
set_no=None
cant=None
if request.method == "POST":
data = request.get_json()
detail=data['data']
dt=time.strftime("%Y-%m-%d")
data=db.session.query(Session_Detail).filter(Session_Detail.detail_no==detail).scalar()
db.session.query(TShooting).delete()
db.session.commit()
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=data.session_id,
detail_no=data.detail_no,
target_1_id=data.target_1_id,
target_2_id=data.target_2_id,
target_3_id=data.target_3_id,
target_4_id=data.target_4_id,
target_5_id=data.target_5_id,
target_6_id=data.target_6_id,
target_7_id=data.target_7_id,
target_8_id=data.target_8_id,
paper_ref=data.paper_ref,
set_no=data.set_no,
save_flag=0
)
db.session.add(Tdetail_shots)
db.session.commit()
res=[]
ten=[]
gp_len=[]
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==data.target_1_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==data.target_1_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
tgp = db.session.query(Grouping.grouping_length_f).filter(Grouping.firer_id==data.target_1_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
for ele in tres:
for ele2 in ele:
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
ten.append(ele4)
for ele5 in tgp:
for ele6 in ele5:
gp_len.append(ele6)
da_1=db.session.query(Shooter.name).filter(Shooter.id==data.target_1_id).scalar()
cant_id=db.session.query(Shooter.cantonment_id).filter(Shooter.id==data.target_1_id).scalar()
cant=db.session.query(Cantonment.cantonment).filter(Cantonment.id==cant_id).scalar()
ra_1_id=db.session.query(Shooter.rank_id).filter(Shooter.id==data.target_1_id).scalar()
ra_1 = db.session.query(Rank.name).filter(Rank.id==ra_1_id).scalar()
session=db.session.query(TShooting.session_id).scalar()
paper=db.session.query(TShooting.paper_ref).scalar()
set_no=db.session.query(TShooting.set_no).scalar()
service_id_1 = db.session.query(Shooter.service_id).filter(Shooter.id==data.target_1_id).scalar()
return jsonify(
data1=da_1,
ra_1=ra_1,
detail=detail,
service_id_1=service_id_1,
session=session,
paper=paper,
set_no=set_no,
cant=cant,
res=res,
ten=ten,
gp_len=gp_len
)
@app.route('/generate_ref/' ,methods=['GET', 'POST'])
def generate_ref():
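    # Returns 0 when a new paper reference is requested, otherwise the stored paper_ref.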
g=None
if request.method == "POST":
data = request.get_json()
paper_ref =data['data']
if (paper_ref == 'New'):
g=0
else:
obj=TPaper_ref.query.scalar()
g= obj.paper_ref
return jsonify(gen=int(g))
@app.route('/create_detail_target_2/', methods=['GET', 'POST'])
def create_detail_target_2():
curdate=time.strftime("%Y-%m-%d")
firer_1 = [row.service_id for row in Shooter.query.all()]
detail_data=TShooting.query.scalar()
return render_template('pages/create_detail_target_2.html',
detail_data=detail_data,
firer_1=firer_1
)
@app.route('/save_target_2/', methods=['GET', 'POST'])
def save_target_2():
r=request.form['tag']
r_object=Shooter.query.filter(Shooter.service_id==r).scalar()
r_id=r_object.id
ses=Session_Detail.query.first()
ses.target_2_id=r_id
db.session.commit()
temp =TShooting.query.first()
temp.target_2_id=r_id
db.session.commit()
return redirect(url_for('individual_score_target_2'))
@app.route('/create_detail_target_1/', methods=['GET', 'POST'])
def create_detail_target_1():
curdate=time.strftime("%Y-%m-%d")
selection=Shooting_Session.query.filter(Shooting_Session.date==curdate).all()
firer_1 = [row.service_id for row in Shooter.query.all()]
return render_template('pages/create_detail_target_1.html',
data=selection,
firer_1=firer_1
)
@app.route('/create_session/', methods=['GET', 'POST'])
def create_session():
try:
data = Shooter.query.all()
rang= Range.query.all()
firearms = Firearms.query.all()
ammunation = Ammunation.query.all()
rang_name = request.form.get('comp_select_4')
fire_name = request.form.get('comp_select_5')
ammu_name = request.form.get('comp_select_6')
form=SessionForm()
if(rang_name is None):
range_id=999
fire_id=999
ammu_id=999
else:
range_id = db.session.query(Range.id).filter(Range.name==rang_name).scalar()
fire_id = db.session.query(Firearms.id).filter(Firearms.name==fire_name).scalar()
ammu_id = db.session.query(Ammunation.id).filter(Ammunation.name==ammu_name).scalar()
if form.validate_on_submit():
shooting=Shooting_Session(
date=form.date.data.strftime('%Y-%m-%d'),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
shooting_range_id=range_id,
firearms_id=fire_id,
ammunation_id=ammu_id,
target_distance = form.target_distance.data,
weather_notes = form.weather_notes.data,
comments = form.comments.data,
session_no=form.session_no.data,
occasion=form.occ.data
)
db.session.add(shooting)
db.session.commit()
return redirect(url_for('create_detail_target_1'))
except Exception as e:
return redirect(url_for('error5_505.html'))
    return render_template('forms/shooting_form.html', form=form, data=data, rang=rang, firearmns=firearms, ammunation=ammunation)
@app.route('/monthly_report/',methods=['GET','POST'])
def monthly_report():
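    # Builds the monthly grouping/MPI report for the month selected in the form (current year).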
year=None
month=None
date_start=None
try:
if request.method=='POST':
month=request.form.get('comp_select')
year = datetime.now().year
if (month == 'October'):
dt_start='-10-01'
dt_end ='-10-31'
str_date_start = str(year)+dt_start
date_start=datetime.strptime(str_date_start, "%Y-%m-%d")
str_date_end = str(year)+dt_end
date_end=datetime.strptime(str_date_end, "%Y-%m-%d")
dat1=db.session.query(Grouping.date,Shooter.service_id,Rank.name,Shooter.name.label('firer'),Shooter.unit,Shooter.brigade,Grouping.detail_no,Grouping.result,Grouping.grouping_length_f,MPI.tendency_text).filter(Grouping.date.between(date_start,date_end), Grouping.firer_id==Shooter.id,Shooter.rank_id==Rank.id and Grouping.date==MPI.date, Grouping.session_id==MPI.session_id,Grouping.firer_id==MPI.firer_id,Grouping.detail_no==MPI.detail_no,Grouping.target_no==MPI.target_no,Grouping.spell_no==MPI.spell_no,Grouping.paper_ref==MPI.paper_ref).all()
elif(month=='January'):
dt_start='-01-01'
dt_end ='-01-31'
str_date_start = str(year)+dt_start
date_start=datetime.strptime(str_date_start, "%Y-%m-%d")
str_date_end = str(year)+dt_end
date_end=datetime.strptime(str_date_end, "%Y-%m-%d")
dat1=db.session.query(Grouping.date,Shooter.service_id,Rank.name,Shooter.name.label('firer'),Shooter.unit,Shooter.brigade,Grouping.detail_no,Grouping.result,Grouping.grouping_length_f,MPI.tendency_text).filter(Grouping.date.between(date_start,date_end), Grouping.firer_id==Shooter.id,Shooter.rank_id==Rank.id and Grouping.date==MPI.date, Grouping.session_id==MPI.session_id,Grouping.firer_id==MPI.firer_id,Grouping.detail_no==MPI.detail_no,Grouping.target_no==MPI.target_no,Grouping.spell_no==MPI.spell_no,Grouping.paper_ref==MPI.paper_ref).all()
elif(month=='February'):
dt_start='-02-01'
dt_end ='-02-28'
str_date_start = str(year)+dt_start
date_start=datetime.strptime(str_date_start, "%Y-%m-%d")
str_date_end = str(year)+dt_end
date_end=datetime.strptime(str_date_end, "%Y-%m-%d")
dat1=db.session.query(Grouping.date,Shooter.service_id,Rank.name,Shooter.name.label('firer'),Shooter.unit,Shooter.brigade,Grouping.detail_no,Grouping.result,Grouping.grouping_length_f,MPI.tendency_text).filter(Grouping.date.between(date_start,date_end), Grouping.firer_id==Shooter.id,Shooter.rank_id==Rank.id and Grouping.date==MPI.date, Grouping.session_id==MPI.session_id,Grouping.firer_id==MPI.firer_id,Grouping.detail_no==MPI.detail_no,Grouping.target_no==MPI.target_no,Grouping.spell_no==MPI.spell_no,Grouping.paper_ref==MPI.paper_ref).all()
elif(month=='March'):
dt_start='-03-01'
dt_end ='-03-31'
str_date_start = str(year)+dt_start
date_start=datetime.strptime(str_date_start, "%Y-%m-%d")
str_date_end = str(year)+dt_end
date_end=datetime.strptime(str_date_end, "%Y-%m-%d")
dat1=db.session.query(Grouping.date,Shooter.service_id,Rank.name,Shooter.name.label('firer'),Shooter.unit,Shooter.brigade,Grouping.detail_no,Grouping.result,Grouping.grouping_length_f,MPI.tendency_text).filter(Grouping.date.between(date_start,date_end), Grouping.firer_id==Shooter.id,Shooter.rank_id==Rank.id and Grouping.date==MPI.date, Grouping.session_id==MPI.session_id,Grouping.firer_id==MPI.firer_id,Grouping.detail_no==MPI.detail_no,Grouping.target_no==MPI.target_no,Grouping.spell_no==MPI.spell_no,Grouping.paper_ref==MPI.paper_ref).all()
elif(month=='April'):
dt_start='-04-01'
dt_end ='-04-30'
str_date_start = str(year)+dt_start
date_start=datetime.strptime(str_date_start, "%Y-%m-%d")
str_date_end = str(year)+dt_end
date_end=datetime.strptime(str_date_end, "%Y-%m-%d")
dat1=db.session.query(Grouping.date,Shooter.service_id,Rank.name,Shooter.name.label('firer'),Shooter.unit,Shooter.brigade,Grouping.detail_no,Grouping.result,Grouping.grouping_length_f,MPI.tendency_text).filter(Grouping.date.between(date_start,date_end), Grouping.firer_id==Shooter.id,Shooter.rank_id==Rank.id and Grouping.date==MPI.date, Grouping.session_id==MPI.session_id,Grouping.firer_id==MPI.firer_id,Grouping.detail_no==MPI.detail_no,Grouping.target_no==MPI.target_no,Grouping.spell_no==MPI.spell_no,Grouping.paper_ref==MPI.paper_ref).all()
elif(month=='May'):
dt_start='-05-01'
dt_end ='-05-31'
str_date_start = str(year)+dt_start
date_start=datetime.strptime(str_date_start, "%Y-%m-%d")
str_date_end = str(year)+dt_end
date_end=datetime.strptime(str_date_end, "%Y-%m-%d")
dat1=db.session.query(Grouping.date,Shooter.service_id,Rank.name,Shooter.name.label('firer'),Shooter.unit,Shooter.brigade,Grouping.detail_no,Grouping.result,Grouping.grouping_length_f,MPI.tendency_text).filter(Grouping.date.between(date_start,date_end), Grouping.firer_id==Shooter.id,Shooter.rank_id==Rank.id and Grouping.date==MPI.date, Grouping.session_id==MPI.session_id,Grouping.firer_id==MPI.firer_id,Grouping.detail_no==MPI.detail_no,Grouping.target_no==MPI.target_no,Grouping.spell_no==MPI.spell_no,Grouping.paper_ref==MPI.paper_ref).all()
elif(month=='June'):
dt_start='-06-01'
dt_end ='-06-30'
str_date_start = str(year)+dt_start
date_start=datetime.strptime(str_date_start, "%Y-%m-%d")
str_date_end = str(year)+dt_end
date_end=datetime.strptime(str_date_end, "%Y-%m-%d")
dat1=db.session.query(Grouping.date,Shooter.service_id,Rank.name,Shooter.name.label('firer'),Shooter.unit,Shooter.brigade,Grouping.detail_no,Grouping.result,Grouping.grouping_length_f,MPI.tendency_text).filter(Grouping.date.between(date_start,date_end), Grouping.firer_id==Shooter.id,Shooter.rank_id==Rank.id and Grouping.date==MPI.date, Grouping.session_id==MPI.session_id,Grouping.firer_id==MPI.firer_id,Grouping.detail_no==MPI.detail_no,Grouping.target_no==MPI.target_no,Grouping.spell_no==MPI.spell_no,Grouping.paper_ref==MPI.paper_ref).all()
elif(month=='July'):
dt_start='-07-01'
dt_end ='-07-31'
str_date_start = str(year)+dt_start
date_start=datetime.strptime(str_date_start, "%Y-%m-%d")
str_date_end = str(year)+dt_end
date_end=datetime.strptime(str_date_end, "%Y-%m-%d")
                dat1=db.session.query(Grouping.date,Shooter.service_id,Rank.name,Shooter.name.label('firer'),Shooter.unit,Shooter.brigade,Grouping.detail_no,Grouping.result,Grouping.grouping_length_f,MPI.tendency_text).filter(Grouping.date.between(date_start,date_end), Grouping.firer_id==Shooter.id,Shooter.rank_id==Rank.id, Grouping.date==MPI.date, Grouping.session_id==MPI.session_id,Grouping.firer_id==MPI.firer_id,Grouping.detail_no==MPI.detail_no,Grouping.target_no==MPI.target_no,Grouping.spell_no==MPI.spell_no,Grouping.paper_ref==MPI.paper_ref).all()
elif(month=='August'):
dt_start='-08-01'
dt_end ='-08-31'
str_date_start = str(year)+dt_start
date_start=datetime.strptime(str_date_start, "%Y-%m-%d")
str_date_end = str(year)+dt_end
date_end=datetime.strptime(str_date_end, "%Y-%m-%d")
                dat1=db.session.query(Grouping.date,Shooter.service_id,Rank.name,Shooter.name.label('firer'),Shooter.unit,Shooter.brigade,Grouping.detail_no,Grouping.result,Grouping.grouping_length_f,MPI.tendency_text).filter(Grouping.date.between(date_start,date_end), Grouping.firer_id==Shooter.id,Shooter.rank_id==Rank.id, Grouping.date==MPI.date, Grouping.session_id==MPI.session_id,Grouping.firer_id==MPI.firer_id,Grouping.detail_no==MPI.detail_no,Grouping.target_no==MPI.target_no,Grouping.spell_no==MPI.spell_no,Grouping.paper_ref==MPI.paper_ref).all()
elif(month=='September'):
dt_start='-09-01'
dt_end ='-09-30'
str_date_start = str(year)+dt_start
date_start=datetime.strptime(str_date_start, "%Y-%m-%d")
str_date_end = str(year)+dt_end
date_end=datetime.strptime(str_date_end, "%Y-%m-%d")
                dat1=db.session.query(Grouping.date,Shooter.service_id,Rank.name,Shooter.name.label('firer'),Shooter.unit,Shooter.brigade,Grouping.detail_no,Grouping.result,Grouping.grouping_length_f,MPI.tendency_text).filter(Grouping.date.between(date_start,date_end), Grouping.firer_id==Shooter.id,Shooter.rank_id==Rank.id, Grouping.date==MPI.date, Grouping.session_id==MPI.session_id,Grouping.firer_id==MPI.firer_id,Grouping.detail_no==MPI.detail_no,Grouping.target_no==MPI.target_no,Grouping.spell_no==MPI.spell_no,Grouping.paper_ref==MPI.paper_ref).all()
            elif(month=='October'):
                dt_start='-10-01'
                dt_end ='-10-31'
                str_date_start = str(year)+dt_start
                date_start=datetime.strptime(str_date_start, "%Y-%m-%d")
                str_date_end = str(year)+dt_end
                date_end=datetime.strptime(str_date_end, "%Y-%m-%d")
                dat1=db.session.query(Grouping.date,Shooter.service_id,Rank.name,Shooter.name.label('firer'),Shooter.unit,Shooter.brigade,Grouping.detail_no,Grouping.result,Grouping.grouping_length_f,MPI.tendency_text).filter(Grouping.date.between(date_start,date_end), Grouping.firer_id==Shooter.id,Shooter.rank_id==Rank.id, Grouping.date==MPI.date, Grouping.session_id==MPI.session_id,Grouping.firer_id==MPI.firer_id,Grouping.detail_no==MPI.detail_no,Grouping.target_no==MPI.target_no,Grouping.spell_no==MPI.spell_no,Grouping.paper_ref==MPI.paper_ref).all()
            elif(month=='November'):
dt_start='-11-01'
dt_end ='-11-30'
str_date_start = str(year)+dt_start
date_start=datetime.strptime(str_date_start, "%Y-%m-%d")
str_date_end = str(year)+dt_end
date_end=datetime.strptime(str_date_end, "%Y-%m-%d")
                dat1=db.session.query(Grouping.date,Shooter.service_id,Rank.name,Shooter.name.label('firer'),Shooter.unit,Shooter.brigade,Grouping.detail_no,Grouping.result,Grouping.grouping_length_f,MPI.tendency_text).filter(Grouping.date.between(date_start,date_end), Grouping.firer_id==Shooter.id,Shooter.rank_id==Rank.id, Grouping.date==MPI.date, Grouping.session_id==MPI.session_id,Grouping.firer_id==MPI.firer_id,Grouping.detail_no==MPI.detail_no,Grouping.target_no==MPI.target_no,Grouping.spell_no==MPI.spell_no,Grouping.paper_ref==MPI.paper_ref).all()
else:
dt_start='-12-01'
dt_end ='-12-31'
str_date_start = str(year)+dt_start
date_start=datetime.strptime(str_date_start, "%Y-%m-%d")
str_date_end = str(year)+dt_end
date_end=datetime.strptime(str_date_end, "%Y-%m-%d")
                dat1=db.session.query(Grouping.date,Shooter.service_id,Rank.name,Shooter.name.label('firer'),Shooter.unit,Shooter.brigade,Grouping.detail_no,Grouping.result,Grouping.grouping_length_f,MPI.tendency_text).filter(Grouping.date.between(date_start,date_end), Grouping.firer_id==Shooter.id,Shooter.rank_id==Rank.id, Grouping.date==MPI.date, Grouping.session_id==MPI.session_id,Grouping.firer_id==MPI.firer_id,Grouping.detail_no==MPI.detail_no,Grouping.target_no==MPI.target_no,Grouping.spell_no==MPI.spell_no,Grouping.paper_ref==MPI.paper_ref).all()
return render_template('pages/monthly_report.html', dat1=dat1 ,month=month)
except Exception as e:
return render_template('errors/month_session.html')
return render_template('pages/monthly_report.html')
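# A minimal helper sketch (assumption: it is not wired into monthly_report above) showing how
# the hard-coded per-month start/end dates used in each branch could be derived from the
# standard-library calendar module instead of being spelled out month by month.
def _month_range(year, month_name):
    """Return (start, end) datetimes covering the named month of the given year."""
    import calendar  # local import; calendar may not be imported at module level
    month_no = list(calendar.month_name).index(month_name)
    last_day = calendar.monthrange(int(year), month_no)[1]
    return datetime(int(year), month_no, 1), datetime(int(year), month_no, last_day)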
@app.route('/save_target_1/', methods=['GET', 'POST'])
def save_target_1():
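    """Save a single-firer detail for target 1.

    Resolves the firer's service id to a Shooter id, fills the remaining seven
    targets with the placeholder id 999, refreshes TPaper_ref and writes the
    detail to both Session_Detail and TShooting before redirecting to the
    target-1 scoring page.
    """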
ref_1=None
try:
if request.method == 'POST':
detail_no = request.form['game_id_1']
r=request.form['tag']
r_object=Shooter.query.filter(Shooter.service_id==r).scalar()
r_id=r_object.id
r2_id=999
r3_id=999
r4_id=999
r5_id=999
r6_id=999
r7_id=999
r8_id=999
ref=request.form['business']
set_no = request.form.get('comp_select_6')
shots = request.form['tag_8']
sess=request.form.get('comp_select')
ref_1 = None
paper=db.session.query(TPaper_ref).scalar()
if(ref == ""):
ref_1=paper.paper_ref
else:
ref_1=ref
temp_shooting=db.session.query(TShooting).scalar()
if(temp_shooting is None):
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r2_id,
target_3_id=r3_id,
target_4_id=r4_id,
target_5_id=r5_id,
target_6_id=r6_id,
target_7_id=r7_id,
target_8_id=r8_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
db.session.query(TPaper_ref).delete()
db.session.commit()
ref_db = TPaper_ref(
paper_ref=ref_1,
detail_no=detail_no,
session_no=sess
)
db.session.add(ref_db)
db.session.commit()
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r2_id,
target_3_id=r3_id,
target_4_id=r4_id,
target_5_id=r5_id,
target_6_id=r6_id,
target_7_id=r7_id,
target_8_id=r8_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(Tdetail_shots)
db.session.commit()
else:
db.session.query(TShooting).delete()
db.session.commit()
db.session.query(TPaper_ref).delete()
db.session.commit()
ref_db = TPaper_ref(
paper_ref=ref_1,
detail_no=detail_no,
session_no=sess
)
db.session.add(ref_db)
db.session.commit()
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r2_id,
target_3_id=r3_id,
target_4_id=r4_id,
target_5_id=r5_id,
target_6_id=r6_id,
target_7_id=r7_id,
target_8_id=r8_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r2_id,
target_3_id=r3_id,
target_4_id=r4_id,
target_5_id=r5_id,
target_6_id=r6_id,
target_7_id=r7_id,
target_8_id=r8_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(Tdetail_shots)
db.session.commit()
except Exception as e:
return redirect(url_for('error_target_1'))
return redirect(url_for('individual_score_target_1'))
@app.route('/FRAS/', methods=['GET', 'POST'])
def load ():
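    """Create a firing detail from the FRAS form.

    Resolves up to eight firers by service id (blank entries become the
    placeholder id 999), rejects set numbers above 5 and duplicate firers,
    refreshes TPaper_ref and stores the detail in TShooting and Session_Detail
    before redirecting to the image-processing page.
    """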
try:
ref_1=None
if request.method == 'POST':
detail_no = request.form['game_id_1']
tmp_list = []
duplicate = False
r=request.form['tag']
if (r== ""):
r_id = 999
else:
r_object=Shooter.query.filter(Shooter.service_id==r).scalar()
r_id=r_object.id
r1=request.form['tag_1']
if(r1== ""):
r1_id=999
else:
r1_object=Shooter.query.filter(Shooter.service_id==r1).scalar()
r1_id=r1_object.id
r2=request.form['tag_2']
if (r2==""):
r2_id=999
else:
r2_object=Shooter.query.filter(Shooter.service_id==r2).scalar()
r2_id=r2_object.id
r3=request.form['tag_3']
if(r3==""):
r3_id=999
else:
r3_object=Shooter.query.filter(Shooter.service_id==r3).scalar()
r3_id=r3_object.id
r4=request.form['tag_4']
if(r4==""):
r4_id=999
else:
r4_object=Shooter.query.filter(Shooter.service_id==r4).scalar()
r4_id=r4_object.id
r5=request.form['tag_5']
if(r5==""):
r5_id=999
else:
r5_object=Shooter.query.filter(Shooter.service_id==r5).scalar()
r5_id=r5_object.id
r6=request.form['tag_6']
if(r6==""):
r6_id=999
else:
r6_object=Shooter.query.filter(Shooter.service_id==r6).scalar()
r6_id=r6_object.id
r7=request.form['tag_7']
if(r7== ""):
r7_id=999
else:
r7_object=Shooter.query.filter(Shooter.service_id==r7).scalar()
r7_id=r7_object.id
ref=request.form['business']
set_no = request.form.get('comp_select_6')
shots = request.form['tag_8']
sess=request.form.get('comp_select')
tmp_list.append(r_id)
tmp_list.append(r1_id)
tmp_list.append(r2_id)
tmp_list.append(r3_id)
tmp_list.append(r4_id)
tmp_list.append(r5_id)
tmp_list.append(r6_id)
tmp_list.append(r7_id)
            if ref is None or ref == "":
ref_obj=TPaper_ref.query.scalar()
ref_1=ref_obj.paper_ref
else :
print("Inside ref _4 else")
ref_1=ref
print(ref_1)
print("Inside ref _4 else 1")
if(int(set_no)>5):
print("Inside ref _5 else")
return redirect(url_for('paper_duplicate_error'))
else:
print("Inside TPaper_ref")
db.session.query(TPaper_ref).delete()
print("Inside TPaper_ref")
db.session.commit()
ref_db = TPaper_ref(
paper_ref=ref_1,
detail_no=detail_no,
session_no=sess
)
db.session.add(ref_db)
db.session.commit()
print("Inside load 3")
            for i in range(len(tmp_list)):
                for j in range(len(tmp_list)):
                    if(tmp_list[i]==999 and tmp_list[j]==999):
                        # 999 marks an empty target; skip it so it never resets a duplicate already found
                        continue
                    elif(i!=j and tmp_list[i]==tmp_list[j]):
                        duplicate = True
print("temp1")
if(duplicate):
return redirect(url_for('duplicate_firer_error'))
else:
print("temp")
temp=db.session.query(TShooting.save_flag).scalar()
print(temp)
if(temp is None):
print("Inside the temp if")
print(sess)
print(detail_no)
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
print(Tdetail_shots)
print("Tdetail_shots")
db.session.add(Tdetail_shots)
db.session.commit()
                    print("")
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
else:
db.session.query(TShooting).filter(TShooting.id != 999).delete()
db.session.commit()
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(Tdetail_shots)
db.session.commit()
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
except Exception as e:
print(e)
return redirect(url_for('error_2'))
return redirect(url_for('image_process'))
@app.route('/FRAS_1/', methods=['GET', 'POST'])
def load_1 ():
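    """Create a firing detail from the group stored in the session.

    Looks up the TGroup selected earlier, resolves its eight target service
    ids to Shooter ids (blank slots become the placeholder id 999), applies
    the same set-number and duplicate checks as load(), and stores the detail
    in TShooting and Session_Detail before redirecting to the detail view.
    """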
ref_1=None
try:
if request.method == 'POST':
print("This is inside Post")
detail_no = request.form['game_id_1']
print("this is detail_no")
print(detail_no)
tmp_list = []
duplicate = False
gr=session.get('group',None)
data=TGroup.query.filter(TGroup.group_no==gr).scalar()
da_1=data.target_1_no
da_2=data.target_2_no
da_3=data.target_3_no
da_4=data.target_4_no
da_5=data.target_5_no
da_6=data.target_6_no
da_7=data.target_7_no
da_8=data.target_8_no
if(da_1==""):
r_id=999
else:
r=Shooter.query.filter(Shooter.service_id==da_1).scalar()
r_id=r.id
if(da_2==""):
r1_id=999
else:
r1=Shooter.query.filter(Shooter.service_id==da_2).scalar()
r1_id=r1.id
if(da_3==""):
r2_id=999
else:
r2=Shooter.query.filter(Shooter.service_id==da_3).scalar()
r2_id=r2.id
if(da_4==""):
r3_id=999
else:
r3=Shooter.query.filter(Shooter.service_id==da_4).scalar()
r3_id=r3.id
if(da_5==""):
r4_id=999
else:
r4=Shooter.query.filter(Shooter.service_id==da_5).scalar()
r4_id=r4.id
if(da_6==""):
r5_id=999
else:
r5=Shooter.query.filter(Shooter.service_id==da_6).scalar()
r5_id=r5.id
if(da_7==""):
r6_id=999
else:
r6=Shooter.query.filter(Shooter.service_id==da_7).scalar()
r6_id=r6.id
if(da_8==""):
r7_id=999
else:
r7=Shooter.query.filter(Shooter.service_id==da_8).scalar()
r7_id=r7.id
ref=request.form['business']
set_no = request.form.get('comp_select_6')
shots = request.form['tag_8']
sess=request.form.get('comp_select')
tmp_list.append(r_id)
tmp_list.append(r1_id)
tmp_list.append(r2_id)
tmp_list.append(r3_id)
tmp_list.append(r4_id)
tmp_list.append(r5_id)
tmp_list.append(r6_id)
tmp_list.append(r7_id)
print(tmp_list)
            if ref is None or ref == "":
ref_obj=TPaper_ref.query.scalar()
ref_1=ref_obj.paper_ref
else :
ref_1=ref
check=TPaper_ref.query.scalar()
cses=check.session_no
det=check.detail_no
if(int(set_no)>5):
return redirect(url_for('paper_duplicate_error'))
else:
db.session.query(TPaper_ref).delete()
db.session.commit()
ref_db = TPaper_ref(
paper_ref=ref_1,
detail_no=detail_no,
session_no=sess
)
db.session.add(ref_db)
db.session.commit()
            for i in range(len(tmp_list)):
                for j in range(len(tmp_list)):
                    if(tmp_list[i]==999 and tmp_list[j]==999):
                        # 999 marks an empty target; skip it so it never resets a duplicate already found
                        continue
                    elif(i!=j and tmp_list[i]==tmp_list[j]):
                        duplicate = True
if(duplicate):
return redirect(url_for('duplicate_firer_error'))
else:
temp_shooting=db.session.query(TShooting).scalar()
if(temp_shooting is None):
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(Tdetail_shots)
db.session.commit()
else:
db.session.query(TShooting).filter(TShooting.id != 999).delete()
db.session.commit()
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(Tdetail_shots)
db.session.commit()
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
except Exception as e:
return redirect(url_for('error_102'))
return redirect(url_for('detail_view'))
@app.route('/FRAS_2/', methods=['GET', 'POST'])
def load_2 ():
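    """Same flow as load_1(), but redirects to the image-processing page on success."""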
ref_1=None
try:
if request.method == 'POST':
print("This is inside Post")
detail_no = request.form['game_id_1']
print("this is detail_no")
print(detail_no)
tmp_list = []
duplicate = False
gr=session.get('group',None)
data=TGroup.query.filter(TGroup.group_no==gr).scalar()
da_1=data.target_1_no
da_2=data.target_2_no
da_3=data.target_3_no
da_4=data.target_4_no
da_5=data.target_5_no
da_6=data.target_6_no
da_7=data.target_7_no
da_8=data.target_8_no
if(da_1==""):
r_id=999
else:
r=Shooter.query.filter(Shooter.service_id==da_1).scalar()
r_id=r.id
if(da_2==""):
r1_id=999
else:
r1=Shooter.query.filter(Shooter.service_id==da_2).scalar()
r1_id=r1.id
if(da_3==""):
r2_id=999
else:
r2=Shooter.query.filter(Shooter.service_id==da_3).scalar()
r2_id=r2.id
if(da_4==""):
r3_id=999
else:
r3=Shooter.query.filter(Shooter.service_id==da_4).scalar()
r3_id=r3.id
if(da_5==""):
r4_id=999
else:
r4=Shooter.query.filter(Shooter.service_id==da_5).scalar()
r4_id=r4.id
if(da_6==""):
r5_id=999
else:
r5=Shooter.query.filter(Shooter.service_id==da_6).scalar()
r5_id=r5.id
if(da_7==""):
r6_id=999
else:
r6=Shooter.query.filter(Shooter.service_id==da_7).scalar()
r6_id=r6.id
if(da_8==""):
r7_id=999
else:
r7=Shooter.query.filter(Shooter.service_id==da_8).scalar()
r7_id=r7.id
ref=request.form['business']
set_no = request.form.get('comp_select_6')
shots = request.form['tag_8']
sess=request.form.get('comp_select')
tmp_list.append(r_id)
tmp_list.append(r1_id)
tmp_list.append(r2_id)
tmp_list.append(r3_id)
tmp_list.append(r4_id)
tmp_list.append(r5_id)
tmp_list.append(r6_id)
tmp_list.append(r7_id)
print(tmp_list)
            if ref is None or ref == "":
ref_obj=TPaper_ref.query.scalar()
ref_1=ref_obj.paper_ref
else :
ref_1=ref
check=TPaper_ref.query.scalar()
cses=check.session_no
det=check.detail_no
if(int(set_no)>5):
return redirect(url_for('paper_duplicate_error'))
else:
db.session.query(TPaper_ref).delete()
db.session.commit()
ref_db = TPaper_ref(
paper_ref=ref_1,
detail_no=detail_no,
session_no=sess
)
db.session.add(ref_db)
db.session.commit()
            for i in range(len(tmp_list)):
                for j in range(len(tmp_list)):
                    if(tmp_list[i]==999 and tmp_list[j]==999):
                        # 999 marks an empty target; skip it so it never resets a duplicate already found
                        continue
                    elif(i!=j and tmp_list[i]==tmp_list[j]):
                        duplicate = True
if(duplicate):
return redirect(url_for('duplicate_firer_error'))
else:
temp_shooting=db.session.query(TShooting).scalar()
if(temp_shooting is None):
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(Tdetail_shots)
db.session.commit()
else:
db.session.query(TShooting).filter(TShooting.id != 999).delete()
db.session.commit()
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(Tdetail_shots)
db.session.commit()
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
except Exception as e:
print(e)
return redirect(url_for('error'))
return redirect(url_for('image_process'))
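# A minimal helper sketch (assumption: it is not called by the FRAS routes above) capturing
# the lookup pattern repeated throughout load(), load_1() and load_2(): a blank service id
# maps to the placeholder id 999, otherwise the matching Shooter id is returned.
def _shooter_id_or_placeholder(service_id):
    if service_id is None or service_id == "":
        return 999
    shooter = Shooter.query.filter(Shooter.service_id == service_id).scalar()
    return shooter.id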
@app.route('/detail_view/', methods=['GET', 'POST'])
def detail_view():
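    """List every session detail with the Shooter record attached to each of its eight targets."""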
detail = Session_Detail.query.all()
for details in detail:
details.target_1=Shooter.query.filter(Shooter.id==details.target_1_id).scalar()
details.target_2=Shooter.query.filter(Shooter.id==details.target_2_id).scalar()
details.target_3=Shooter.query.filter(Shooter.id==details.target_3_id).scalar()
details.target_4=Shooter.query.filter(Shooter.id==details.target_4_id).scalar()
details.target_5=Shooter.query.filter(Shooter.id==details.target_5_id).scalar()
details.target_6=Shooter.query.filter(Shooter.id==details.target_6_id).scalar()
details.target_7=Shooter.query.filter(Shooter.id==details.target_7_id).scalar()
details.target_8=Shooter.query.filter(Shooter.id==details.target_8_id).scalar()
return render_template('pages/detail_view.html',detail=detail)
@app.route('/detail_view/detail/<id>', methods=['GET', 'POST'])
def view_detail(id):
detail=Session_Detail.query.filter(Session_Detail.id == id)
for details in detail:
details.target_1=Shooter.query.filter(Shooter.id==details.target_1_id).scalar()
details.target_2=Shooter.query.filter(Shooter.id==details.target_2_id).scalar()
details.target_3=Shooter.query.filter(Shooter.id==details.target_3_id).scalar()
details.target_4=Shooter.query.filter(Shooter.id==details.target_4_id).scalar()
details.target_5=Shooter.query.filter(Shooter.id==details.target_5_id).scalar()
details.target_6=Shooter.query.filter(Shooter.id==details.target_6_id).scalar()
details.target_7=Shooter.query.filter(Shooter.id==details.target_7_id).scalar()
details.target_8=Shooter.query.filter(Shooter.id==details.target_8_id).scalar()
return render_template('pages/detail_view_id.html',data=detail)
@app.route('/detail_view/edit/<id>', methods=['GET', 'POST'])
def view_detail_edit(id):
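    """Edit a stored session detail.

    On submit, checks that no firer is assigned to more than one target,
    updates the Session_Detail row, refreshes TPaper_ref and recreates the
    TShooting row unless the current one is already saved; on GET the form is
    pre-populated from the stored detail.
    """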
try:
detail=Session_Detail.query.filter(Session_Detail.id == id).first()
form=DetailEditForm(obj=detail)
if form.validate_on_submit():
tmp_list = []
target_1=Shooter.query.filter(Shooter.service_id == form.target_1_service.data).scalar()
tmp_list.append(target_1.id)
target_2=Shooter.query.filter(Shooter.service_id == form.target_2_service.data).scalar()
tmp_list.append(target_2.id)
target_3=Shooter.query.filter(Shooter.service_id == form.target_3_service.data).scalar()
tmp_list.append(target_3.id)
target_4=Shooter.query.filter(Shooter.service_id == form.target_4_service.data).scalar()
tmp_list.append(target_4.id)
target_5=Shooter.query.filter(Shooter.service_id == form.target_5_service.data).scalar()
tmp_list.append(target_5.id)
target_6=Shooter.query.filter(Shooter.service_id == form.target_6_service.data).scalar()
tmp_list.append(target_6.id)
target_7=Shooter.query.filter(Shooter.service_id == form.target_7_service.data).scalar()
tmp_list.append(target_7.id)
target_8=Shooter.query.filter(Shooter.service_id == form.target_8_service.data).scalar()
tmp_list.append(target_8.id)
duplicate = False
            for i in range(len(tmp_list)):
                for j in range(len(tmp_list)):
                    if(tmp_list[i]==999 and tmp_list[j]==999):
                        # 999 marks an empty target; skip it so it never resets a duplicate already found
                        continue
                    elif(i!=j and tmp_list[i]==tmp_list[j]):
                        duplicate = True
if(duplicate):
return redirect(url_for('duplicate_firer_error'))
else:
detail.date=form.date.data
detail.session_id=form.session_id.data
detail.detail_no=form.detail_no.data
detail.paper_ref=form.paper_ref.data
detail.set_no=form.set_no.data
target_1_obj=Shooter.query.filter(Shooter.service_id == form.target_1_service.data).scalar()
detail.target_1_id=target_1_obj.id
target_2_obj=Shooter.query.filter(Shooter.service_id == form.target_2_service.data).scalar()
detail.target_2_id=target_2_obj.id
target_3_obj=Shooter.query.filter(Shooter.service_id == form.target_3_service.data).scalar()
detail.target_3_id=target_3_obj.id
target_4_obj=Shooter.query.filter(Shooter.service_id == form.target_4_service.data).scalar()
detail.target_4_id=target_4_obj.id
target_5_obj=Shooter.query.filter(Shooter.service_id == form.target_5_service.data).scalar()
detail.target_5_id=target_5_obj.id
target_6_obj=Shooter.query.filter(Shooter.service_id == form.target_6_service.data).scalar()
detail.target_6_id=target_6_obj.id
target_7_obj=Shooter.query.filter(Shooter.service_id == form.target_7_service.data).scalar()
detail.target_7_id=target_7_obj.id
target_8_obj=Shooter.query.filter(Shooter.service_id == form.target_8_service.data).scalar()
detail.target_8_id=target_8_obj.id
db.session.commit()
db.session.query(TPaper_ref).delete()
db.session.commit()
ref_edit = TPaper_ref(
paper_ref=form.paper_ref.data,
detail_no=form.detail_no.data,
session_no=form.session_id.data
)
db.session.add(ref_edit)
db.session.commit()
target_1_obj=Shooter.query.filter(Shooter.service_id == form.target_1_service.data).scalar()
target_2_obj=Shooter.query.filter(Shooter.service_id == form.target_2_service.data).scalar()
target_3_obj=Shooter.query.filter(Shooter.service_id == form.target_3_service.data).scalar()
target_4_obj=Shooter.query.filter(Shooter.service_id == form.target_4_service.data).scalar()
target_5_obj=Shooter.query.filter(Shooter.service_id == form.target_5_service.data).scalar()
target_6_obj=Shooter.query.filter(Shooter.service_id == form.target_6_service.data).scalar()
target_7_obj=Shooter.query.filter(Shooter.service_id == form.target_7_service.data).scalar()
target_8_obj=Shooter.query.filter(Shooter.service_id == form.target_8_service.data).scalar()
temp_shooting=db.session.query(TShooting).scalar()
if(temp_shooting.save_flag==1):
return redirect(url_for('data_save'))
else:
db.session.query(TShooting).filter(TShooting.id != 999).delete()
db.session.commit()
Tdetail_edit =TShooting(
date=form.date.data,
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=form.session_id.data,
detail_no=form.detail_no.data,
target_1_id=target_1_obj.id,
target_2_id=target_2_obj.id,
target_3_id=target_3_obj.id,
target_4_id=target_4_obj.id,
target_5_id=target_5_obj.id,
target_6_id=target_6_obj.id,
target_7_id=target_7_obj.id,
target_8_id=target_8_obj.id,
paper_ref=form.paper_ref.data,
set_no=form.set_no.data,
save_flag=0
)
db.session.add(Tdetail_edit)
db.session.commit()
return redirect(url_for('detail_view'))
form.date.data=detail.date
form.session_id.data=detail.session_id
form.detail_no.data=detail.detail_no
form.paper_ref.data=detail.paper_ref
form.set_no.data=detail.set_no
        name_1=Shooter.query.filter(Shooter.id==detail.target_1_id).scalar()
        form.target_1_service.data=name_1.service_id
        name_2=Shooter.query.filter(Shooter.id==detail.target_2_id).scalar()
        form.target_2_service.data=name_2.service_id
        name_3=Shooter.query.filter(Shooter.id==detail.target_3_id).scalar()
        form.target_3_service.data=name_3.service_id
        name_4=Shooter.query.filter(Shooter.id==detail.target_4_id).scalar()
        form.target_4_service.data=name_4.service_id
        name_5=Shooter.query.filter(Shooter.id==detail.target_5_id).scalar()
        form.target_5_service.data=name_5.service_id
        name_6=Shooter.query.filter(Shooter.id==detail.target_6_id).scalar()
        form.target_6_service.data=name_6.service_id
        name_7=Shooter.query.filter(Shooter.id==detail.target_7_id).scalar()
        form.target_7_service.data=name_7.service_id
        name_8=Shooter.query.filter(Shooter.id==detail.target_8_id).scalar()
        form.target_8_service.data=name_8.service_id
except Exception as e:
return render_template('errors/detail_view.html')
return render_template('pages/detail_view_edit.html' , detail=detail,form=form)
@app.route('/data_save', methods=['GET', 'POST'])
def data_save():
return render_template('pages/data_save.html')
@app.route('/target_registration/', methods=['GET', 'POST'])
def target_registration():
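    """Register a shooter from the JSON payload posted by the registration page and return a JSON confirmation."""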
result=None
if request.method=="POST":
data1 = request.get_json()
print(data1)
cant=data1['cant']
div=data1['div']
rank=data1['rank']
gen=data1['gender']
dt=data1['date']
name=data1['name']
army_no=data1['service']
unit=data1['unit']
brigade=data1['brig']
gender_id=db.session.query(Gender.id).filter(Gender.name==gen).scalar()
rank_id=db.session.query(Rank.id).filter(Rank.name==rank).scalar()
cant_id=db.session.query(Cantonment.id).filter(Cantonment.cantonment==cant ,Cantonment.division==div).scalar()
print("cant_id")
print(cant_id)
shooter = Shooter(
name=name,
service_id = army_no,
registration_date = dt,
gender_id=gender_id,
cantonment_id = cant_id,
rank_id =rank_id,
unit=unit,
brigade=brigade
)
db.session.add(shooter)
db.session.commit()
        result="Data Saved Successfully"
return jsonify(result=result)
@app.route('/shooter_registration/', methods=['GET', 'POST'])
def registration():
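    """Register a shooter from the registration form, resolving the selected rank, cantonment/division and gender to their ids."""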
try:
cantonment=Cantonment.query.distinct(Cantonment.cantonment)
gender =Gender.query.all()
rank = Rank.query.all()
ran = request.form.get('comp_select4')
cant = request.form.get('comp_select')
gen = request.form.get('comp_select5')
brig = request.form.get('comp_select1')
form = RegistrationForm(request.form)
if(ran is None):
pass
else:
ran_object=Rank.query.filter(Rank.name==ran).scalar()
rank_id = ran_object.id
cant_object = Cantonment.query.filter(Cantonment.cantonment==cant,Cantonment.division==brig).scalar()
cant_id = cant_object.id
gen_obj=Gender.query.filter(Gender.name==gen).scalar()
gender_id = gen_obj.id
if form.validate_on_submit():
shooter = Shooter(
name=form.name.data,
service_id = form.service_id.data,
registration_date = form.dt.data.strftime('%Y-%m-%d'),
gender_id=gender_id,
cantonment_id = cant_id,
rank_id =rank_id,
unit=form.unit.data,
brigade=form.brig.data
)
db.session.add(shooter)
db.session.commit()
new_form = RegistrationForm(request.form)
return redirect(url_for('firer_details'))
except Exception as e:
return redirect(url_for('error_4'))
return render_template('forms/registration.html',
cantonment = cantonment ,
form=form ,
rank = rank,
gender=gender)
@app.route('/get_brigade/')
def get_brigade():
cant = request.args.get('customer')
    da = Cantonment.query.filter(Cantonment.cantonment==cant).distinct(Cantonment.division)
data = [{"name": x.division} for x in da]
return jsonify(data)
@app.route('/firer_details/', methods=['GET', 'POST'])
def firer_details():
firer = Shooter.query.all()
for firers in firer:
firers.cantonment_name= Cantonment.query.filter(Cantonment.id==firers.cantonment_id).scalar()
firers.division = Cantonment.query.filter(Cantonment.id==firers.cantonment_id).scalar()
firers.rank = Rank.query.filter(Rank.id==firers.rank_id).scalar()
firers.gender_name = Gender.query.filter(Gender.id==firers.gender_id).scalar()
return render_template('pages/firer_details.html' , firer = firer)
@app.route('/bulk_registration_group')
def bulk_registration_group():
form=BulkRegistrationForm(request.form)
return render_template('pages/bulk_registration_group.html',form=form)
@app.route('/bulk_registration')
def bulk_registration():
cantonment=db.session.query(Cantonment).distinct(Cantonment.cantonment)
form=RegistrationForm(request.form)
return render_template('pages/bulk_registration.html',cantonment=cantonment,form=form)
@app.route('/upload', methods=['POST'])
def upload():
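    """Bulk-register shooters from an uploaded CSV file under the selected
    cantonment, division, unit and brigade; the first row is skipped as a header."""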
try:
f = request.files['data_file']
cant = request.form.get('comp_select')
div = request.form.get('comp_select1')
form=RegistrationForm(request.form)
unit = request.form['game_id_1']
brig = request.form['game_id_2']
cant_id = db.session.query(Cantonment.id).filter(Cantonment.cantonment==cant,
Cantonment.division==div
).scalar()
if form.is_submitted():
stream = StringIO(f.stream.read().decode("UTF8"))
csv_input = csv.reader(stream)
lis =list(csv_input)
for i in range(len(lis)):
if (i==0):
pass
else:
shooters = Shooter(
name = lis[i][0],
service_id=lis[i][3],
registration_date=datetime.now(),
gender_id=db.session.query(Gender.id).filter(Gender.name==lis[i][2]).scalar(),
cantonment_id = cant_id,
rank_id = db.session.query(Rank.id).filter(Rank.name==lis[i][1]).scalar(),
unit=unit,
brigade=brig
)
db.session.add(shooters)
db.session.commit()
except Exception as e:
return redirect(url_for('error_3'))
return redirect(url_for('firer_details'))
@app.route('/uploadgroup', methods=['POST'])
def uploadgroup():
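    """Bulk-load target groups from an uploaded CSV file.

    If any stored group dates from before today, the TGroup table is cleared
    first; the first CSV row is skipped as a header.
    """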
try:
f = request.files['data_file']
form=BulkRegistrationForm(request.form)
if form.is_submitted():
curdate_p=(date.today())- timedelta(1)
if(db.session.query(db.exists().where(TGroup.date <= curdate_p)).scalar()):
db.session.query(TGroup).delete()
db.session.commit()
stream = StringIO(f.stream.read().decode("UTF8"))
csv_input = csv.reader(stream)
lis =list(csv_input)
for i in range(len(lis)):
if (i==0):
pass
else:
group = TGroup(
date=datetime.now(),
group_no=lis[i][0],
target_1_no=lis[i][1],
target_2_no=lis[i][2],
target_3_no=lis[i][3],
target_4_no=lis[i][4],
target_5_no=lis[i][5],
target_6_no=lis[i][6],
target_7_no=lis[i][7],
target_8_no=lis[i][8]
)
db.session.add(group)
db.session.commit()
else:
stream = StringIO(f.stream.read().decode("UTF8"))
csv_input = csv.reader(stream)
lis =list(csv_input)
for i in range(len(lis)):
if (i==0):
pass
else:
group = TGroup(
date=datetime.now(),
group_no=lis[i][0],
target_1_no=lis[i][1],
target_2_no=lis[i][2],
target_3_no=lis[i][3],
target_4_no=lis[i][4],
target_5_no=lis[i][5],
target_6_no=lis[i][6],
target_7_no=lis[i][7],
target_8_no=lis[i][8]
)
db.session.add(group)
db.session.commit()
except Exception as e:
return redirect(url_for('error_duplicate'))
return redirect(url_for('group_view'))
@app.route('/new_group')
def new_group():
firer = [row.service_id for row in Shooter.query.all()]
return render_template('pages/new_group.html',firer_1=firer)
@app.route('/individual_group/', methods=['GET', 'POST'])
def individual_group():
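    """Create a target group from the new-group form.

    Blank service ids become the placeholder id 999 for the duplicate check,
    the TGroup table is cleared when it still holds yesterday's groups, and
    the new group is saved unless a duplicate firer is detected.
    """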
try:
curdate_p=(date.today())- timedelta(1)
#check=mysession.query(TGroup).filter(date==curdate_p).all()
if request.method=="POST":
grp = request.form['game_id_1']
tmp_list = []
duplicate = False
r=request.form['tag']
if (r== ""):
r_id = 999
else:
r_object=Shooter.query.filter(Shooter.service_id==r).scalar()
r_id=r_object.id
r1=request.form['tag_1']
if(r1== ""):
r1_id=999
else:
r1_object=Shooter.query.filter(Shooter.service_id==r1).scalar()
r1_id=r1_object.id
r2=request.form['tag_2']
if (r2==""):
r2_id=999
else:
r2_object=Shooter.query.filter(Shooter.service_id==r2).scalar()
r2_id=r2_object.id
r3=request.form['tag_3']
if(r3==""):
r3_id=999
else:
r3_object=Shooter.query.filter(Shooter.service_id==r3).scalar()
r3_id=r3_object.id
r4=request.form['tag_4']
if(r4==""):
r4_id=999
else:
r4_object=Shooter.query.filter(Shooter.service_id==r4).scalar()
r4_id=r4_object.id
r5=request.form['tag_5']
if(r5==""):
r5_id=999
else:
r5_object=Shooter.query.filter(Shooter.service_id==r5).scalar()
r5_id=r5_object.id
r6=request.form['tag_6']
if(r6==""):
r6_id=999
else:
r6_object=Shooter.query.filter(Shooter.service_id==r6).scalar()
r6_id=r6_object.id
r7=request.form['tag_7']
if(r7== ""):
r7_id=999
else:
r7_object=Shooter.query.filter(Shooter.service_id==r7).scalar()
r7_id=r7_object.id
tmp_list.append(r_id)
tmp_list.append(r1_id)
tmp_list.append(r2_id)
tmp_list.append(r3_id)
tmp_list.append(r4_id)
tmp_list.append(r5_id)
tmp_list.append(r6_id)
tmp_list.append(r7_id)
            for i in range(len(tmp_list)):
                for j in range(len(tmp_list)):
                    if(tmp_list[i]==999 and tmp_list[j]==999):
                        # 999 marks an empty target; skip it so it never resets a duplicate already found
                        continue
                    elif(i!=j and tmp_list[i]==tmp_list[j]):
                        duplicate = True
if(db.session.query(db.exists().where(TGroup.date == curdate_p)).scalar()):
db.session.query(TGroup).delete()
db.session.commit()
if(duplicate):
return redirect(url_for('duplicate_firer_error'))
else:
gr=TGroup(
date=datetime.now(),
group_no=grp,
target_1_no=r,
target_2_no=r1,
target_3_no=r2,
target_4_no=r3,
target_5_no=r4,
target_6_no=r5,
target_7_no=r6,
target_8_no=r7
)
db.session.add(gr)
db.session.commit()
else:
if(duplicate):
return redirect(url_for('duplicate_firer_error'))
else:
gr=TGroup(
date=datetime.now(),
group_no=grp,
target_1_no=r,
target_2_no=r1,
target_3_no=r2,
target_4_no=r3,
target_5_no=r4,
target_6_no=r5,
target_7_no=r6,
target_8_no=r7
)
db.session.add(gr)
db.session.commit()
except Exception as e:
return render_template('errors/group_view_error.html')
return redirect(url_for('group_view'))
@app.route('/group_view/', methods=['GET', 'POST'])
def group_view():
detail = TGroup.query.all()
return render_template('pages/group_detail_view.html',detail=detail)
@app.route('/group_view/detail/<id>', methods=['GET', 'POST'])
def group_detail_view(id):
view = TGroup.query.filter(TGroup.group_no == id)
return render_template('pages/group_detail_view_id.html' , data = view)
@app.route('/group_details/edit/<id>', methods=['GET', 'POST'])
def group_detail_edit(id):
firer = TGroup.query.filter(TGroup.group_no == id).first()
form=GroupEditForm(obj=firer)
if form.validate_on_submit():
firer.date=form.date.data
firer.target_1_no=form.target_1_army.data
firer.target_2_no=form.target_2_army.data
firer.target_3_no=form.target_3_army.data
firer.target_4_no=form.target_4_army.data
firer.target_5_no=form.target_5_army.data
firer.target_6_no=form.target_6_army.data
firer.target_7_no=form.target_7_army.data
firer.target_8_no=form.target_8_army.data
firer.group_no=form.group_no.data
db.session.commit()
return redirect(url_for('group_view'))
form.group_no.data=firer.group_no
form.target_1_army.data=firer.target_1_no
form.target_2_army.data=firer.target_2_no
form.target_3_army.data=firer.target_3_no
form.target_4_army.data=firer.target_4_no
form.target_5_army.data=firer.target_5_no
form.target_6_army.data=firer.target_6_no
form.target_7_army.data=firer.target_7_no
form.target_8_army.data=firer.target_8_no
return render_template('pages/group_edit.html' , firer = firer , form=form)
@app.route('/firer_details/detail/<id>', methods=['GET', 'POST'])
def firer_detail_view(id):
firer = Shooter.query.filter(Shooter.service_id == id)
for firers in firer:
firers.cantonment_name= Cantonment.query.filter(Cantonment.id==firers.cantonment_id).scalar()
firers.division = Cantonment.query.filter(Cantonment.id==firers.cantonment_id).scalar()
firers.rank = Rank.query.filter(Rank.id==firers.rank_id).scalar()
firers.gender_name = Gender.query.filter(Gender.id==firers.gender_id).scalar()
return render_template('pages/firer_detail_view.html' , data = firer)
@app.route('/firer_details/edit/<id>', methods=['GET', 'POST'])
def firer_detail_edit(id):
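    """Edit a registered shooter.

    On submit the selected gender, cantonment/division and rank are resolved
    to ids and the Shooter row is updated; on GET the form is pre-populated
    from the stored record.
    """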
firer = Shooter.query.filter(Shooter.service_id == id).first()
form=RegistrationEditForm(obj=firer)
try:
if form.validate_on_submit():
firer.name = form.name.data
firer.service_id=form.service_id.data
firer.registration_date=form.date.data
gender_obj=Gender.query.filter(Gender.name==form.gender.data).scalar()
firer.gender_id=gender_obj.id
cantonment_obj=Cantonment.query.filter(Cantonment.cantonment==form.cantonment.data ,Cantonment.division==form.div.data).scalar()
firer.cantonment_id=cantonment_obj.id
            rank_obj=Rank.query.filter(Rank.name==form.rank.data).distinct(Rank.id).scalar()
firer.rank_id=rank_obj.id
firer.unit=form.unit.data
firer.brigade=form.brigade.data
db.session.commit()
return redirect(url_for('firer_details'))
form.name.data=firer.name
form.service_id.data=firer.service_id
form.date.data=firer.registration_date
gender_name=Gender.query.filter(Gender.id==firer.gender_id).scalar()
form.gender.data=gender_name.name
cantonment_name=Cantonment.query.filter(Cantonment.id==firer.cantonment_id).scalar()
form.cantonment.data=cantonment_name.cantonment
form.div.data=cantonment_name.division
unit_data=Shooter.query.filter(Shooter.service_id==firer.service_id).scalar()
form.unit.data=unit_data.unit
form.brigade.data=unit_data.brigade
rank_name=Rank.query.filter(Rank.id==firer.rank_id).distinct(Rank.name).scalar()
form.rank.data=rank_name.name
except Exception as e:
return redirect(url_for('error_7'))
return render_template('pages/firer_detail_edit.html' , firer = firer , form=form)
@app.route('/live/')
def live():
T1_name = mysession.query(Shooter.name).filter(Shooter.id==TShooting.target_1_id).scalar()
T1_service = mysession.query(Shooter.service_id).filter(Shooter.id==TShooting.target_1_id).scalar()
T1_r_id = mysession.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_1_id).scalar()
T1_rank = mysession.query(Rank.name).filter(Rank.id==T1_r_id).scalar()
T2_name = mysession.query(Shooter.name).filter(Shooter.id==TShooting.target_2_id).scalar()
T2_service = mysession.query(Shooter.service_id).filter(Shooter.id==TShooting.target_2_id).scalar()
T2_r_id = mysession.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_2_id).scalar()
T2_rank = mysession.query(Rank.name).filter(Rank.id==T2_r_id).scalar()
T3_name = mysession.query(Shooter.name).filter(Shooter.id==TShooting.target_3_id).scalar()
T3_service = mysession.query(Shooter.service_id).filter(Shooter.id==TShooting.target_3_id).scalar()
T3_r_id = mysession.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_3_id).scalar()
T3_rank = mysession.query(Rank.name).filter(Rank.id==T3_r_id).scalar()
T4_name = mysession.query(Shooter.name).filter(Shooter.id==TShooting.target_4_id).scalar()
T4_service = mysession.query(Shooter.service_id).filter(Shooter.id==TShooting.target_4_id).scalar()
T4_r_id = mysession.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_4_id).scalar()
T4_rank = mysession.query(Rank.name).filter(Rank.id==T4_r_id).scalar()
T5_name = mysession.query(Shooter.name).filter(Shooter.id==TShooting.target_5_id).scalar()
T5_service = mysession.query(Shooter.service_id).filter(Shooter.id==TShooting.target_5_id).scalar()
T5_r_id = mysession.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_5_id).scalar()
T5_rank = mysession.query(Rank.name).filter(Rank.id==T5_r_id).scalar()
T6_name = mysession.query(Shooter.name).filter(Shooter.id==TShooting.target_6_id).scalar()
T6_service = mysession.query(Shooter.service_id).filter(Shooter.id==TShooting.target_6_id).scalar()
T6_r_id = mysession.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_6_id).scalar()
T6_rank = mysession.query(Rank.name).filter(Rank.id==T6_r_id).scalar()
T7_name = mysession.query(Shooter.name).filter(Shooter.id==TShooting.target_7_id).scalar()
T7_service = mysession.query(Shooter.service_id).filter(Shooter.id==TShooting.target_7_id).scalar()
T7_r_id = mysession.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_7_id).scalar()
T7_rank = mysession.query(Rank.name).filter(Rank.id==T7_r_id).scalar()
T8_name = mysession.query(Shooter.name).filter(Shooter.id==TShooting.target_8_id).scalar()
T8_service = mysession.query(Shooter.service_id).filter(Shooter.id==TShooting.target_8_id).scalar()
T8_r_id = mysession.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_8_id).scalar()
T8_rank = mysession.query(Rank.name).filter(Rank.id==T8_r_id).scalar()
return render_template('pages/live.html' ,
T1_name=T1_name,
T1_service=T1_service,
T2_name=T2_name,
T2_service=T2_service,
T3_name=T3_name,
T3_service=T3_service,
T4_name=T4_name,
T4_service=T4_service,
T5_name=T5_name,
T5_service=T5_service,
T6_name=T6_name,
T6_service=T6_service,
T7_name=T7_name,
T7_service=T7_service,
T8_name=T8_name,
T8_service=T8_service,
T1_rank=T1_rank,
T2_rank=T2_rank,
T3_rank=T3_rank,
T4_rank=T4_rank,
T5_rank=T5_rank,
T6_rank=T6_rank,
T7_rank=T7_rank,
T8_rank=T8_rank
)
@app.route('/cam_detail_2/', methods=['GET', 'POST'])
def cam_detail_2():
return render_template('pages/cam_detail_1.html')
@app.route('/cam_detail_4/', methods=['GET', 'POST'])
def cam_detail_4():
return render_template('pages/cam_detail_2.html')
@app.route('/cam_detail_1/', methods=['GET', 'POST'])
def cam_detail_1():
return render_template('pages/cam_detail_3.html')
@app.route('/cam_detail_3/', methods=['GET', 'POST'])
def cam_detail_3():
return render_template('pages/cam_detail_4.html')
@app.route('/cam_detail_6/', methods=['GET', 'POST'])
def cam_detail_6():
return render_template('pages/cam_detail_5.html')
@app.route('/cam_detail_8/', methods=['GET', 'POST'])
def cam_detail_8():
return render_template('pages/cam_detail_6.html')
@app.route('/cam_detail_7/', methods=['GET', 'POST'])
def cam_detail_7():
return render_template('pages/cam_detail_7.html')
@app.route('/cam_detail_5/', methods=['GET', 'POST'])
def cam_detail_5():
return render_template('pages/cam_detail_8.html')
@app.route('/session_setup/', methods=['GET', 'POST'])
def session_setup():
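    """Create a shooting session from the session form.

    The selected range, firearm and ammunition names are resolved to ids
    (999 while nothing has been selected yet) before the Shooting_Session
    row is saved.
    """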
try:
data = Shooter.query.all()
rang= Range.query.all()
firearms = Firearms.query.all()
ammunation = Ammunation.query.all()
rang_name = request.form.get('comp_select_4')
fire_name = request.form.get('comp_select_5')
ammu_name = request.form.get('comp_select_6')
form=SessionForm()
if(rang_name is None):
range_id=999
fire_id=999
ammu_id=999
else:
range_id = db.session.query(Range.id).filter(Range.name==rang_name).scalar()
fire_id = db.session.query(Firearms.id).filter(Firearms.name==fire_name).scalar()
ammu_id = db.session.query(Ammunation.id).filter(Ammunation.name==ammu_name).scalar()
if form.validate_on_submit():
shooting=Shooting_Session(
date=form.date.data.strftime('%Y-%m-%d'),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
shooting_range_id=range_id,
firearms_id=fire_id,
ammunation_id=ammu_id,
target_distance = form.target_distance.data,
weather_notes = form.weather_notes.data,
comments = form.comments.data,
session_no=form.session_no.data,
occasion=form.occ.data
)
db.session.add(shooting)
db.session.commit()
return redirect(url_for('session_config'))
except Exception as e:
return redirect(url_for('error5_505.html'))
return render_template('forms/shooting_form.html', form=form, data =data ,rang=rang , firearmns=firearms, ammunation = ammunation)
@app.route('/configuration/', methods=['GET', 'POST'])
def session_config():
config = Shooting_Session.query.all()
for con in config:
con.range_name = Range.query.filter(Range.id==con.shooting_range_id).scalar()
con.firerarms_name = Firearms.query.filter(Firearms.id==con.firearms_id).scalar()
con.ammunation_name = Ammunation.query.filter(Ammunation.id==con.ammunation_id).scalar()
return render_template('pages/shooting_configuration_detail.html',con=config)
@app.route('/image_process/')
def image_process():
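    """Render the image-processing dashboard for the current TShooting detail.

    Target slots fall back to "NA" when no temporary detail is loaded or when
    the loaded detail is already saved (in which case TShooting is cleared).
    """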
dt=time.strftime("%Y-%m-%d")
detail_data=db.session.query(Session_Detail).filter(Session_Detail.date==dt,Session_Detail.save_flag==0).all()
data =TShooting.query.scalar()
if(data is None):
T1_name ="NA"
T1_service ="NA"
T1_rank="NA"
T2_name ="NA"
T2_service ="NA"
T2_rank="NA"
T3_name ="NA"
T3_service ="NA"
T3_rank="NA"
T4_name ="NA"
T4_service ="NA"
T4_rank="NA"
T5_name ="NA"
T5_service ="NA"
T5_rank="NA"
T6_name ="NA"
T6_service ="NA"
T6_rank="NA"
T7_name ="NA"
T7_service ="NA"
T7_rank="NA"
T8_name ="NA"
T8_service ="NA"
T8_rank="NA"
elif(data.save_flag == 1 ):
db.session.query(TShooting).delete()
db.session.commit()
T1_name ="NA"
T1_service ="NA"
T1_rank="NA"
T2_name ="NA"
T2_service ="NA"
T2_rank="NA"
T3_name ="NA"
T3_service ="NA"
T3_rank="NA"
T4_name ="NA"
T4_service ="NA"
T4_rank="NA"
T5_name ="NA"
T5_service ="NA"
T5_rank="NA"
T6_name ="NA"
T6_service ="NA"
T6_rank="NA"
T7_name ="NA"
T7_service ="NA"
T7_rank="NA"
T8_name ="NA"
T8_service ="NA"
T8_rank="NA"
else:
T1=Shooter.query.filter(Shooter.id==TShooting.target_1_id).scalar()
if(T1 is None):
T1_name ="NA"
T1_service ="NA"
T1_rank="NA"
else:
T1_name = T1.name
T1_service = T1.service_id
T1_r_id = T1.rank_id
T1_rank_id = Rank.query.filter(Rank.id==T1_r_id).scalar()
T1_rank=T1_rank_id.name
T2=Shooter.query.filter(Shooter.id==TShooting.target_2_id).scalar()
if(T2 is None):
T2_name ="NA"
T2_service ="NA"
T2_rank="NA"
else:
T2_name = T2.name
T2_service = T2.service_id
T2_r_id = T2.rank_id
T2_rank_id = Rank.query.filter(Rank.id==T2_r_id).scalar()
T2_rank=T2_rank_id.name
T3=Shooter.query.filter(Shooter.id==TShooting.target_3_id,TShooting.target_3_id!=999).scalar()
if(T3 is None):
T3_name ="NA"
T3_service ="NA"
T3_rank="NA"
else:
T3_name = T3.name
T3_service = T3.service_id
T3_r_id = T3.rank_id
T3_rank_id = Rank.query.filter(Rank.id==T3_r_id).scalar()
T3_rank=T3_rank_id.name
T4=Shooter.query.filter(Shooter.id==TShooting.target_4_id,TShooting.target_4_id!=999).scalar()
if(T4 is None):
T4_name ="NA"
T4_service ="NA"
T4_rank="NA"
else:
T4_name = T4.name
T4_service = T4.service_id
T4_r_id = T4.rank_id
T4_rank_id = Rank.query.filter(Rank.id==T4_r_id).scalar()
T4_rank=T4_rank_id.name
T5=Shooter.query.filter(Shooter.id==TShooting.target_5_id).scalar()
if(T5 is None):
T5_name ="NA"
T5_service ="NA"
T5_rank="NA"
else:
T5_name = T5.name
T5_service = T5.service_id
T5_r_id = T5.rank_id
T5_rank_id = Rank.query.filter(Rank.id==T5_r_id).scalar()
T5_rank=T5_rank_id.name
T6=Shooter.query.filter(Shooter.id==TShooting.target_6_id).scalar()
if(T6 is None):
T6_name ="NA"
T6_service ="NA"
T6_rank="NA"
else:
T6_name = T6.name
T6_service = T6.service_id
T6_r_id = T6.rank_id
T6_rank_id = Rank.query.filter(Rank.id==T6_r_id).scalar()
T6_rank=T6_rank_id.name
T7=Shooter.query.filter(Shooter.id==TShooting.target_7_id).scalar()
if(T7 is None):
T7_name ="NA"
T7_service ="NA"
T7_rank="NA"
else:
T7_name = T7.name
T7_service = T7.service_id
T7_r_id = T7.rank_id
T7_rank_id = Rank.query.filter(Rank.id==T7_r_id).scalar()
T7_rank=T7_rank_id.name
T8=Shooter.query.filter(Shooter.id==TShooting.target_8_id).scalar()
if(T8 is None):
T8_name ="NA"
T8_service ="NA"
T8_rank="NA"
else:
T8_name = T8.name
T8_service = T8.service_id
T8_r_id = T8.rank_id
T8_rank_id = Rank.query.filter(Rank.id==T8_r_id).scalar()
T8_rank=T8_rank_id.name
return render_template('pages/image_process.html' ,
T1_name=T1_name,
detail_data=detail_data,
T1_service=T1_service,
T2_name=T2_name,
T2_service=T2_service,
T3_name=T3_name,
T3_service=T3_service,
T4_name=T4_name,
T4_service=T4_service,
T5_name=T5_name,
T5_service=T5_service,
T6_name=T6_name,
T6_service=T6_service,
T7_name=T7_name,
T7_service=T7_service,
T8_name=T8_name,
T8_service=T8_service,
T1_rank=T1_rank,
T2_rank=T2_rank,
T3_rank=T3_rank,
T4_rank=T4_rank,
T5_rank=T5_rank,
T6_rank=T6_rank,
T7_rank=T7_rank,
T8_rank=T8_rank
)
@app.route('/image_edit_1/', methods=['GET', 'POST'])
def image_edit_1():
return render_template('pages/image_edit_1.html')
@app.route('/image_edit_2/', methods=['GET', 'POST'])
def image_edit_2():
return render_template('pages/image_edit_2.html')
@app.route('/image_edit_3/', methods=['GET', 'POST'])
def image_edit_3():
return render_template('pages/image_edit_3.html')
@app.route('/image_edit_4/', methods=['GET', 'POST'])
def image_edit_4():
return render_template('pages/image_edit_4.html')
@app.route('/image_edit_5/', methods=['GET', 'POST'])
def image_edit_5():
return render_template('pages/image_edit_5.html')
@app.route('/image_edit_6/', methods=['GET', 'POST'])
def image_edit_6():
return render_template('pages/image_edit_6.html')
@app.route('/image_edit_7/', methods=['GET', 'POST'])
def image_edit_7():
return render_template('pages/image_edit_7.html')
@app.route('/image_edit_8/', methods=['GET', 'POST'])
def image_edit_8():
return render_template('pages/image_edit_8.html')
@app.route('/configuration/detail/<id>', methods=['GET', 'POST'])
def session_config_detail(id):
config = Shooting_Session.query.filter(Shooting_Session.id == id)
for con in config:
con.range_name = Range.query.filter(Range.id==con.shooting_range_id).scalar()
con.firerarms_name = Firearms.query.filter(Firearms.id==con.firearms_id).scalar()
con.ammunation_name = Ammunation.query.filter(Ammunation.id==con.ammunation_id).scalar()
return render_template('pages/shooting_configuration_detail_view.html',con=config)
@app.route('/configuration/edit/<id>', methods=['GET', 'POST'])
def shooting_config_edit(id):
edit = Shooting_Session.query.get_or_404(id)
form = SessionEditForm(obj=edit)
if form.validate_on_submit():
edit.session_no = form.session_no.data
edit.date = form.date.data
edit.occasion=form.occ.data
edit.target_distance = form.target_distance.data
ammunation_id=Ammunation.query.filter(Ammunation.name==form.ammunation_name.data).scalar()
edit.ammunation_id=ammunation_id.id
firearms_id=Firearms.query.filter(Firearms.name==form.firerarms_name.data).scalar()
edit.firearms_id=firearms_id.id
range_id=Range.query.filter(Range.name==form.range_name.data).scalar()
edit.shooting_range_id=range_id.id
edit.weather_notes=form.weather_notes.data
edit.comments=form.comments.data
db.session.commit()
return redirect(url_for('session_config'))
form.session_no.data=edit.session_no
form.date.data=edit.date
form.occ.data=edit.occasion
ammunation_name=Ammunation.query.filter(Ammunation.id==edit.ammunation_id).scalar()
form.ammunation_name.data=ammunation_name.name
firerarms_name=Firearms.query.filter(Firearms.id==edit.firearms_id).scalar()
form.firerarms_name.data=firerarms_name.name
range_name=Range.query.filter(Range.id==edit.shooting_range_id).scalar()
form.range_name.data=range_name.name
form.weather_notes.data=edit.weather_notes
form.comments.data=edit.comments
return render_template('pages/shooting_configuration_edit.html',form=form,edit=edit)
@app.route('/detail_dashboard/')
def detail_dashboard():
tshoot=db.session.query(TShooting).scalar()
if(tshoot is None):
T1_name = "NA"
T1_service="NA"
T1_rank ="NA"
T2_name = "NA"
T2_service="NA"
T2_rank ="NA"
T3_name = "NA"
T3_service="NA"
T3_rank ="NA"
T4_name = "NA"
T4_service="NA"
T4_rank ="NA"
T5_name = "NA"
T5_service="NA"
T5_rank ="NA"
T6_name = "NA"
T6_service="NA"
T6_rank ="NA"
T7_name = "NA"
T7_service="NA"
T7_rank ="NA"
T8_name = "NA"
T8_service="NA"
T8_rank ="NA"
else:
T1=Shooter.query.filter(Shooter.id==TShooting.target_1_id).scalar()
T1_name = T1.name
T1_service = T1.service_id
T1_r_id = T1.rank_id
T1_rank_id = Rank.query.filter(Rank.id==T1_r_id).scalar()
T1_rank=T1_rank_id.name
T2=Shooter.query.filter(Shooter.id==TShooting.target_2_id).scalar()
T2_name = T2.name
T2_service = T2.service_id
T2_r_id = T2.rank_id
T2_rank_id = Rank.query.filter(Rank.id==T2_r_id).scalar()
T2_rank=T2_rank_id.name
T3=Shooter.query.filter(Shooter.id==TShooting.target_3_id).scalar()
T3_name = T3.name
T3_service = T3.service_id
T3_r_id = T3.rank_id
T3_rank_id = Rank.query.filter(Rank.id==T3_r_id).scalar()
T3_rank=T3_rank_id.name
T4=Shooter.query.filter(Shooter.id==TShooting.target_4_id).scalar()
T4_name = T4.name
T4_service = T4.service_id
T4_r_id = T4.rank_id
T4_rank_id = Rank.query.filter(Rank.id==T4_r_id).scalar()
T4_rank=T4_rank_id.name
T5=Shooter.query.filter(Shooter.id==TShooting.target_5_id).scalar()
T5_name = T5.name
T5_service = T5.service_id
T5_r_id = T5.rank_id
T5_rank_id = Rank.query.filter(Rank.id==T5_r_id).scalar()
T5_rank=T5_rank_id.name
T6=Shooter.query.filter(Shooter.id==TShooting.target_6_id).scalar()
T6_name = T6.name
T6_service = T6.service_id
T6_r_id = T6.rank_id
T6_rank_id = Rank.query.filter(Rank.id==T6_r_id).scalar()
T6_rank=T6_rank_id.name
T7=Shooter.query.filter(Shooter.id==TShooting.target_7_id).scalar()
T7_name = T7.name
T7_service = T7.service_id
T7_r_id = T7.rank_id
T7_rank_id = Rank.query.filter(Rank.id==T7_r_id).scalar()
T7_rank=T7_rank_id.name
T8=Shooter.query.filter(Shooter.id==TShooting.target_8_id).scalar()
T8_name = T8.name
T8_service = T8.service_id
T8_r_id = T8.rank_id
T8_rank_id = Rank.query.filter(Rank.id==T8_r_id).scalar()
T8_rank=T8_rank_id.name
return render_template('pages/detail_dashboard.html' ,
T1_name=T1_name,
T1_service=T1_service,
T2_name=T2_name,
T2_service=T2_service,
T3_name=T3_name,
T3_service=T3_service,
T4_name=T4_name,
T4_service=T4_service,
T5_name=T5_name,
T5_service=T5_service,
T6_name=T6_name,
T6_service=T6_service,
T7_name=T7_name,
T7_service=T7_service,
T8_name=T8_name,
T8_service=T8_service,
T1_rank=T1_rank,
T2_rank=T2_rank,
T3_rank=T3_rank,
T4_rank=T4_rank,
T5_rank=T5_rank,
T6_rank=T6_rank,
T7_rank=T7_rank,
T8_rank=T8_rank
)
@app.route('/adhoc_detail_1/', methods=['GET', 'POST'])
def adhoc_detail_1():
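    """Return ad-hoc firer details as JSON.

    Includes name, rank and cantonment, the last five grouping results,
    grouping lengths and MPI tendencies, and the firers recorded today on
    targets/sets 1-4.
    """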
name_1=None
army=None
rank=None
cant=None
set_1_name=None
set_1_army=None
set_2_name=None
set_2_army=None
set_3_name=None
set_3_army=None
set_4_name=None
set_4_army=None
res=[]
ten=[]
gp_len=[]
if request.method == "POST":
data1 = request.get_json()
army=data1['usr']
curdate=time.strftime("%Y-%m-%d")
name_1=db.session.query(Shooter.name).filter(Shooter.service_id==army).scalar()
target_1_id=db.session.query(Shooter.id).filter(Shooter.service_id==army).scalar()
rank_id=db.session.query(Shooter.rank_id).filter(Shooter.service_id==army).scalar()
cant_id=db.session.query(Shooter.cantonment_id).filter(Shooter.service_id==army).scalar()
rank=db.session.query(Rank.name).filter(Rank.id==rank_id).scalar()
cant=db.session.query(Cantonment.cantonment).filter(Cantonment.id==cant_id).scalar()
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==target_1_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==target_1_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
tgp = db.session.query(Grouping.grouping_length_f).filter(Grouping.firer_id==target_1_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
for ele in tres:
for ele2 in ele:
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
ten.append(ele4)
for ele5 in tgp:
for ele6 in ele5:
gp_len.append(ele6)
set_1_id = db.session.query(Firer_Details.firer_id).filter(Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==1
).distinct().scalar()
set_1_name=db.session.query(Shooter.name).filter(Shooter.id==set_1_id).scalar()
set_1_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_1_id).scalar()
set_2_id = db.session.query(Firer_Details.firer_id).filter(Firer_Details.date==curdate,
Firer_Details.target_no==2,
Firer_Details.set_no==2
).distinct().scalar()
set_2_name=db.session.query(Shooter.name).filter(Shooter.id==set_2_id).scalar()
set_2_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_2_id).scalar()
set_3_id = db.session.query(Firer_Details.firer_id).filter(Firer_Details.date==curdate,
Firer_Details.target_no==3,
Firer_Details.set_no==3
).distinct().scalar()
set_3_name=db.session.query(Shooter.name).filter(Shooter.id==set_3_id).scalar()
set_3_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_3_id).scalar()
set_4_id = db.session.query(Firer_Details.firer_id).filter(Firer_Details.date==curdate,
Firer_Details.target_no==4,
Firer_Details.set_no==4
).distinct().scalar()
set_4_name=db.session.query(Shooter.name).filter(Shooter.id==set_4_id).scalar()
set_4_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_4_id).scalar()
return jsonify(name_1=name_1,army=army,rank=rank,cant=cant,
set_1_name=set_1_name,
set_2_name=set_2_name,
set_3_name=set_3_name,
set_4_name=set_4_name,
set_1_army=set_1_army,
set_2_army=set_2_army,
set_3_army=set_3_army,
set_4_army=set_4_army,
gp_len=gp_len,
res=res,
ten=ten
)
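# Renders the target-1 scoring/prediction page. If the latest recorded set for target 1 is
# set 5, the stored per-set Firer_Details rows for that target are cleared first; the page
# is then populated with sessions, firearms, ranges, ammunition, ranks and genders.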
@app.route('/individual_score/target_1', methods=['GET', 'POST'])
def individual_score_target_1():
session.clear()
data=TShooting.query.scalar()
firing_set_arr=[]
cantonment=Cantonment.query.distinct(Cantonment.cantonment)
curdate=time.strftime("%Y-%m-%d")
selection=Shooting_Session.query.filter(Shooting_Session.date>=curdate).order_by(Shooting_Session.datetimestamp.desc()).all()
gender =Gender.query.all()
rank_s = Rank.query.all()
firing_set=db.session.query(Firer_Details.set_no).filter(Firer_Details.target_no==1).distinct().all()
for ele in firing_set:
for ele2 in ele:
firing_set_arr.append(ele2)
    # Once the fifth set has been recorded for target 1, clear its stored per-set details.
    if firing_set_arr and firing_set_arr[-1] == 5:
        db.session.query(Firer_Details).filter(Firer_Details.target_no == 1).delete()
        db.session.commit()
dt=time.strftime("%Y-%m-%d")
curdatetime=datetime.now()
firer_1 = [row.service_id for row in Shooter.query.all()]
detail_data=db.session.query(Session_Detail).filter(Session_Detail.date==dt,Session_Detail.save_flag==0).all()
name = "NA"
detail_no ="NA"
rank ="NA"
target_no = 1
service_id ="NA"
ten = []
res = []
selection=Shooting_Session.query.filter(Shooting_Session.date>=dt).order_by(Shooting_Session.datetimestamp.desc()).all()
firearms = Firearms.query.all()
rang= Range.query.all()
ammunation = Ammunation.query.all()
return render_template('pages/prediction_target_1.html',
curdatetime=curdatetime,
name = name,
firer_1=firer_1,
rank=rank,
detail_data=detail_data,
detail_no=detail_no,
target_no=target_no,
service_id=service_id,
firearms=firearms,
ammunation=ammunation,
data=selection,
rang=rang,
res=res,
date=dt,
ten=ten,
cantonment=cantonment,
gender=gender,
rank_s=rank_s)
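# Creates a new Shooting_Session row from the posted JSON (range, firearm, ammunition,
# distance, occasion, weather and comments) and returns a confirmation message.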
@app.route('/session_target_1/', methods=['GET', 'POST'])
def session_target_1():
if request.method == "POST":
data1 = request.get_json()
session=data1["session"]
ran=data1["range"]
arms=data1["arms"]
distance=data1["dis"]
occ=data1["occ"]
ammu=data1["ammu"]
weather=data1["weather"]
comment=data1["comment"]
range_id=db.session.query(Range.id).filter(Range.name==ran).scalar()
arms_id=db.session.query(Firearms.id).filter(Firearms.name==arms).scalar()
ammu_id=db.session.query(Ammunation.id).filter(Ammunation.name==ammu).scalar()
shooting=Shooting_Session(
date=time.strftime("%Y-%m-%d"),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
shooting_range_id=range_id,
firearms_id=arms_id,
ammunation_id=ammu_id,
target_distance=distance,
weather_notes =weather,
comments =comment,
session_no=session,
occasion=occ
)
db.session.add(shooting)
db.session.commit()
result="This is Successfully Saved"
return jsonify(result=result ,session=session)
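# Returns the session id currently stored in the temporary TShooting table.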
@app.route('/target_1_populate/', methods=['GET', 'POST'])
def target_1_populate():
if request.method == 'POST':
session_id=db.session.query(TShooting.session_id).scalar()
return jsonify(session_id=session_id)
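# Saves a shooting detail posted from the target-1 page: resolves the eight firer service
# numbers, rejects the detail if one firer is assigned to more than one target, refreshes the
# TPaper_ref and TShooting temporary tables, stores the detail in Session_Detail, and returns
# the target-1 firer's summary produced by get_information().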
@app.route('/load_detail_1/', methods=['GET', 'POST'])
def load_detail_1():
result_1="Done"
if request.method == 'POST':
curdate=time.strftime("%Y-%m-%d")
r8=None
data=request.get_json()
tmp_list = []
duplicate = False
detail =data["detail"]
sess=data["session"]
paper=data["paper"]
shot=data["shot"]
        set_no = data["set"]
if(data["r1"]==""):
r1_id=999
else:
r1=data["r1"]
r1_id=db.session.query(Shooter.id).filter(Shooter.service_id==r1).scalar()
if(data["r2"]==""):
r2_id=999
else:
r2=data["r2"]
r2_id=db.session.query(Shooter.id).filter(Shooter.service_id==r2).scalar()
if(data["r3"]==""):
r3_id=999
else:
r3=data["r3"]
r3_id=db.session.query(Shooter.id).filter(Shooter.service_id==r3).scalar()
if(data["r4"]==""):
r4_id=999
else:
r4=data["r4"]
r4_id=db.session.query(Shooter.id).filter(Shooter.service_id==r4).scalar()
if(data["r5"]==""):
r5_id=999
else:
r5=data["r5"]
r5_id=db.session.query(Shooter.id).filter(Shooter.service_id==r5).scalar()
if(data["r6"]==""):
r6_id=999
else:
r6=data["r6"]
r6_id=db.session.query(Shooter.id).filter(Shooter.service_id==r6).scalar()
if(data["r7"]==""):
r7_id=999
else:
r7=data["r7"]
r7_id=db.session.query(Shooter.id).filter(Shooter.service_id==r7).scalar()
if(data["r8"]==""):
r8_id=999
else:
r8=data["r8"]
r8_id=db.session.query(Shooter.id).filter(Shooter.service_id==r8).scalar()
tmp_list.append(r1_id)
tmp_list.append(r2_id)
tmp_list.append(r3_id)
tmp_list.append(r4_id)
tmp_list.append(r5_id)
tmp_list.append(r6_id)
tmp_list.append(r7_id)
tmp_list.append(r8_id)
db.session.query(TPaper_ref).delete()
db.session.commit()
ref_db = TPaper_ref(
date=time.strftime("%Y-%m-%d"),
paper_ref=paper,
detail_no=detail,
session_no=sess
)
db.session.add(ref_db)
db.session.commit()
        # Flag the detail as a duplicate when the same real shooter (placeholder id 999
        # excluded) is assigned to more than one target.
        for i in range(len(tmp_list)):
            for j in range(len(tmp_list)):
                if i != j and tmp_list[i] == tmp_list[j] and tmp_list[i] != 999:
                    duplicate = True
if(duplicate):
print("inside dup")
error="dup"
else:
db.session.query(TShooting).delete()
db.session.commit()
tshoot=TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail,
target_1_id=r1_id,
target_2_id=r2_id,
target_3_id=r3_id,
target_4_id=r4_id,
target_5_id=r5_id,
target_6_id=r6_id,
target_7_id=r7_id,
target_8_id=r8_id,
paper_ref=paper,
            set_no=set_no,
save_flag=0
)
db.session.add(tshoot)
db.session.commit()
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail,
target_1_id=r1_id,
target_2_id=r2_id,
target_3_id=r3_id,
target_4_id=r4_id,
target_5_id=r5_id,
target_6_id=r6_id,
target_7_id=r7_id,
target_8_id=r8_id,
paper_ref=paper,
            set_no=set_no,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
error="ok"
            (firer_name, cant, rank, service_id, res, tenden, gp_len,
             set_4_name, set_4_army, set_4_session_no, set_4_detail_no,
             set_3_name, set_3_army, set_3_session_no, set_3_detail_no,
             set_2_name, set_2_army, set_2_session_no, set_2_detail_no,
             set_1_name, set_1_army, set_1_session_no, set_1_detail_no,
             current_firer_name, current_army_no, current_session_no,
             current_detail_no) = get_information(r1_id, sess, paper)
result="The Detail is Saved Successfully"
return jsonify(result=result,data1=firer_name,ra_1=rank,detail=detail,
service_id_1=service_id,
session=sess,
paper=paper,
            set_no=set_no,
cant=cant,
gp_len=gp_len,
res=res,
ten=tenden,
set_4_name=set_4_name,
set_3_name=set_3_name,
set_2_name=set_2_name,
set_1_name=set_1_name,
current_firer_name=current_firer_name,
set_4_army=set_4_army,
set_3_army=set_3_army,
set_2_army=set_2_army,
set_1_army=set_1_army,
current_army_no=current_army_no,
set_4_session_no=set_4_session_no,
set_3_session_no=set_3_session_no,
set_2_session_no=set_2_session_no,
set_1_session_no=set_1_session_no,
current_session_no=current_session_no,
set_4_detail_no=set_4_detail_no,
set_3_detail_no=set_3_detail_no,
set_2_detail_no=set_2_detail_no,
set_1_detail_no=set_1_detail_no,
current_detail_no=current_detail_no
)
return jsonify(result_1=result_1)
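# Collects the target-1 firer's profile (name, rank, cantonment, service id), the last five
# grouping results / tendencies / grouping lengths, and today's set-1..4 assignments for
# target 1, returning them as a flat tuple consumed by load_detail_1().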
def get_information(target_1_id,sess,paper_ref):
res=[]
ten=[]
gp_len=[]
curdate=time.strftime("%Y-%m-%d")
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==target_1_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==target_1_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
tgp = db.session.query(Grouping.grouping_length_f).filter(Grouping.firer_id==target_1_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
for ele in tres:
for ele2 in ele:
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
ten.append(ele4)
for ele5 in tgp:
for ele6 in ele5:
gp_len.append(int(ele6))
da_1=db.session.query(Shooter.name).filter(Shooter.id==target_1_id).scalar()
cant_id=db.session.query(Shooter.cantonment_id).filter(Shooter.id==target_1_id).scalar()
cant=db.session.query(Cantonment.cantonment).filter(Cantonment.id==cant_id).scalar()
ra_1_id=db.session.query(Shooter.rank_id).filter(Shooter.id==target_1_id).scalar()
ra_1 = db.session.query(Rank.name).filter(Rank.id==ra_1_id).scalar()
service_id_1 = db.session.query(Shooter.service_id).filter(Shooter.id==target_1_id).scalar()
set_1_id = db.session.query(Firer_Details.firer_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==1
).distinct().scalar()
set_1_session_no=db.session.query(Firer_Details.session_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==1
).distinct().scalar()
set_1_detail_no=db.session.query(Firer_Details.detail_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==1
).distinct().scalar()
set_1_name=db.session.query(Shooter.name).filter(
Shooter.id==set_1_id
).scalar()
set_1_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_1_id).scalar()
set_2_id = db.session.query(Firer_Details.firer_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==2
).distinct().scalar()
set_2_session_no=db.session.query(Firer_Details.session_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==2
).distinct().scalar()
set_2_detail_no=db.session.query(Firer_Details.detail_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==2
).distinct().scalar()
set_2_name=db.session.query(Shooter.name).filter(
Shooter.id==set_2_id
).scalar()
set_2_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_2_id).scalar()
set_3_id = db.session.query(Firer_Details.firer_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==3
).distinct().scalar()
set_3_session_no=db.session.query(Firer_Details.session_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==3
).distinct().scalar()
set_3_detail_no=db.session.query(Firer_Details.detail_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==3
).distinct().scalar()
set_3_name=db.session.query(Shooter.name).filter(
Shooter.id==set_3_id
).scalar()
set_3_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_3_id).scalar()
set_4_id = db.session.query(Firer_Details.firer_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==4
).distinct().scalar()
set_4_session_no=db.session.query(Firer_Details.session_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==4
).distinct().scalar()
set_4_detail_no=db.session.query(Firer_Details.detail_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==4
).distinct().scalar()
set_4_name=db.session.query(Shooter.name).filter(
Shooter.id==set_4_id
).scalar()
set_4_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_4_id).scalar()
current_firer_name = db.session.query(Shooter.name).filter(Shooter.id==target_1_id).scalar()
current_army_no = db.session.query(Shooter.service_id).filter(Shooter.id==target_1_id).scalar()
current_session_no=db.session.query(TShooting.session_id).filter(TShooting.target_1_id==target_1_id).scalar()
current_detail_no=db.session.query(TShooting.detail_no).filter(TShooting.target_1_id==target_1_id).scalar()
return(da_1,cant,ra_1,service_id_1,res,ten,gp_len,
set_4_name,set_4_army,set_4_session_no,set_4_detail_no,
set_3_name,set_3_army,set_3_session_no,set_3_detail_no,
set_2_name,set_2_army,set_2_session_no,set_2_detail_no,
set_1_name,set_1_army,set_1_session_no,set_1_detail_no,
current_firer_name,current_army_no,current_session_no,current_detail_no
)
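# Individual score pages for targets 2-8: each looks up the firer currently assigned to its
# target in TShooting, fetches that firer's last five grouping results and tendencies, and
# renders the corresponding prediction_target_N template.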
@app.route('/individual_score/target_2', methods=['GET', 'POST'])
def individual_score_target_2():
firer_id =db.session.query(TShooting.target_2_id).scalar()
detail_no =db.session.query(TShooting.detail_no).scalar()
session_no =db.session.query(TShooting.session_id).scalar()
target_no = 2
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==firer_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
res=[]
ten=[]
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==firer_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
print(tres,)
for ele in tres:
for ele2 in ele:
print(type(ele2))
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
print(type(ele4))
ten.append(ele4)
service_id = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
rank_id=db.session.query(Shooter.rank_id).filter(Shooter.id==firer_id).scalar()
rank=db.session.query(Rank.name).filter(Rank.id==rank_id).scalar()
name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
firer_id,sess,o,p,u,q,t1_x,t1_y,xmpi,ympi,f,gp,Tfirt_x,Tfirt_y,fin_x_1,fin_y_1,result_1,fir_tendency_1=prediction_calculation_2()
if request.method == 'POST':
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
print("paper_ref")
print(paper_ref)
return render_template('pages/prediction_target_2.html',
name = name,
detail_no=detail_no,
session_no=session_no,
target_no=target_no,
service_id=service_id,
rank=rank,
res=res,
ten=ten)
@app.route('/individual_score/target_3', methods=['GET', 'POST'])
def individual_score_target_3():
firer_id =db.session.query(TShooting.target_3_id).scalar()
detail_no =db.session.query(TShooting.detail_no).scalar()
session_no =db.session.query(TShooting.session_id).scalar()
target_no = 3
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==firer_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
res=[]
ten=[]
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==firer_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
print(tres)
for ele in tres:
for ele2 in ele:
print(type(ele2))
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
print(type(ele4))
ten.append(ele4)
service_id = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
rank_id=db.session.query(Shooter.rank_id).filter(Shooter.id==firer_id).scalar()
rank=db.session.query(Rank.name).filter(Rank.id==rank_id).scalar()
name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
return render_template('pages/prediction_target_3.html',
name = name,
detail_no=detail_no,
session_no=session_no,
target_no=target_no,
service_id=service_id,
rank=rank,
res=res,
ten=ten)
@app.route('/individual_score/target_4', methods=['GET', 'POST'])
def individual_score_target_4():
firer_id =db.session.query(TShooting.target_4_id).scalar()
detail_no =db.session.query(TShooting.detail_no).scalar()
session_no =db.session.query(TShooting.session_id).scalar()
target_no = 4
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==firer_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
res=[]
ten=[]
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==firer_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
print(tres)
for ele in tres:
for ele2 in ele:
print(type(ele2))
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
print(type(ele4))
ten.append(ele4)
service_id = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
rank_id=db.session.query(Shooter.rank_id).filter(Shooter.id==firer_id).scalar()
rank=db.session.query(Rank.name).filter(Rank.id==rank_id).scalar()
name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
return render_template('pages/prediction_target_4.html',
name = name,
detail_no=detail_no,
session_no=session_no,
target_no=target_no,
service_id=service_id,
rank=rank,
res=res,
ten=ten)
@app.route('/individual_score/target_5', methods=['GET', 'POST'])
def individual_score_target_5():
firer_id =db.session.query(TShooting.target_5_id).scalar()
detail_no =db.session.query(TShooting.detail_no).scalar()
session_no =db.session.query(TShooting.session_id).scalar()
target_no = 5
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==firer_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
res=[]
ten=[]
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==firer_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
print(tres)
for ele in tres:
for ele2 in ele:
print(type(ele2))
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
print(type(ele4))
ten.append(ele4)
service_id = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
rank_id=db.session.query(Shooter.rank_id).filter(Shooter.id==firer_id).scalar()
rank=db.session.query(Rank.name).filter(Rank.id==rank_id).scalar()
name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
return render_template('pages/prediction_target_5.html',
name = name,
detail_no=detail_no,
session_no=session_no,
target_no=target_no,
service_id=service_id,
rank=rank,
res=res,
ten=ten)
@app.route('/individual_score/target_6', methods=['GET', 'POST'])
def individual_score_target_6():
firer_id =db.session.query(TShooting.target_6_id).scalar()
detail_no =db.session.query(TShooting.detail_no).scalar()
session_no =db.session.query(TShooting.session_id).scalar()
target_no = 6
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==firer_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
res=[]
ten=[]
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==firer_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
print(tres)
for ele in tres:
for ele2 in ele:
print(type(ele2))
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
print(type(ele4))
ten.append(ele4)
service_id = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
rank_id=db.session.query(Shooter.rank_id).filter(Shooter.id==firer_id).scalar()
rank=db.session.query(Rank.name).filter(Rank.id==rank_id).scalar()
name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
return render_template('pages/prediction_target_6.html',
name = name,
detail_no=detail_no,
session_no=session_no,
target_no=target_no,
service_id=service_id,
rank=rank,
res=res,
ten=ten)
@app.route('/individual_score/target_7', methods=['GET', 'POST'])
def individual_score_target_7():
firer_id =db.session.query(TShooting.target_7_id).scalar()
detail_no =db.session.query(TShooting.detail_no).scalar()
session_no =db.session.query(TShooting.session_id).scalar()
target_no = 7
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==firer_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
res=[]
ten=[]
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==firer_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
print(tres)
for ele in tres:
for ele2 in ele:
print(type(ele2))
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
print(type(ele4))
ten.append(ele4)
service_id = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
rank_id=db.session.query(Shooter.rank_id).filter(Shooter.id==firer_id).scalar()
rank=db.session.query(Rank.name).filter(Rank.id==rank_id).scalar()
name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
return render_template('pages/prediction_target_7.html',
name = name,
detail_no=detail_no,
session_no=session_no,
target_no=target_no,
service_id=service_id,
rank=rank,
res=res,
ten=ten)
@app.route('/individual_score/target_8', methods=['GET', 'POST'])
def individual_score_target_8():
firer_id =db.session.query(TShooting.target_8_id).scalar()
detail_no =db.session.query(TShooting.detail_no).scalar()
session_no =db.session.query(TShooting.session_id).scalar()
    target_no = 8
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==firer_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
res=[]
ten=[]
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==firer_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
print(tres)
for ele in tres:
for ele2 in ele:
print(type(ele2))
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
print(type(ele4))
ten.append(ele4)
service_id = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
rank_id=db.session.query(Shooter.rank_id).filter(Shooter.id==firer_id).scalar()
rank=db.session.query(Rank.name).filter(Rank.id==rank_id).scalar()
name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
return render_template('pages/prediction_target_8.html',
name = name,
detail_no=detail_no,
session_no=session_no,
target_no=target_no,
service_id=service_id,
rank=rank,
res=res,
ten=ten)
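# Computes the target-1 prediction on POST (via prediction_calculation_1) and returns the shot
# coordinates, MPI, grouping, result and firing tendency as JSON, together with the set-1..4
# firer summaries and the final x/y coordinates of sets 2-4 read from Firer_Details for plotting.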
@app.route('/prediction_target_1/', methods=['GET', 'POST'])
def prediction_target_1():
t1_x=0
t1_y=0
xmpi_j=0
ympi_j=0
gp=0
Tfirt_x_j=0
Tfirt_y_j=0
fin_x_1=0
fin_y_1=0
xmpi_inch = 0
ympi_inch = 0
result_1=None
fir_tendency=None
set_1_name = None
set_1_army =None
set_1_session_no = None
set_1_detail_no=None
set_1_id =None
set_2_name = None
set_2_army =None
set_2_session_no = None
set_2_detail_no=None
set_2_id =None
set_3_name = None
set_3_army =None
set_3_session_no = None
set_3_detail_no=None
set_3_id =None
set_4_name = None
set_4_army =None
set_4_session_no = None
set_4_detail_no=None
set_4_id =None
fir_tendency_1=None
firer_id=None
current_army_no=None
current_firer_name=None
current_session_no=None
session_detail_no=None
current_detail_no=None
set_2_x=None
set_2_y=None
set_3_x=None
set_3_y=None
set_4_x=None
set_4_y=None
paper_ref=None
sess=None
res=None
set_2_x_arr=[]
set_2_y_arr=[]
set_3_x_arr=[]
set_3_y_arr=[]
set_4_x_arr=[]
set_4_y_arr=[]
fin_x_arr_1=[]
fin_y_arr_1=[]
curdate=time.strftime("%Y-%m-%d")
if request.method == 'POST':
firer_id,sess,detail,p,u,q,t1_x,t1_y,xmpi,ympi,f,gp,Tfirt_x,Tfirt_y,fin_x_1,fin_y_1,result_1,fir_tendency_1=prediction_calculation_1()
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
set_2_x=db.session.query(Firer_Details).filter(Firer_Details.date==curdate , Firer_Details.target_no==1 ,Firer_Details.set_no==2 , Firer_Details.session_id==sess).all()
set_2_y=db.session.query(Firer_Details).filter(Firer_Details.date==curdate , Firer_Details.target_no==1 , Firer_Details.set_no==2 , Firer_Details.session_id==sess).all()
for x_2 in set_2_x:
set_2_x_arr.append(int(x_2.final_x))
for y_2 in set_2_y:
set_2_y_arr.append(int(y_2.final_y))
set_3_x=db.session.query(Firer_Details).filter(Firer_Details.date==curdate , Firer_Details.target_no==1 , Firer_Details.set_no==3 , Firer_Details.session_id==sess).all()
set_3_y=db.session.query(Firer_Details).filter(Firer_Details.date==curdate , Firer_Details.target_no==1 , Firer_Details.set_no==3 , Firer_Details.session_id==sess).all()
for x_3 in set_3_x:
set_3_x_arr.append(int(x_3.final_x))
for y_3 in set_3_y:
set_3_y_arr.append(int(y_3.final_y))
print(set_3_x_arr)
set_4_x=db.session.query(Firer_Details).filter(Firer_Details.date==curdate , Firer_Details.target_no==1 , Firer_Details.set_no==4 , Firer_Details.session_id==sess).all()
set_4_y=db.session.query(Firer_Details).filter(Firer_Details.date==curdate , Firer_Details.target_no==1 , Firer_Details.set_no==4 , Firer_Details.session_id==sess).all()
for x_4 in set_4_x:
set_4_x_arr.append(int(x_4.final_x))
for y_4 in set_4_y:
set_4_y_arr.append(int(y_4.final_y))
set_1_id = db.session.query(Firer_Details.firer_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==1
).distinct().scalar()
set_1_session_no=db.session.query(Firer_Details.session_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==1
).distinct().scalar()
set_1_detail_no=db.session.query(Firer_Details.detail_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==1
).distinct().scalar()
set_1_name=db.session.query(Shooter.name).filter(
Shooter.id==set_1_id
).scalar()
set_1_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_1_id).scalar()
set_2_id = db.session.query(Firer_Details.firer_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==2
).distinct().scalar()
set_2_session_no=db.session.query(Firer_Details.session_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==2
).distinct().scalar()
set_2_detail_no=db.session.query(Firer_Details.detail_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==2
).distinct().scalar()
print("set_2_detail_no")
print(set_2_detail_no)
print(set_2_detail_no)
set_2_name=db.session.query(Shooter.name).filter(
Shooter.id==set_2_id
).scalar()
set_2_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_2_id).scalar()
set_3_id = db.session.query(Firer_Details.firer_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==3
).distinct().scalar()
set_3_session_no=db.session.query(Firer_Details.session_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==3
).distinct().scalar()
set_3_detail_no=db.session.query(Firer_Details.detail_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==3
).distinct().scalar()
set_3_name=db.session.query(Shooter.name).filter(
Shooter.id==set_3_id
).scalar()
set_3_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_3_id).scalar()
set_4_id = db.session.query(Firer_Details.firer_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==4
).distinct().scalar()
set_4_session_no=db.session.query(Firer_Details.session_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==4
).distinct().scalar()
set_4_detail_no=db.session.query(Firer_Details.detail_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==4
).distinct().scalar()
set_4_name=db.session.query(Shooter.name).filter(
Shooter.id==set_4_id
).scalar()
set_4_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_4_id).scalar()
current_firer_name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
current_army_no = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
current_session_no=db.session.query(TShooting.session_id).filter(TShooting.target_1_id==firer_id).scalar()
current_detail_no=db.session.query(TShooting.detail_no).filter(TShooting.target_1_id==firer_id).scalar()
xmpi_inch = pixeltoinch(xmpi)
ympi_inch = pixeltoinch(ympi)
xmpi_j =pd.Series(xmpi_inch).to_json(orient='values')
ympi_j =pd.Series(ympi_inch).to_json(orient='values')
Tfirt_x_j =pd.Series(Tfirt_x).to_json(orient='values')
Tfirt_y_j =pd.Series(Tfirt_y).to_json(orient='values')
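        # NOTE: xmpi_j/ympi_j are serialised above but not returned; the JSON response below
        # reuses Tfirt_x_j/Tfirt_y_j for both the xmpi1/ympi1 and txf1/tyf1 fields.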
for x_1 in fin_x_1:
fin_x_arr_1.append(int(x_1.final_x))
for y_1 in fin_y_1 :
fin_y_arr_1.append(int(y_1.final_y))
return jsonify(x1=t1_x ,
y1=t1_y ,
xmpi1=Tfirt_x_j ,
ympi1=Tfirt_y_j,
gp=gp,
txf1=Tfirt_x_j,
tyf1=Tfirt_y_j,
fx1=fin_x_arr_1,
fy1=fin_y_arr_1,
result_1=result_1,
fir_tendency_1=fir_tendency_1,
set_1_name=set_1_name,
current_firer_name=current_firer_name,
set_1_army=set_1_army,
current_army_no=current_army_no,
set_1_session_no=set_1_session_no,
current_session_no=current_session_no,
set_1_detail_no=set_1_detail_no,
current_detail_no=current_detail_no,
set_2_x=set_2_x_arr,
set_2_y=set_2_y_arr,
set_2_name=set_2_name,
set_2_army=set_2_army,
set_2_detail_no=set_2_detail_no,
set_2_session_no=set_2_session_no,
set_3_x=set_3_x_arr,
set_3_y=set_3_y_arr,
set_3_name=set_3_name,
set_3_army=set_3_army,
set_3_session_no=set_3_session_no,
set_3_detail_no=set_3_detail_no,
set_4_x=set_4_x_arr,
set_4_y=set_4_y_arr,
set_4_name=set_4_name,
set_4_army=set_4_army,
set_4_session_no=set_4_session_no,
set_4_detail_no=set_4_detail_no
)
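# Prediction endpoint for target 2. Unlike target 1 it reads the per-set coordinates from
# T_Firer_Details and additionally filters on the current paper_ref; otherwise the flow
# mirrors prediction_target_1.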
@app.route('/prediction_target_2/', methods=['GET', 'POST'])
def prediction_target_2():
t1_x=0
t1_y=0
xmpi_j=0
ympi_j=0
gp=0
Tfirt_x_j=0
Tfirt_y_j=0
fin_x_1=0
fin_y_1=0
xmpi_inch = 0
ympi_inch = 0
result_1=None
fir_tendency=None
set_1_name = None
set_1_army =None
set_1_session_no = None
set_1_detail_no=None
set_1_id =None
set_2_name = None
set_2_army =None
set_2_session_no = None
set_2_detail_no=None
set_2_id =None
set_3_name = None
set_3_army =None
set_3_session_no = None
set_3_detail_no=None
set_3_id =None
set_4_name = None
set_4_army =None
set_4_session_no = None
set_4_detail_no=None
set_4_id =None
fir_tendency_1=None
firer_id=None
current_army_no=None
current_firer_name=None
current_session_no=None
session_detail_no=None
current_detail_no=None
set_2_x=None
set_2_y=None
set_3_x=None
set_3_y=None
set_4_x=None
set_4_y=None
paper_ref=None
sess=None
res=None
set_2_x_arr=[]
set_2_y_arr=[]
set_3_x_arr=[]
set_3_y_arr=[]
set_4_x_arr=[]
set_4_y_arr=[]
fin_x_arr_1=[]
fin_y_arr_1=[]
curdate=time.strftime("%Y-%m-%d")
if request.method == 'POST':
firer_id,sess,o,p,u,q,t1_x,t1_y,xmpi,ympi,f,gp,Tfirt_x,Tfirt_y,fin_x_1,fin_y_1,result_1,fir_tendency_1=prediction_calculation_2()
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
set_2_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==2 ,T_Firer_Details.set_no==2 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_2_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==2 , T_Firer_Details.set_no==2 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_2 in set_2_x:
set_2_x_arr.append(int(x_2.final_x))
for y_2 in set_2_y:
set_2_y_arr.append(int(y_2.final_y))
set_3_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==2 , T_Firer_Details.set_no==3 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_3_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==2 , T_Firer_Details.set_no==3 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_3 in set_3_x:
set_3_x_arr.append(int(x_3.final_x))
for y_3 in set_3_y:
set_3_y_arr.append(int(y_3.final_y))
set_4_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==2 , T_Firer_Details.set_no==4 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_4_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==2 , T_Firer_Details.set_no==4 ,T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_4 in set_4_x:
set_4_x_arr.append(int(x_4.final_x))
for y_4 in set_4_y:
set_4_y_arr.append(int(y_4.final_y))
set_1_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_name=db.session.query(Shooter.name).filter(
Shooter.id==set_1_id
).scalar()
set_1_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_1_id).scalar()
set_2_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_name=db.session.query(Shooter.name).filter(
Shooter.id==set_2_id
).scalar()
set_2_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_2_id).scalar()
set_3_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_name=db.session.query(Shooter.name).filter(
Shooter.id==set_3_id
).scalar()
set_3_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_3_id).scalar()
set_4_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_name=db.session.query(Shooter.name).filter(
Shooter.id==set_4_id
).scalar()
set_4_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_4_id).scalar()
current_firer_name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
current_army_no = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
current_session_no=db.session.query(TShooting.session_id).filter(TShooting.target_1_id==firer_id).scalar()
current_detail_no=db.session.query(TShooting.detail_no).filter(TShooting.target_1_id==firer_id).scalar()
xmpi_inch = pixeltoinch(xmpi)
ympi_inch = pixeltoinch(ympi)
xmpi_j =pd.Series(xmpi_inch).to_json(orient='values')
ympi_j =pd.Series(ympi_inch).to_json(orient='values')
Tfirt_x_j =pd.Series(Tfirt_x).to_json(orient='values')
Tfirt_y_j =pd.Series(Tfirt_y).to_json(orient='values')
fin_x_arr_1=[]
fin_y_arr_1=[]
for x_1 in fin_x_1:
fin_x_arr_1.append(int(x_1.final_x))
for y_1 in fin_y_1 :
fin_y_arr_1.append(int(y_1.final_y))
return jsonify(x1=t1_x ,
y1=t1_y ,
xmpi1=Tfirt_x_j ,
ympi1=Tfirt_y_j,
gp=gp,
txf1=Tfirt_x_j,
tyf1=Tfirt_y_j,
fx1=fin_x_arr_1,
fy1=fin_y_arr_1,
result_1=result_1,
fir_tendency_1=fir_tendency_1,
set_1_name=set_1_name,
current_firer_name=current_firer_name,
set_1_army=set_1_army,
current_army_no=current_army_no,
set_1_session_no=set_1_session_no,
current_session_no=current_session_no,
set_1_detail_no=set_1_detail_no,
current_detail_no=current_detail_no,
set_2_x=set_2_x_arr,
set_2_y=set_2_y_arr,
set_2_name=set_2_name,
set_2_army=set_2_army,
set_2_detail_no=set_2_detail_no,
set_2_session_no=set_2_session_no,
set_3_x=set_3_x_arr,
set_3_y=set_3_y_arr,
set_3_name=set_3_name,
set_3_army=set_3_army,
set_3_session_no=set_3_session_no,
set_3_detail_no=set_3_detail_no,
set_4_x=set_4_x_arr,
set_4_y=set_4_y_arr,
set_4_name=set_4_name,
set_4_army=set_4_army,
set_4_session_no=set_4_session_no,
set_4_detail_no=set_4_detail_no
)
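# Prediction endpoint for target 3; same structure as prediction_target_2 but keyed on
# target_no == 3 and prediction_calculation_3().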
@app.route('/prediction_target_3/', methods=['GET', 'POST'])
def prediction_target_3():
t1_x=0
t1_y=0
xmpi_j=0
ympi_j=0
gp=0
Tfirt_x_j=0
Tfirt_y_j=0
fin_x_1=0
fin_y_1=0
xmpi_inch = 0
ympi_inch = 0
result_1=None
fir_tendency=None
set_1_name = None
set_1_army =None
set_1_session_no = None
set_1_detail_no=None
set_1_id =None
set_2_name = None
set_2_army =None
set_2_session_no = None
set_2_detail_no=None
set_2_id =None
set_3_name = None
set_3_army =None
set_3_session_no = None
set_3_detail_no=None
set_3_id =None
set_4_name = None
set_4_army =None
set_4_session_no = None
set_4_detail_no=None
set_4_id =None
fir_tendency_1=None
firer_id=None
current_army_no=None
current_firer_name=None
current_session_no=None
session_detail_no=None
current_detail_no=None
set_2_x=None
set_2_y=None
set_3_x=None
set_3_y=None
set_4_x=None
set_4_y=None
paper_ref=None
sess=None
res=None
set_2_x_arr=[]
set_2_y_arr=[]
set_3_x_arr=[]
set_3_y_arr=[]
set_4_x_arr=[]
set_4_y_arr=[]
fin_x_arr_1=[]
fin_y_arr_1=[]
curdate=time.strftime("%Y-%m-%d")
if request.method == 'POST':
firer_id,sess,o,p,u,q,t1_x,t1_y,xmpi,ympi,f,gp,Tfirt_x,Tfirt_y,fin_x_1,fin_y_1,result_1,fir_tendency_1=prediction_calculation_3()
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
set_2_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==3 ,T_Firer_Details.set_no==2 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_2_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==3 , T_Firer_Details.set_no==2 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_2 in set_2_x:
set_2_x_arr.append(int(x_2.final_x))
for y_2 in set_2_y:
set_2_y_arr.append(int(y_2.final_y))
set_3_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==3 , T_Firer_Details.set_no==3 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_3_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==3 , T_Firer_Details.set_no==3 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_3 in set_3_x:
set_3_x_arr.append(int(x_3.final_x))
        for y_3 in set_3_y:
            set_3_y_arr.append(int(y_3.final_y))
set_4_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==3 , T_Firer_Details.set_no==4 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_4_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==3 , T_Firer_Details.set_no==4 ,T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_4 in set_4_x:
set_4_x_arr.append(int(x_4.final_x))
        for y_4 in set_4_y:
            set_4_y_arr.append(int(y_4.final_y))
set_1_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_name=db.session.query(Shooter.name).filter(
Shooter.id==set_1_id
).scalar()
set_1_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_1_id).scalar()
set_2_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_name=db.session.query(Shooter.name).filter(
Shooter.id==set_2_id
).scalar()
set_2_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_2_id).scalar()
set_3_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_name=db.session.query(Shooter.name).filter(
Shooter.id==set_3_id
).scalar()
set_3_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_3_id).scalar()
set_4_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_name=db.session.query(Shooter.name).filter(
Shooter.id==set_4_id
).scalar()
set_4_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_4_id).scalar()
current_firer_name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
current_army_no = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
current_session_no=db.session.query(TShooting.session_id).filter(TShooting.target_1_id==firer_id).scalar()
current_detail_no=db.session.query(TShooting.detail_no).filter(TShooting.target_1_id==firer_id).scalar()
xmpi_inch = pixeltoinch(xmpi)
ympi_inch = pixeltoinch(ympi)
xmpi_j =pd.Series(xmpi_inch).to_json(orient='values')
ympi_j =pd.Series(ympi_inch).to_json(orient='values')
Tfirt_x_j =pd.Series(Tfirt_x).to_json(orient='values')
Tfirt_y_j =pd.Series(Tfirt_y).to_json(orient='values')
fin_x_arr_1=[]
fin_y_arr_1=[]
for x_1 in fin_x_1:
fin_x_arr_1.append(int(x_1.final_x))
for y_1 in fin_y_1 :
fin_y_arr_1.append(int(y_1.final_y))
return jsonify(x1=t1_x ,
y1=t1_y ,
xmpi1=Tfirt_x_j ,
ympi1=Tfirt_y_j,
gp=gp,
txf1=Tfirt_x_j,
tyf1=Tfirt_y_j,
fx1=fin_x_arr_1,
fy1=fin_y_arr_1,
result_1=result_1,
fir_tendency_1=fir_tendency_1,
set_1_name=set_1_name,
current_firer_name=current_firer_name,
set_1_army=set_1_army,
current_army_no=current_army_no,
set_1_session_no=set_1_session_no,
current_session_no=current_session_no,
set_1_detail_no=set_1_detail_no,
current_detail_no=current_detail_no,
set_2_x=set_2_x_arr,
set_2_y=set_2_y_arr,
set_2_name=set_2_name,
set_2_army=set_2_army,
set_2_detail_no=set_2_detail_no,
set_2_session_no=set_2_session_no,
set_3_x=set_3_x_arr,
set_3_y=set_3_y_arr,
set_3_name=set_3_name,
set_3_army=set_3_army,
set_3_session_no=set_3_session_no,
set_3_detail_no=set_3_detail_no,
set_4_x=set_4_x_arr,
set_4_y=set_4_y_arr,
set_4_name=set_4_name,
set_4_army=set_4_army,
set_4_session_no=set_4_session_no,
set_4_detail_no=set_4_detail_no
)
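# Prediction endpoint for target 4; same structure as prediction_target_2 but keyed on
# target_no == 4 and prediction_calculation_4().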
@app.route('/prediction_target_4/', methods=['GET', 'POST'])
def prediction_target_4():
t1_x=0
t1_y=0
xmpi_j=0
ympi_j=0
gp=0
Tfirt_x_j=0
Tfirt_y_j=0
fin_x_1=0
fin_y_1=0
xmpi_inch = 0
ympi_inch = 0
result_1=None
fir_tendency=None
set_1_name = None
set_1_army =None
set_1_session_no = None
set_1_detail_no=None
set_1_id =None
set_2_name = None
set_2_army =None
set_2_session_no = None
set_2_detail_no=None
set_2_id =None
set_3_name = None
set_3_army =None
set_3_session_no = None
set_3_detail_no=None
set_3_id =None
set_4_name = None
set_4_army =None
set_4_session_no = None
set_4_detail_no=None
set_4_id =None
fir_tendency_1=None
firer_id=None
current_army_no=None
current_firer_name=None
current_session_no=None
session_detail_no=None
current_detail_no=None
set_2_x=None
set_2_y=None
set_3_x=None
set_3_y=None
set_4_x=None
set_4_y=None
paper_ref=None
sess=None
res=None
set_2_x_arr=[]
set_2_y_arr=[]
set_3_x_arr=[]
set_3_y_arr=[]
set_4_x_arr=[]
set_4_y_arr=[]
fin_x_arr_1=[]
fin_y_arr_1=[]
curdate=time.strftime("%Y-%m-%d")
if request.method == 'POST':
firer_id,sess,o,p,u,q,t1_x,t1_y,xmpi,ympi,f,gp,Tfirt_x,Tfirt_y,fin_x_1,fin_y_1,result_1,fir_tendency_1=prediction_calculation_4()
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
set_2_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==4 ,T_Firer_Details.set_no==2 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_2_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==4 , T_Firer_Details.set_no==2 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_2 in set_2_x:
set_2_x_arr.append(int(x_2.final_x))
for y_2 in set_2_y:
set_2_y_arr.append(int(y_2.final_y))
set_3_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==4 , T_Firer_Details.set_no==3 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_3_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==4 , T_Firer_Details.set_no==3 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_3 in set_3_x:
set_3_x_arr.append(int(x_3.final_x))
for y_3 in set_3_y:
set_3_y_arr.append(int(y_3.final_y))
set_4_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==4 , T_Firer_Details.set_no==4 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_4_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==4 , T_Firer_Details.set_no==4 ,T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_4 in set_4_x:
set_4_x_arr.append(int(x_4.final_x))
for y_4 in set_4_y:
set_4_y_arr.append(int(y_4.final_y))
set_1_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==4,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==4,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==4,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_name=db.session.query(Shooter.name).filter(
Shooter.id==set_1_id
).scalar()
set_1_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_1_id).scalar()
set_2_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==4,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==4,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==4,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_name=db.session.query(Shooter.name).filter(
Shooter.id==set_2_id
).scalar()
set_2_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_2_id).scalar()
set_3_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==4,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==4,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==4,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_name=db.session.query(Shooter.name).filter(
Shooter.id==set_3_id
).scalar()
set_3_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_3_id).scalar()
set_4_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==4,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==4,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==4,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_name=db.session.query(Shooter.name).filter(
Shooter.id==set_4_id
).scalar()
set_4_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_4_id).scalar()
current_firer_name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
current_army_no = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
current_session_no=db.session.query(TShooting.session_id).filter(TShooting.target_1_id==firer_id).scalar()
current_detail_no=db.session.query(TShooting.detail_no).filter(TShooting.target_1_id==firer_id).scalar()
xmpi_inch = pixeltoinch(xmpi)
ympi_inch = pixeltoinch(ympi)
xmpi_j =pd.Series(xmpi_inch).to_json(orient='values')
ympi_j =pd.Series(ympi_inch).to_json(orient='values')
Tfirt_x_j =pd.Series(Tfirt_x).to_json(orient='values')
Tfirt_y_j =pd.Series(Tfirt_y).to_json(orient='values')
for x_1 in fin_x_1:
fin_x_arr_1.append(int(x_1.final_x))
for y_1 in fin_y_1 :
fin_y_arr_1.append(int(y_1.final_y))
return jsonify(x1=t1_x ,
y1=t1_y ,
xmpi1=Tfirt_x_j ,
ympi1=Tfirt_y_j,
gp=gp,
txf1=Tfirt_x_j,
tyf1=Tfirt_y_j,
fx1=fin_x_arr_1,
fy1=fin_y_arr_1,
result_1=result_1,
fir_tendency_1=fir_tendency_1,
set_1_name=set_1_name,
current_firer_name=current_firer_name,
set_1_army=set_1_army,
current_army_no=current_army_no,
set_1_session_no=set_1_session_no,
current_session_no=current_session_no,
set_1_detail_no=set_1_detail_no,
current_detail_no=current_detail_no,
set_2_x=set_2_x_arr,
set_2_y=set_2_y_arr,
set_2_name=set_2_name,
set_2_army=set_2_army,
set_2_detail_no=set_2_detail_no,
set_2_session_no=set_2_session_no,
set_3_x=set_3_x_arr,
set_3_y=set_3_y_arr,
set_3_name=set_3_name,
set_3_army=set_3_army,
set_3_session_no=set_3_session_no,
set_3_detail_no=set_3_detail_no,
set_4_x=set_4_x_arr,
set_4_y=set_4_y_arr,
set_4_name=set_4_name,
set_4_army=set_4_army,
set_4_session_no=set_4_session_no,
set_4_detail_no=set_4_detail_no
)
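# Prediction endpoint for target 5; same structure as prediction_target_2 but keyed on
# target_no == 5 and prediction_calculation_5().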
@app.route('/prediction_target_5/', methods=['GET', 'POST'])
def prediction_target_5():
t1_x=0
t1_y=0
xmpi_j=0
ympi_j=0
gp=0
Tfirt_x_j=0
Tfirt_y_j=0
fin_x_1=0
fin_y_1=0
xmpi_inch = 0
ympi_inch = 0
result_1=None
fir_tendency=None
set_1_name = None
set_1_army =None
set_1_session_no = None
set_1_detail_no=None
set_1_id =None
set_2_name = None
set_2_army =None
set_2_session_no = None
set_2_detail_no=None
set_2_id =None
set_3_name = None
set_3_army =None
set_3_session_no = None
set_3_detail_no=None
set_3_id =None
set_4_name = None
set_4_army =None
set_4_session_no = None
set_4_detail_no=None
set_4_id =None
fir_tendency_1=None
firer_id=None
current_army_no=None
current_firer_name=None
current_session_no=None
session_detail_no=None
current_detail_no=None
set_2_x=None
set_2_y=None
set_3_x=None
set_3_y=None
set_4_x=None
set_4_y=None
paper_ref=None
sess=None
res=None
set_2_x_arr=[]
set_2_y_arr=[]
set_3_x_arr=[]
set_3_y_arr=[]
set_4_x_arr=[]
set_4_y_arr=[]
fin_x_arr_1=[]
fin_y_arr_1=[]
curdate=time.strftime("%Y-%m-%d")
if request.method == 'POST':
firer_id,sess,o,p,u,q,t1_x,t1_y,xmpi,ympi,f,gp,Tfirt_x,Tfirt_y,fin_x_1,fin_y_1,result_1,fir_tendency_1=prediction_calculation_5()
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
set_2_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==5 ,T_Firer_Details.set_no==2 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_2_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==5 , T_Firer_Details.set_no==2 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_2 in set_2_x:
set_2_x_arr.append(int(x_2.final_x))
for y_2 in set_2_y:
set_2_y_arr.append(int(y_2.final_y))
set_3_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==5 , T_Firer_Details.set_no==3 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_3_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==5 , T_Firer_Details.set_no==3 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_3 in set_3_x:
set_3_x_arr.append(int(x_3.final_x))
for y_3 in set_3_y:
set_3_y_arr.append(int(y_3.final_y))
set_4_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==5 , T_Firer_Details.set_no==4 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_4_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==5 , T_Firer_Details.set_no==4 ,T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_4 in set_4_x:
set_4_x_arr.append(int(x_4.final_x))
for y_4 in set_4_y:
set_4_y_arr.append(int(y_4.final_y))
set_1_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==5,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==5,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==5,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_name=db.session.query(Shooter.name).filter(
Shooter.id==set_1_id
).scalar()
set_1_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_1_id).scalar()
set_2_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==5,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==5,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==5,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_name=db.session.query(Shooter.name).filter(
Shooter.id==set_2_id
).scalar()
set_2_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_2_id).scalar()
set_3_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==5,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==5,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==5,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_name=db.session.query(Shooter.name).filter(
Shooter.id==set_3_id
).scalar()
set_3_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_3_id).scalar()
set_4_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==5,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==5,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==5,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_name=db.session.query(Shooter.name).filter(
Shooter.id==set_4_id
).scalar()
set_4_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_4_id).scalar()
current_firer_name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
current_army_no = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
current_session_no=db.session.query(TShooting.session_id).filter(TShooting.target_1_id==firer_id).scalar()
current_detail_no=db.session.query(TShooting.detail_no).filter(TShooting.target_1_id==firer_id).scalar()
xmpi_inch = pixeltoinch(xmpi)
ympi_inch = pixeltoinch(ympi)
xmpi_j =pd.Series(xmpi_inch).to_json(orient='values')
ympi_j =pd.Series(ympi_inch).to_json(orient='values')
Tfirt_x_j =pd.Series(Tfirt_x).to_json(orient='values')
Tfirt_y_j =pd.Series(Tfirt_y).to_json(orient='values')
for x_1 in fin_x_1:
fin_x_arr_1.append(int(x_1.final_x))
for y_1 in fin_y_1 :
fin_y_arr_1.append(int(y_1.final_y))
return jsonify(x1=t1_x ,
y1=t1_y ,
                        xmpi1=xmpi_j,
                        ympi1=ympi_j,
gp=gp,
txf1=Tfirt_x_j,
tyf1=Tfirt_y_j,
fx1=fin_x_arr_1,
fy1=fin_y_arr_1,
result_1=result_1,
fir_tendency_1=fir_tendency_1,
set_1_name=set_1_name,
current_firer_name=current_firer_name,
set_1_army=set_1_army,
current_army_no=current_army_no,
set_1_session_no=set_1_session_no,
current_session_no=current_session_no,
set_1_detail_no=set_1_detail_no,
current_detail_no=current_detail_no,
set_2_x=set_2_x_arr,
set_2_y=set_2_y_arr,
set_2_name=set_2_name,
set_2_army=set_2_army,
set_2_detail_no=set_2_detail_no,
set_2_session_no=set_2_session_no,
set_3_x=set_3_x_arr,
set_3_y=set_3_y_arr,
set_3_name=set_3_name,
set_3_army=set_3_army,
set_3_session_no=set_3_session_no,
set_3_detail_no=set_3_detail_no,
set_4_x=set_4_x_arr,
set_4_y=set_4_y_arr,
set_4_name=set_4_name,
set_4_army=set_4_army,
set_4_session_no=set_4_session_no,
set_4_detail_no=set_4_detail_no
)
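# Review endpoint for target 6: identical flow to the other per-target prediction routes.
# On POST it runs prediction_calculation_6(), loads the previous sets (2-4) fired on this
# paper/session from T_Firer_Details, and returns everything as JSON for the review page.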
@app.route('/prediction_target_6/', methods=['GET', 'POST'])
def prediction_target_6():
t1_x=0
t1_y=0
xmpi_j=0
ympi_j=0
gp=0
Tfirt_x_j=0
Tfirt_y_j=0
fin_x_1=0
fin_y_1=0
xmpi_inch = 0
ympi_inch = 0
result_1=None
fir_tendency=None
set_1_name = None
set_1_army =None
set_1_session_no = None
set_1_detail_no=None
set_1_id =None
set_2_name = None
set_2_army =None
set_2_session_no = None
set_2_detail_no=None
set_2_id =None
set_3_name = None
set_3_army =None
set_3_session_no = None
set_3_detail_no=None
set_3_id =None
set_4_name = None
set_4_army =None
set_4_session_no = None
set_4_detail_no=None
set_4_id =None
fir_tendency_1=None
firer_id=None
current_army_no=None
current_firer_name=None
current_session_no=None
session_detail_no=None
current_detail_no=None
set_2_x=None
set_2_y=None
set_3_x=None
set_3_y=None
set_4_x=None
set_4_y=None
paper_ref=None
sess=None
res=None
set_2_x_arr=[]
set_2_y_arr=[]
set_3_x_arr=[]
set_3_y_arr=[]
set_4_x_arr=[]
set_4_y_arr=[]
fin_x_arr_1=[]
fin_y_arr_1=[]
curdate=time.strftime("%Y-%m-%d")
if request.method == 'POST':
firer_id,sess,o,p,u,q,t1_x,t1_y,xmpi,ympi,f,gp,Tfirt_x,Tfirt_y,fin_x_1,fin_y_1,result_1,fir_tendency_1=prediction_calculation_6()
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
set_2_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==6 ,T_Firer_Details.set_no==2 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_2_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==6 , T_Firer_Details.set_no==2 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_2 in set_2_x:
set_2_x_arr.append(int(x_2.final_x))
for y_2 in set_2_y:
set_2_y_arr.append(int(y_2.final_y))
set_3_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==6 , T_Firer_Details.set_no==3 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_3_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==6 , T_Firer_Details.set_no==3 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_3 in set_3_x:
set_3_x_arr.append(int(x_3.final_x))
for y_3 in set_3_y:
set_3_y_arr.append(int(y_3.final_y))
set_4_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==6 , T_Firer_Details.set_no==4 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_4_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==6 , T_Firer_Details.set_no==4 ,T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_4 in set_4_x:
set_4_x_arr.append(int(x_4.final_x))
for y_4 in set_4_y:
set_4_y_arr.append(int(y_4.final_y))
set_1_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==6,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==6,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==6,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_name=db.session.query(Shooter.name).filter(
Shooter.id==set_1_id
).scalar()
set_1_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_1_id).scalar()
set_2_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==6,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==6,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==6,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_name=db.session.query(Shooter.name).filter(
Shooter.id==set_2_id
).scalar()
set_2_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_2_id).scalar()
set_3_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==6,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==6,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==6,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_name=db.session.query(Shooter.name).filter(
Shooter.id==set_3_id
).scalar()
set_3_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_3_id).scalar()
set_4_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==6,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==6,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==6,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_name=db.session.query(Shooter.name).filter(
Shooter.id==set_4_id
).scalar()
set_4_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_4_id).scalar()
current_firer_name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
current_army_no = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
current_session_no=db.session.query(TShooting.session_id).filter(TShooting.target_1_id==firer_id).scalar()
current_detail_no=db.session.query(TShooting.detail_no).filter(TShooting.target_1_id==firer_id).scalar()
xmpi_inch = pixeltoinch(xmpi)
ympi_inch = pixeltoinch(ympi)
xmpi_j =pd.Series(xmpi_inch).to_json(orient='values')
ympi_j =pd.Series(ympi_inch).to_json(orient='values')
Tfirt_x_j =pd.Series(Tfirt_x).to_json(orient='values')
Tfirt_y_j =pd.Series(Tfirt_y).to_json(orient='values')
for x_1 in fin_x_1:
fin_x_arr_1.append(int(x_1.final_x))
for y_1 in fin_y_1 :
fin_y_arr_1.append(int(y_1.final_y))
return jsonify(x1=t1_x ,
y1=t1_y ,
                        xmpi1=xmpi_j,
                        ympi1=ympi_j,
gp=gp,
txf1=Tfirt_x_j,
tyf1=Tfirt_y_j,
fx1=fin_x_arr_1,
fy1=fin_y_arr_1,
result_1=result_1,
fir_tendency_1=fir_tendency_1,
set_1_name=set_1_name,
current_firer_name=current_firer_name,
set_1_army=set_1_army,
current_army_no=current_army_no,
set_1_session_no=set_1_session_no,
current_session_no=current_session_no,
set_1_detail_no=set_1_detail_no,
current_detail_no=current_detail_no,
set_2_x=set_2_x_arr,
set_2_y=set_2_y_arr,
set_2_name=set_2_name,
set_2_army=set_2_army,
set_2_detail_no=set_2_detail_no,
set_2_session_no=set_2_session_no,
set_3_x=set_3_x_arr,
set_3_y=set_3_y_arr,
set_3_name=set_3_name,
set_3_army=set_3_army,
set_3_session_no=set_3_session_no,
set_3_detail_no=set_3_detail_no,
set_4_x=set_4_x_arr,
set_4_y=set_4_y_arr,
set_4_name=set_4_name,
set_4_army=set_4_army,
set_4_session_no=set_4_session_no,
set_4_detail_no=set_4_detail_no
)
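# Review endpoint for target 7: same flow as prediction_target_6, driven by
# prediction_calculation_7() and the target_no==7 rows in T_Firer_Details.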
@app.route('/prediction_target_7/', methods=['GET', 'POST'])
def prediction_target_7():
t1_x=0
t1_y=0
xmpi_j=0
ympi_j=0
gp=0
Tfirt_x_j=0
Tfirt_y_j=0
fin_x_1=0
fin_y_1=0
xmpi_inch = 0
ympi_inch = 0
result_1=None
fir_tendency=None
set_1_name = None
set_1_army =None
set_1_session_no = None
set_1_detail_no=None
set_1_id =None
set_2_name = None
set_2_army =None
set_2_session_no = None
set_2_detail_no=None
set_2_id =None
set_3_name = None
set_3_army =None
set_3_session_no = None
set_3_detail_no=None
set_3_id =None
set_4_name = None
set_4_army =None
set_4_session_no = None
set_4_detail_no=None
set_4_id =None
fir_tendency_1=None
firer_id=None
current_army_no=None
current_firer_name=None
current_session_no=None
session_detail_no=None
current_detail_no=None
set_2_x=None
set_2_y=None
set_3_x=None
set_3_y=None
set_4_x=None
set_4_y=None
paper_ref=None
sess=None
res=None
set_2_x_arr=[]
set_2_y_arr=[]
set_3_x_arr=[]
set_3_y_arr=[]
set_4_x_arr=[]
set_4_y_arr=[]
fin_x_arr_1=[]
fin_y_arr_1=[]
curdate=time.strftime("%Y-%m-%d")
if request.method == 'POST':
firer_id,sess,o,p,u,q,t1_x,t1_y,xmpi,ympi,f,gp,Tfirt_x,Tfirt_y,fin_x_1,fin_y_1,result_1,fir_tendency_1=prediction_calculation_7()
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
set_2_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==7,T_Firer_Details.set_no==2 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_2_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==7 , T_Firer_Details.set_no==2 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_2 in set_2_x:
set_2_x_arr.append(int(x_2.final_x))
for y_2 in set_2_y:
set_2_y_arr.append(int(y_2.final_y))
set_3_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==7 , T_Firer_Details.set_no==3 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_3_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==7 , T_Firer_Details.set_no==3 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_3 in set_3_x:
set_3_x_arr.append(int(x_3.final_x))
for y_3 in set_3_y:
set_3_y_arr.append(int(y_3.final_y))
set_4_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==7 , T_Firer_Details.set_no==4 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_4_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==7 , T_Firer_Details.set_no==4 ,T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_4 in set_4_x:
set_4_x_arr.append(int(x_4.final_x))
for y_4 in set_4_y:
set_4_y_arr.append(int(y_4.final_y))
set_1_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==7,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==7,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==7,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_name=db.session.query(Shooter.name).filter(
Shooter.id==set_1_id
).scalar()
set_1_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_1_id).scalar()
set_2_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==7,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==7,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==7,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_name=db.session.query(Shooter.name).filter(
Shooter.id==set_2_id
).scalar()
set_2_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_2_id).scalar()
set_3_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==7,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==7,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==7,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_name=db.session.query(Shooter.name).filter(
Shooter.id==set_3_id
).scalar()
set_3_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_3_id).scalar()
set_4_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==7,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==7,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==7,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_name=db.session.query(Shooter.name).filter(
Shooter.id==set_4_id
).scalar()
set_4_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_4_id).scalar()
current_firer_name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
current_army_no = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
current_session_no=db.session.query(TShooting.session_id).filter(TShooting.target_1_id==firer_id).scalar()
current_detail_no=db.session.query(TShooting.detail_no).filter(TShooting.target_1_id==firer_id).scalar()
xmpi_inch = pixeltoinch(xmpi)
ympi_inch = pixeltoinch(ympi)
xmpi_j =pd.Series(xmpi_inch).to_json(orient='values')
ympi_j =pd.Series(ympi_inch).to_json(orient='values')
Tfirt_x_j =pd.Series(Tfirt_x).to_json(orient='values')
Tfirt_y_j =pd.Series(Tfirt_y).to_json(orient='values')
for x_1 in fin_x_1:
fin_x_arr_1.append(int(x_1.final_x))
for y_1 in fin_y_1 :
fin_y_arr_1.append(int(y_1.final_y))
return jsonify(x1=t1_x ,
y1=t1_y ,
                        xmpi1=xmpi_j,
                        ympi1=ympi_j,
gp=gp,
txf1=Tfirt_x_j,
tyf1=Tfirt_y_j,
fx1=fin_x_arr_1,
fy1=fin_y_arr_1,
result_1=result_1,
fir_tendency_1=fir_tendency_1,
set_1_name=set_1_name,
current_firer_name=current_firer_name,
set_1_army=set_1_army,
current_army_no=current_army_no,
set_1_session_no=set_1_session_no,
current_session_no=current_session_no,
set_1_detail_no=set_1_detail_no,
current_detail_no=current_detail_no,
set_2_x=set_2_x_arr,
set_2_y=set_2_y_arr,
set_2_name=set_2_name,
set_2_army=set_2_army,
set_2_detail_no=set_2_detail_no,
set_2_session_no=set_2_session_no,
set_3_x=set_3_x_arr,
set_3_y=set_3_y_arr,
set_3_name=set_3_name,
set_3_army=set_3_army,
set_3_session_no=set_3_session_no,
set_3_detail_no=set_3_detail_no,
set_4_x=set_4_x_arr,
set_4_y=set_4_y_arr,
set_4_name=set_4_name,
set_4_army=set_4_army,
set_4_session_no=set_4_session_no,
set_4_detail_no=set_4_detail_no
)
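# Review endpoint for target 8: same flow as the routes above, driven by
# prediction_calculation_8() and the target_no==8 rows in T_Firer_Details.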
@app.route('/prediction_target_8/', methods=['GET', 'POST'])
def prediction_target_8():
t1_x=0
t1_y=0
xmpi_j=0
ympi_j=0
gp=0
Tfirt_x_j=0
Tfirt_y_j=0
fin_x_1=0
fin_y_1=0
xmpi_inch = 0
ympi_inch = 0
result_1=None
fir_tendency=None
set_1_name = None
set_1_army =None
set_1_session_no = None
set_1_detail_no=None
set_1_id =None
set_2_name = None
set_2_army =None
set_2_session_no = None
set_2_detail_no=None
set_2_id =None
set_3_name = None
set_3_army =None
set_3_session_no = None
set_3_detail_no=None
set_3_id =None
set_4_name = None
set_4_army =None
set_4_session_no = None
set_4_detail_no=None
set_4_id =None
fir_tendency_1=None
firer_id=None
current_army_no=None
current_firer_name=None
current_session_no=None
session_detail_no=None
current_detail_no=None
set_2_x=None
set_2_y=None
set_3_x=None
set_3_y=None
set_4_x=None
set_4_y=None
paper_ref=None
sess=None
res=None
set_2_x_arr=[]
set_2_y_arr=[]
set_3_x_arr=[]
set_3_y_arr=[]
set_4_x_arr=[]
set_4_y_arr=[]
fin_x_arr_1=[]
fin_y_arr_1=[]
curdate=time.strftime("%Y-%m-%d")
if request.method == 'POST':
firer_id,sess,o,p,u,q,t1_x,t1_y,xmpi,ympi,f,gp,Tfirt_x,Tfirt_y,fin_x_1,fin_y_1,result_1,fir_tendency_1=prediction_calculation_8()
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
set_2_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==8,T_Firer_Details.set_no==2 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_2_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==8 , T_Firer_Details.set_no==2 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_2 in set_2_x:
set_2_x_arr.append(int(x_2.final_x))
for y_2 in set_2_y:
set_2_y_arr.append(int(y_2.final_y))
set_3_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==8 , T_Firer_Details.set_no==3 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_3_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==8 , T_Firer_Details.set_no==3 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_3 in set_3_x:
set_3_x_arr.append(int(x_3.final_x))
for y_3 in set_3_y:
set_3_y_arr.append(int(y_3.final_y))
set_4_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==8 , T_Firer_Details.set_no==4 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_4_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==8 , T_Firer_Details.set_no==4 ,T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_4 in set_4_x:
set_4_x_arr.append(int(x_4.final_x))
for y_4 in set_4_y:
set_4_y_arr.append(int(y_4.final_y))
set_1_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==8,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==8,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==8,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_name=db.session.query(Shooter.name).filter(
Shooter.id==set_1_id
).scalar()
set_1_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_1_id).scalar()
set_2_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==8,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==8,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==8,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_name=db.session.query(Shooter.name).filter(
Shooter.id==set_2_id
).scalar()
set_2_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_2_id).scalar()
set_3_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==8,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==8,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==8,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_name=db.session.query(Shooter.name).filter(
Shooter.id==set_3_id
).scalar()
set_3_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_3_id).scalar()
set_4_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==8,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==8,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==8,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_name=db.session.query(Shooter.name).filter(
Shooter.id==set_4_id
).scalar()
set_4_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_4_id).scalar()
current_firer_name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
current_army_no = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
current_session_no=db.session.query(TShooting.session_id).filter(TShooting.target_1_id==firer_id).scalar()
current_detail_no=db.session.query(TShooting.detail_no).filter(TShooting.target_1_id==firer_id).scalar()
xmpi_inch = pixeltoinch(xmpi)
ympi_inch = pixeltoinch(ympi)
xmpi_j =pd.Series(xmpi_inch).to_json(orient='values')
ympi_j =pd.Series(ympi_inch).to_json(orient='values')
Tfirt_x_j =pd.Series(Tfirt_x).to_json(orient='values')
Tfirt_y_j =pd.Series(Tfirt_y).to_json(orient='values')
for x_1 in fin_x_1:
fin_x_arr_1.append(int(x_1.final_x))
for y_1 in fin_y_1 :
fin_y_arr_1.append(int(y_1.final_y))
return jsonify(x1=t1_x ,
y1=t1_y ,
                        xmpi1=xmpi_j,
                        ympi1=ympi_j,
gp=gp,
txf1=Tfirt_x_j,
tyf1=Tfirt_y_j,
fx1=fin_x_arr_1,
fy1=fin_y_arr_1,
result_1=result_1,
fir_tendency_1=fir_tendency_1,
set_1_name=set_1_name,
current_firer_name=current_firer_name,
set_1_army=set_1_army,
current_army_no=current_army_no,
set_1_session_no=set_1_session_no,
current_session_no=current_session_no,
set_1_detail_no=set_1_detail_no,
current_detail_no=current_detail_no,
set_2_x=set_2_x_arr,
set_2_y=set_2_y_arr,
set_2_name=set_2_name,
set_2_army=set_2_army,
set_2_detail_no=set_2_detail_no,
set_2_session_no=set_2_session_no,
set_3_x=set_3_x_arr,
set_3_y=set_3_y_arr,
set_3_name=set_3_name,
set_3_army=set_3_army,
set_3_session_no=set_3_session_no,
set_3_detail_no=set_3_detail_no,
set_4_x=set_4_x_arr,
set_4_y=set_4_y_arr,
set_4_name=set_4_name,
set_4_army=set_4_army,
set_4_session_no=set_4_session_no,
set_4_detail_no=set_4_detail_no
)
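# Review page for firing points 1-4: looks up the name, service number and rank of the
# firers currently assigned to targets 1-4 in TShooting and renders them.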
@app.route('/previous_page_target_1/', methods=['GET', 'POST'])
def previous_page_target_1():
T1_name = db.session.query(Shooter.name).filter(Shooter.id==TShooting.target_1_id).scalar()
T1_service = db.session.query(Shooter.service_id).filter(Shooter.id==TShooting.target_1_id).scalar()
T1_r_id = db.session.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_1_id).scalar()
T1_rank = db.session.query(Rank.name).filter(Rank.id==T1_r_id).scalar()
T2_name = db.session.query(Shooter.name).filter(Shooter.id==TShooting.target_2_id).scalar()
T2_service = db.session.query(Shooter.service_id).filter(Shooter.id==TShooting.target_2_id).scalar()
T2_r_id = db.session.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_2_id).scalar()
T2_rank = db.session.query(Rank.name).filter(Rank.id==T2_r_id).scalar()
T3_name = db.session.query(Shooter.name).filter(Shooter.id==TShooting.target_3_id).scalar()
T3_service = db.session.query(Shooter.service_id).filter(Shooter.id==TShooting.target_3_id).scalar()
T3_r_id = db.session.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_3_id).scalar()
T3_rank = db.session.query(Rank.name).filter(Rank.id==T3_r_id).scalar()
T4_name = db.session.query(Shooter.name).filter(Shooter.id==TShooting.target_4_id).scalar()
T4_service = db.session.query(Shooter.service_id).filter(Shooter.id==TShooting.target_4_id).scalar()
T4_r_id = db.session.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_4_id).scalar()
T4_rank = db.session.query(Rank.name).filter(Rank.id==T4_r_id).scalar()
print(T1_rank)
print(T2_rank)
print(T3_rank)
print(T4_rank)
return render_template('pages/previous_page_target_1.html' ,
T1_name=T1_name,
T1_service=T1_service,
T2_name=T2_name,
T2_service=T2_service,
T3_name=T3_name,
T3_service=T3_service,
T4_name=T4_name,
T4_service=T4_service,
T4_rank=T4_rank,
T1_rank=T1_rank,
T2_rank=T2_rank,
T3_rank=T3_rank
)
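# Review page for firing points 5-8: same lookup as previous_page_target_1, for the
# firers assigned to targets 5-8.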
@app.route('/previous_page_target_5/', methods=['GET', 'POST'])
def previous_page_target_5():
T5_name = db.session.query(Shooter.name).filter(Shooter.id==TShooting.target_5_id).scalar()
T5_service = db.session.query(Shooter.service_id).filter(Shooter.id==TShooting.target_5_id).scalar()
T5_r_id = db.session.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_5_id).scalar()
T5_rank = db.session.query(Rank.name).filter(Rank.id==T5_r_id).scalar()
T6_name = db.session.query(Shooter.name).filter(Shooter.id==TShooting.target_6_id).scalar()
T6_service = db.session.query(Shooter.service_id).filter(Shooter.id==TShooting.target_6_id).scalar()
T6_r_id = db.session.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_6_id).scalar()
T6_rank = db.session.query(Rank.name).filter(Rank.id==T6_r_id).scalar()
T7_name = db.session.query(Shooter.name).filter(Shooter.id==TShooting.target_7_id).scalar()
T7_service = db.session.query(Shooter.service_id).filter(Shooter.id==TShooting.target_7_id).scalar()
T7_r_id = db.session.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_7_id).scalar()
T7_rank = db.session.query(Rank.name).filter(Rank.id==T7_r_id).scalar()
T8_name = db.session.query(Shooter.name).filter(Shooter.id==TShooting.target_8_id).scalar()
T8_service = db.session.query(Shooter.service_id).filter(Shooter.id==TShooting.target_8_id).scalar()
T8_r_id = db.session.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_8_id).scalar()
T8_rank = db.session.query(Rank.name).filter(Rank.id==T8_r_id).scalar()
return render_template('pages/previous_page_target_5.html' ,
T5_name=T5_name,
T5_service=T5_service,
T6_name=T6_name,
T6_service=T6_service,
T7_name=T7_name,
T7_service=T7_service,
T8_name=T8_name,
T8_service=T8_service,
T5_rank=T5_rank,
T6_rank=T6_rank,
T7_rank=T7_rank,
T8_rank=T8_rank
)
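# Shot-detection pipeline for target 1. Reads the active firer/session/detail/set and
# paper reference from TShooting/TPaper_ref, opens the scanned target image (1.png) and
# runs it through helpers assumed to be defined earlier in this module: predictAsMatrix()
# builds the predicted hit matrix (scanned here as an 80x80 grid), Graph.countIslands()
# counts bullet-hole clusters, points() populates the pointsarray buffer used by kmean()
# to locate cluster centroids, and mpi(), firing_tendancy(), grouping_length() and the
# get*text() helpers derive the mean point of impact, tendency, grouping length and
# result text. Returns an 18-element tuple:
#   (firer_id, sess_id, detail_id, target_no, set_no, paper_ref,
#    X_json, Y_json, xmpi1, ympi1, f1, gp_1, firt_x, firt_y,
#    data_x_1, data_y_1, result_1, fir_tendency_txt)
# which the corresponding prediction and save handlers unpack positionally. Note: unlike
# the other prediction_calculation_* helpers, the previous-shot queries here read
# Firer_Details rather than T_Firer_Details.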
def prediction_calculation_1():
curdate=time.strftime("%Y-%m-%d")
X_json=0
Y_json=0
firer_id =db.session.query(TShooting.target_1_id).scalar()
sess_id = db.session.query(TShooting.session_id).scalar()
detail_id = db.session.query(TShooting.detail_no).scalar()
target_no=1
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
print(paper_ref )
data_x_1=db.session.query(Firer_Details).filter(Firer_Details.date==curdate , Firer_Details.target_no==1 , Firer_Details.set_no==1 , Firer_Details.paper_ref==paper_ref , Firer_Details.session_id==sess_id).all()
data_y_1=db.session.query(Firer_Details).filter(Firer_Details.date==curdate , Firer_Details.target_no==1 , Firer_Details.set_no==1 , Firer_Details.paper_ref==paper_ref , Firer_Details.session_id==sess_id).all()
print(data_x_1)
set_no=db.session.query(TShooting.set_no).scalar()
paper_ref=db.session.query(TShooting.paper_ref).scalar()
print('Old x')
print(data_x_1)
image=Image.open('E:/FRAS Windows/FRAS_production/static/img_dump/1.png')
#image=Image.open('/Users/wasifaahmed/Documents/FRAS/Fras_production_v.0.1/FRAS Windows/FRAS Windows/FRAS_production/static/img_dump/1.png')
w,h = image.size
predictedMatrix = predictAsMatrix(image,w,h)
g= Graph(80, 80, predictedMatrix)
N=g.countIslands()
points(predictedMatrix,h=80,w=80)
centroids=kmean(N,pointsarray)
print(centroids)
if(centroids is None):
            x=0
            y=0
            mpit=0
            xmpi1=0
            ympi1=0
            f1=0
firt_x=0
firt_y=0
fir_tendency_code=""
fir_tendency_txt=""
gp_1=""
result_1=""
else:
x= centroids [:, 1]
y= 2000-centroids [:, 0]
X_json=pd.Series(x).to_json(orient='values')
Y_json = pd.Series(y).to_json(orient='values')
mpit=mpi(1,centroids)
xmpi1 = mpit [:, 1]
ympi1 = 2000-mpit [:, 0]
f1 ,firt_x,firt_y= firing_tendancy(1000, 1000 , xmpi1, ympi1)
fir_tendency_txt,fir_tendency_code = getfiringtendencytext(f1 ,firt_x,firt_y)
gp_1 = grouping_length(0 , 0 , x , y)
result_1 =getresulttext(gp_1)
return (firer_id,
sess_id,
detail_id,
target_no,
set_no,
paper_ref,
X_json,
Y_json,
xmpi1,
ympi1,
f1,
gp_1,
firt_x,
firt_y,
data_x_1,
data_y_1,
result_1,
fir_tendency_txt
)
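# Same pipeline as prediction_calculation_1, applied to target 2 (image 2.png,
# previous shots read from T_Firer_Details).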
def prediction_calculation_2():
curdate=time.strftime("%Y-%m-%d")
X_json=0
Y_json=0
firer_id =db.session.query(TShooting.target_2_id).scalar()
sess_id = db.session.query(TShooting.session_id).scalar()
detail_id = db.session.query(TShooting.detail_no).scalar()
target_no=2
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
print(paper_ref )
data_x_1=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==2 , T_Firer_Details.set_no==1 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess_id).all()
data_y_1=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==2 , T_Firer_Details.set_no==1 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess_id).all()
print(data_x_1)
set_no=db.session.query(TShooting.set_no).scalar()
paper_ref=db.session.query(TShooting.paper_ref).scalar()
print('Old x' )
print(data_x_1)
image=Image.open('E:/FRAS Windows/FRAS_production/static/img_dump/2.png')
w,h = image.size
predictedMatrix = predictAsMatrix(image,w,h)
g= Graph(80, 80, predictedMatrix)
N=g.countIslands()
points(predictedMatrix,h=80,w=80)
centroids=kmean(N,pointsarray)
if(centroids is None):
            x=0
            y=0
            mpit=0
            xmpi1=0
            ympi1=0
            f1=0
firt_x=0
firt_y=0
fir_tendency_code=""
fir_tendency_txt=""
gp_1=""
result_1=""
else:
x= centroids [:, 1]
y= 2000-centroids [:, 0]
X_json=pd.Series(x).to_json(orient='values')
Y_json = pd.Series(y).to_json(orient='values')
mpit=mpi(1,centroids)
xmpi1 = mpit [:, 1]
ympi1 = 2000-mpit [:, 0]
f1 ,firt_x,firt_y= firing_tendancy(1000, 1000 , xmpi1, ympi1)
fir_tendency_txt,fir_tendency_code = getfiringtendencytext(f1 ,firt_x,firt_y)
gp_1 = grouping_length(0 , 0 , x , y)
result_1 =getresulttext(gp_1)
return (firer_id,
sess_id,
detail_id,
target_no,
set_no,
paper_ref,
X_json,
Y_json,
xmpi1,
ympi1,
f1,
gp_1,
firt_x,
firt_y,
data_x_1,
data_y_1,
result_1,
fir_tendency_txt
)
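# Target 3 variant of the detection pipeline (image 3.png).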
def prediction_calculation_3():
X_json=0
Y_json=0
curdate=time.strftime("%Y-%m-%d")
firer_id =db.session.query(TShooting.target_3_id).scalar()
sess_id = db.session.query(TShooting.session_id).scalar()
detail_id = db.session.query(TShooting.detail_no).scalar()
target_no=3
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
print(paper_ref)
data_x_1=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==3 , T_Firer_Details.set_no==1 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess_id).all()
data_y_1=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==3 , T_Firer_Details.set_no==1 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess_id).all()
print(data_x_1)
set_no=db.session.query(TShooting.set_no).scalar()
paper_ref=db.session.query(TShooting.paper_ref).scalar()
print('Old x' )
print(data_x_1)
image=Image.open('E:/FRAS Windows/FRAS_production/static/img_dump/3.png')
w,h = image.size
predictedMatrix = predictAsMatrix(image,w,h)
g= Graph(80, 80, predictedMatrix)
N=g.countIslands()
points(predictedMatrix,h=80,w=80)
centroids=kmean(N,pointsarray)
if(centroids is None):
            x=0
            y=0
            mpit=0
            xmpi1=0
            ympi1=0
            f1=0
firt_x=0
firt_y=0
fir_tendency_code=""
fir_tendency_txt=""
gp_1=""
result_1=""
else:
x= centroids [:, 1]
y= 2000-centroids [:, 0]
X_json=pd.Series(x).to_json(orient='values')
Y_json = pd.Series(y).to_json(orient='values')
mpit=mpi(1,centroids)
xmpi1 = mpit [:, 1]
ympi1 = 2000-mpit [:, 0]
f1 ,firt_x,firt_y= firing_tendancy(1000, 1000 , xmpi1, ympi1)
fir_tendency_txt,fir_tendency_code = getfiringtendencytext(f1 ,firt_x,firt_y)
print("calling from prediction_calculation_1" )
gp_1 = grouping_length(0 , 0 , x , y)
result_1 =getresulttext(gp_1)
return (firer_id,
sess_id,
detail_id,
target_no,
set_no,
paper_ref,
X_json,
Y_json,
xmpi1,
ympi1,
f1,
gp_1,
firt_x,
firt_y,
data_x_1,
data_y_1,
result_1,
fir_tendency_txt
)
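# Target 4 variant of the detection pipeline (image 4.png). Note that data_y_1 queries
# only the final_y column, whereas most of the other variants fetch full rows.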
def prediction_calculation_4():
curdate=time.strftime("%Y-%m-%d")
firer_id =db.session.query(TShooting.target_4_id).scalar()
sess_id = db.session.query(TShooting.session_id).scalar()
detail_id = db.session.query(TShooting.detail_no).scalar()
target_no=4
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
print(paper_ref )
data_x_1=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==4 , T_Firer_Details.set_no==1 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess_id).all()
data_y_1=db.session.query(T_Firer_Details.final_y).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==4 , T_Firer_Details.set_no==1 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess_id).all()
print(data_x_1)
set_no=db.session.query(TShooting.set_no).scalar()
paper_ref=db.session.query(TShooting.paper_ref).scalar()
print('Old x' )
print(data_x_1)
image=Image.open('E:/FRAS Windows/FRAS_production/static/img_dump/4.png')
w,h = image.size
predictedMatrix = predictAsMatrix(image,w,h)
g= Graph(80, 80, predictedMatrix)
N=g.countIslands()
points(predictedMatrix,h=80,w=80)
centroids=kmean(N,pointsarray)
if(centroids is None):
            x=0
            y=0
            X_json=0
            Y_json=0
            mpit=0
            xmpi1=0
            ympi1=0
            f1=0
firt_x=0
firt_y=0
fir_tendency_code=""
fir_tendency_txt=""
gp_1=""
result_1=""
else:
x= centroids [:, 1]
y= 2000-centroids [:, 0]
X_json=pd.Series(x).to_json(orient='values')
Y_json = pd.Series(y).to_json(orient='values')
mpit=mpi(1,centroids)
xmpi1 = mpit [:, 1]
ympi1 = 2000-mpit [:, 0]
f1 ,firt_x,firt_y= firing_tendancy(1000, 1000 , xmpi1, ympi1)
fir_tendency_txt,fir_tendency_code = getfiringtendencytext(f1 ,firt_x,firt_y)
print("calling from prediction_calculation_1" )
gp_1 = grouping_length(0 , 0 , x , y)
result_1 =getresulttext(gp_1)
return (firer_id,
sess_id,
detail_id,
target_no,
set_no,
paper_ref,
X_json,
Y_json,
xmpi1,
ympi1,
f1,
gp_1,
firt_x,
firt_y,
data_x_1,
data_y_1,
result_1,
fir_tendency_txt
)
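# Target 5 variant of the detection pipeline (image 5.png).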
def prediction_calculation_5():
curdate=time.strftime("%Y-%m-%d")
firer_id =db.session.query(TShooting.target_5_id).scalar()
sess_id = db.session.query(TShooting.session_id).scalar()
detail_id = db.session.query(TShooting.detail_no).scalar()
target_no=5
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
print(paper_ref)
data_x_1=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==5 , T_Firer_Details.set_no==1 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess_id).all()
data_y_1=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==5 , T_Firer_Details.set_no==1 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess_id).all()
print(data_x_1)
set_no=db.session.query(TShooting.set_no).scalar()
paper_ref=db.session.query(TShooting.paper_ref).scalar()
print('Old x' )
print(data_x_1)
image=Image.open('E:/FRAS Windows/FRAS_production/static/img_dump/5.png')
w,h = image.size
predictedMatrix = predictAsMatrix(image,w,h)
g= Graph(80, 80, predictedMatrix)
N=g.countIslands()
points(predictedMatrix,h=80,w=80)
centroids=kmean(N,pointsarray)
if(centroids is None):
            x=0
            y=0
            X_json=0
            Y_json=0
            mpit=0
            xmpi1=0
            ympi1=0
            f1=0
firt_x=0
firt_y=0
fir_tendency_code=""
fir_tendency_txt=""
gp_1=""
result_1=""
else:
x= centroids [:, 1]
y= 2000-centroids [:, 0]
X_json=pd.Series(x).to_json(orient='values')
Y_json = pd.Series(y).to_json(orient='values')
mpit=mpi(1,centroids)
xmpi1 = mpit [:, 1]
ympi1 = 2000-mpit [:, 0]
f1 ,firt_x,firt_y= firing_tendancy(1000, 1000 , xmpi1, ympi1)
fir_tendency_txt,fir_tendency_code = getfiringtendencytext(f1 ,firt_x,firt_y)
print("calling from prediction_calculation_1" )
gp_1 = grouping_length(0 , 0 , x , y)
result_1 =getresulttext(gp_1)
return (firer_id,
sess_id,
detail_id,
target_no,
set_no,
paper_ref,
X_json,
Y_json,
xmpi1,
ympi1,
f1,
gp_1,
firt_x,
firt_y,
data_x_1,
data_y_1,
result_1,
fir_tendency_txt
)
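# Target 6 variant of the detection pipeline (image 6.png).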
def prediction_calculation_6():
curdate=time.strftime("%Y-%m-%d")
firer_id =db.session.query(TShooting.target_6_id).scalar()
sess_id = db.session.query(TShooting.session_id).scalar()
detail_id = db.session.query(TShooting.detail_no).scalar()
target_no=6
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
print(paper_ref)
data_x_1=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==6 , T_Firer_Details.set_no==1 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess_id).all()
data_y_1=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==6 , T_Firer_Details.set_no==1 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess_id).all()
print(data_x_1)
set_no=db.session.query(TShooting.set_no).scalar()
paper_ref=db.session.query(TShooting.paper_ref).scalar()
print('Old x' )
print(data_x_1)
image=Image.open('E:/FRAS Windows/FRAS_production/static/img_dump/6.png')
w,h = image.size
predictedMatrix = predictAsMatrix(image,w,h)
g= Graph(80, 80, predictedMatrix)
N=g.countIslands()
points(predictedMatrix,h=80,w=80)
centroids=kmean(N,pointsarray)
if(centroids is None):
            x=0
            y=0
            X_json=0
            Y_json=0
            mpit=0
            xmpi1=0
            ympi1=0
            f1=0
firt_x=0
firt_y=0
fir_tendency_code=""
fir_tendency_txt=""
gp_1=""
result_1=""
else:
x= centroids [:, 1]
y= 2000-centroids [:, 0]
X_json=pd.Series(x).to_json(orient='values')
Y_json = pd.Series(y).to_json(orient='values')
mpit=mpi(1,centroids)
xmpi1 = mpit [:, 1]
ympi1 = 2000-mpit [:, 0]
f1 ,firt_x,firt_y= firing_tendancy(1000, 1000 , xmpi1, ympi1)
fir_tendency_txt,fir_tendency_code = getfiringtendencytext(f1 ,firt_x,firt_y)
print("calling from prediction_calculation_1" )
gp_1 = grouping_length(0 , 0 , x , y)
result_1 =getresulttext(gp_1)
return (firer_id,
sess_id,
detail_id,
target_no,
set_no,
paper_ref,
X_json,
Y_json,
xmpi1,
ympi1,
f1,
gp_1,
firt_x,
firt_y,
data_x_1,
data_y_1,
result_1,
fir_tendency_txt
)
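# Target 7 variant of the detection pipeline (image 7.png); like the target 4 variant,
# data_y_1 fetches only the final_y column.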
def prediction_calculation_7():
curdate=time.strftime("%Y-%m-%d")
firer_id =db.session.query(TShooting.target_7_id).scalar()
sess_id = db.session.query(TShooting.session_id).scalar()
detail_id = db.session.query(TShooting.detail_no).scalar()
target_no=7
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
print(paper_ref)
data_x_1=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==7 , T_Firer_Details.set_no==1 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess_id).all()
data_y_1=db.session.query(T_Firer_Details.final_y).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==7 , T_Firer_Details.set_no==1 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess_id).all()
print(data_x_1)
set_no=db.session.query(TShooting.set_no).scalar()
paper_ref=db.session.query(TShooting.paper_ref).scalar()
print('Old x' )
print(data_x_1)
image=Image.open('E:/FRAS Windows/FRAS_production/static/img_dump/7.png')
w,h = image.size
predictedMatrix = predictAsMatrix(image,w,h)
g= Graph(80, 80, predictedMatrix)
N=g.countIslands()
points(predictedMatrix,h=80,w=80)
centroids=kmean(N,pointsarray)
if(centroids is None):
            x=0
            y=0
            X_json=0
            Y_json=0
            mpit=0
            xmpi1=0
            ympi1=0
            f1=0
firt_x=0
firt_y=0
fir_tendency_code=""
fir_tendency_txt=""
gp_1=""
result_1=""
else:
x= centroids [:, 1]
y= 2000-centroids [:, 0]
X_json=pd.Series(x).to_json(orient='values')
Y_json = pd.Series(y).to_json(orient='values')
mpit=mpi(1,centroids)
xmpi1 = mpit [:, 1]
ympi1 = 2000-mpit [:, 0]
f1 ,firt_x,firt_y= firing_tendancy(1000, 1000 , xmpi1, ympi1)
fir_tendency_txt,fir_tendency_code = getfiringtendencytext(f1 ,firt_x,firt_y)
print("calling from prediction_calculation_1" )
gp_1 = grouping_length(0 , 0 , x , y)
result_1 =getresulttext(gp_1)
return (firer_id,
sess_id,
detail_id,
target_no,
set_no,
paper_ref,
X_json,
Y_json,
xmpi1,
ympi1,
f1,
gp_1,
firt_x,
firt_y,
data_x_1,
data_y_1,
result_1,
fir_tendency_txt
)
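# Target 8 variant of the detection pipeline (image 8.png). It additionally clears the
# session before recomputing, unlike the other variants.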
def prediction_calculation_8():
session.clear()
curdate=time.strftime("%Y-%m-%d")
firer_id =db.session.query(TShooting.target_8_id).scalar()
sess_id = db.session.query(TShooting.session_id).scalar()
detail_id = db.session.query(TShooting.detail_no).scalar()
target_no=8
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
print(paper_ref)
data_x_1=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==8 , T_Firer_Details.set_no==1 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess_id).all()
data_y_1=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==8 , T_Firer_Details.set_no==1 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess_id).all()
print(data_x_1)
set_no=db.session.query(TShooting.set_no).scalar()
paper_ref=db.session.query(TShooting.paper_ref).scalar()
print('Old x' )
print(data_x_1)
image=Image.open('E:/FRAS Windows/FRAS_production/static/img_dump/8.png')
w,h = image.size
predictedMatrix = predictAsMatrix(image,w,h)
g= Graph(80, 80, predictedMatrix)
N=g.countIslands()
points(predictedMatrix,h=80,w=80)
centroids=kmean(N,pointsarray)
if(centroids is None):
            x=0
            y=0
            X_json=0
            Y_json=0
            mpit=0
            xmpi1=0
            ympi1=0
            f1=0
firt_x=0
firt_y=0
fir_tendency_code=""
fir_tendency_txt=""
gp_1=""
result_1=""
else:
x= centroids [:, 1]
y= 2000-centroids [:, 0]
X_json=pd.Series(x).to_json(orient='values')
Y_json = pd.Series(y).to_json(orient='values')
mpit=mpi(1,centroids)
xmpi1 = mpit [:, 1]
ympi1 = 2000-mpit [:, 0]
f1 ,firt_x,firt_y= firing_tendancy(1000, 1000 , xmpi1, ympi1)
fir_tendency_txt,fir_tendency_code = getfiringtendencytext(f1 ,firt_x,firt_y)
print("calling from prediction_calculation_1" )
gp_1 = grouping_length(0 , 0 , x , y)
result_1 =getresulttext(gp_1)
return (firer_id,
sess_id,
detail_id,
target_no,
set_no,
paper_ref,
X_json,
Y_json,
xmpi1,
ympi1,
f1,
gp_1,
firt_x,
firt_y,
data_x_1,
data_y_1,
result_1,
fir_tendency_txt
)
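# Ad-hoc save endpoint for target 1: currently a stub that redirects straight back to
# the targets 1-4 review page.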
@app.route('/save_adhoc_1/', methods=['GET', 'POST'])
def save_adhoc_1():
return redirect(url_for('previous_page_target_1'))
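# Persists the reviewed results for target 1. Re-runs prediction_calculation_1(), pulls
# values that appear to be operator adjustments from the session (corrected coordinates,
# MPI, grouping and tendency under the 'x1'/'y1'/'tmpi'/... keys), and writes them
# through savein_db(), savempi_db() and savegp_db() (assumed to be defined earlier in
# this module). The save_flag on TShooting and Session_Detail guards against saving the
# same detail twice, and an image_record row links the scan stored by save_image_1() to
# the firer before redirecting back to the review page.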
@app.route('/save_1/', methods=['GET', 'POST'])
def save_call_1():
print("this is save_call_1",file=sys.stderr)
final_x=[]
final_y=[]
tend_f_x_t = None
tend_f_y_t = None
x_list=None
y_list=None
if request.method == 'POST':
curr_date=date.today()
firer_id,session_id,detail_no,target_no,set_no,paper_no,x,y,mx1,my1,tendency,grouping_length,firt_x,firt_y,o,p,result,f=prediction_calculation_1()
t1= session.get('tmpi',None)
Tupdate=db.session.query(TShooting).scalar()
if(Tupdate.save_flag==1):
return render_template('errors/error_save.html')
else:
print("t1",file=sys.stderr)
print(t1,file=sys.stderr)
print(f,file=sys.stderr)
                if(t1 is None):
                    f_mpix_1=0
                    f_mpiy_1=0
                else:
                    f_mpix_1 = t1[ : 1 ]
                    f_mpiy_1 = t1[ : 0 ]
final_x_1 = session.get('x1', None)
final_y_1 = session.get('y1', None)
print(session.get('x1'),file=sys.stderr)
print("final_x_1",file=sys.stderr)
print(final_x_1,file=sys.stderr)
gp_1_f=session.get('gp_u_1', None)
res_u_1=session.get('res_u_1',None)
tend_f = session.get('tf_u_1', None)
tend_f_x = session.get('tfirer_x1', None)
tend_f_y = session.get('tfirer_y1', None)
tend_f_x_1 = session.get('tfirer_x1_f', None)
tend_f_y_1 = session.get('tfirer_y1_f', None)
if (x==0):
x=0
y=0
else:
x_len=len(x)
y_len=len(y)
x_ss=x[1:x_len-1]
y_ss=y[1:y_len-1]
x_split = x_ss.split(",")
y_split = y_ss.split(",")
x_list=[]
y_list=[]
for x_t in x_split:
x_list.append(float(x_t))
for y_t in y_split:
y_list.append(float(y_t))
print(final_x_1,file=sys.stderr)
box = savein_db(firer_id,session_id,detail_no,target_no,set_no,paper_no,x_list,y_list,final_x_1,final_y_1)
mpi=savempi_db(detail_no,target_no,paper_no,firer_id,firt_x,firt_y,tendency,session_id,set_no,tend_f,tend_f_x ,tend_f_y,tend_f_x_1,tend_f_y_1,f)
gp=savegp_db(firer_id,session_id,detail_no,target_no,set_no,paper_no,grouping_length,gp_1_f,res_u_1,result)
Tupdate.save_flag=1
db.session.commit()
Supdate=db.session.query(Session_Detail).filter(
Session_Detail.session_id==session_id,
Session_Detail.detail_no==detail_no
).scalar()
Supdate.save_flag=1
print(Supdate)
db.session.commit()
image_save=save_image_1(firer_id)
image = image_record(
date=time.strftime("%Y-%m-%d"),
datetimestamp = time.strftime("%Y-%m-%d %H:%M"),
session_id=session_id,
detail_id=detail_no,
firer_id=firer_id,
target_no=target_no,
set_no=set_no,
paper_ref=paper_no,
image_name=image_save
)
db.session.add(image)
db.session.commit()
return redirect(url_for('previous_page_target_1'))
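# Save handler for target 2: same persistence flow as save_call_1 but without the
# save_flag bookkeeping, reading the '*_2' session keys and storing the scan via
# save_image_2().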
@app.route('/save_2/', methods=['GET', 'POST'])
def save_call_2():
final_x_1=[]
final_y_1=[]
x_list=None
y_list=None
tend_f_x_t = None
tend_f_y_t = None
if request.method == 'POST':
firer_id,session_id,detail_no,target_no,set_no,paper_no,x,y,mx1,my1,tendency,grouping_length,firt_x,firt_y,o,p,result,f=prediction_calculation_2()
t1= session.get('tmpi_2',None)
        f_mpix_1 = t1[ : 1 ] if t1 is not None else 0
        f_mpiy_1 = t1[ : 0 ] if t1 is not None else 0
final_x_1 = session.get('x2', None)
final_y_1 = session.get('y2', None)
gp_1_f=session.get('gp_u_2', None)
res_u_1=session.get('res_u_2',None)
tend_f = session.get('tf_u_2', None)
tend_f_x = session.get('tfirer_x2', None)
tend_f_y = session.get('tfirer_y2', None)
tend_f_x_1 = session.get('tfirer_x1_f', None)
tend_f_y_1 = session.get('tfirer_y1_f', None)
if (x==0):
x=0
y=0
else:
x_len=len(x)
y_len=len(y)
x_ss=x[1:x_len-1]
y_ss=y[1:y_len-1]
x_split = x_ss.split(",")
y_split = y_ss.split(",")
x_list=[]
y_list=[]
for x_t in x_split:
x_list.append(float(x_t))
for y_t in y_split:
y_list.append(float(y_t))
print(x_list,file=sys.stderr)
box = savein_db(firer_id,session_id,detail_no,target_no,set_no,paper_no,x_list,y_list,final_x_1,final_y_1)
mpi=savempi_db(detail_no,target_no,paper_no,firer_id,firt_x,firt_y,tendency,session_id,set_no,tend_f,tend_f_x ,tend_f_y,tend_f_x_1,tend_f_y_1)
gp=savegp_db(firer_id,session_id,detail_no,target_no,set_no,paper_no,grouping_length,gp_1_f,res_u_1)
image_save=save_image_2(firer_id)
image = image_record(
date=time.strftime("%x"),
datetimestamp = time.strftime("%Y-%m-%d %H:%M"),
session_id=session_id,
detail_id=detail_no,
firer_id=firer_id,
target_no=target_no,
set_no=set_no,
paper_ref=paper_no,
image_name=image_save
)
db.session.add(image)
db.session.commit()
return redirect(url_for('previous_page_target_1'))
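# Save handler for target 3; mirrors save_call_2 (note that it still reads the '*_2'
# session keys as written) and stores the scan via save_image_3().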
@app.route('/save_3/', methods=['GET', 'POST'])
def save_call_3():
final_x_1=[]
final_y_1=[]
x_list=None
y_list=None
tend_f_x_t = None
tend_f_y_t = None
if request.method == 'POST':
firer_id,session_id,detail_no,target_no,set_no,paper_no,x,y,mx1,my1,tendency,grouping_length,firt_x,firt_y,o,p,result,f=prediction_calculation_3()
t1= session.get('tmpi_2',None)
        f_mpix_1 = t1[ : 1 ] if t1 is not None else 0
        f_mpiy_1 = t1[ : 0 ] if t1 is not None else 0
final_x_1 = session.get('x2', None)
final_y_1 = session.get('y2', None)
gp_1_f=session.get('gp_u_2', None)
res_u_1=session.get('res_u_2',None)
tend_f = session.get('tf_u_2', None)
tend_f_x = session.get('tfirer_x2', None)
tend_f_y = session.get('tfirer_y2', None)
tend_f_x_1 = session.get('tfirer_x1_f', None)
tend_f_y_1 = session.get('tfirer_y1_f', None)
if (x==0):
x=0
y=0
else:
x_len=len(x)
y_len=len(y)
x_ss=x[1:x_len-1]
y_ss=y[1:y_len-1]
x_split = x_ss.split(",")
y_split = y_ss.split(",")
x_list=[]
y_list=[]
for x_t in x_split:
x_list.append(float(x_t))
for y_t in y_split:
y_list.append(float(y_t))
print(x_list,file=sys.stderr)
box = savein_db(firer_id,session_id,detail_no,target_no,set_no,paper_no,x_list,y_list,final_x_1,final_y_1)
mpi=savempi_db(detail_no,target_no,paper_no,firer_id,firt_x,firt_y,tendency,session_id,set_no,tend_f,tend_f_x ,tend_f_y,tend_f_x_1,tend_f_y_1)
gp=savegp_db(firer_id,session_id,detail_no,target_no,set_no,paper_no,grouping_length,gp_1_f,res_u_1)
image_save=save_image_3(firer_id)
image = image_record(
date=time.strftime("%x"),
datetimestamp = time.strftime("%Y-%m-%d %H:%M"),
session_id=session_id,
detail_id=detail_no,
firer_id=firer_id,
target_no=target_no,
set_no=set_no,
paper_ref=paper_no,
image_name=image_save
)
db.session.add(image)
db.session.commit()
return redirect(url_for('previous_page_target_1'))
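# Save handler for target 4; mirrors save_call_2 (still reading the '*_2' session keys)
# and stores the scan via save_image_4().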
@app.route('/save_4/', methods=['GET', 'POST'])
def save_call_4():
final_x_1=[]
final_y_1=[]
x_list=None
y_list=None
tend_f_x_t = None
tend_f_y_t = None
if request.method == 'POST':
firer_id,session_id,detail_no,target_no,set_no,paper_no,x,y,mx1,my1,tendency,grouping_length,firt_x,firt_y,o,p,result,f=prediction_calculation_4()
t1= session.get('tmpi_2',None)
        f_mpix_1 = t1[ : 1 ] if t1 is not None else 0
        f_mpiy_1 = t1[ : 0 ] if t1 is not None else 0
final_x_1 = session.get('x2', None)
final_y_1 = session.get('y2', None)
gp_1_f=session.get('gp_u_2', None)
res_u_1=session.get('res_u_2',None)
tend_f = session.get('tf_u_2', None)
tend_f_x = session.get('tfirer_x2', None)
tend_f_y = session.get('tfirer_y2', None)
tend_f_x_1 = session.get('tfirer_x1_f', None)
tend_f_y_1 = session.get('tfirer_y1_f', None)
if (x==0):
x=0
y=0
else:
x_len=len(x)
y_len=len(y)
x_ss=x[1:x_len-1]
y_ss=y[1:y_len-1]
x_split = x_ss.split(",")
y_split = y_ss.split(",")
x_list=[]
y_list=[]
for x_t in x_split:
x_list.append(float(x_t))
for y_t in y_split:
y_list.append(float(y_t))
print(x_list,file=sys.stderr)
box = savein_db(firer_id,session_id,detail_no,target_no,set_no,paper_no,x_list,y_list,final_x_1,final_y_1)
mpi=savempi_db(detail_no,target_no,paper_no,firer_id,firt_x,firt_y,tendency,session_id,set_no,tend_f,tend_f_x ,tend_f_y,tend_f_x_1,tend_f_y_1)
gp=savegp_db(firer_id,session_id,detail_no,target_no,set_no,paper_no,grouping_length,gp_1_f,res_u_1)
image_save=save_image_4(firer_id)
image = image_record(
date=time.strftime("%x"),
datetimestamp = time.strftime("%Y-%m-%d %H:%M"),
session_id=session_id,
detail_id=detail_no,
firer_id=firer_id,
target_no=target_no,
set_no=set_no,
paper_ref=paper_no,
image_name=image_save
)
db.session.add(image)
db.session.commit()
return redirect(url_for('previous_page_target_1'))
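# Save handler for target 5; mirrors save_call_2 (still reading the '*_2' session keys),
# stores the scan via save_image_5() and redirects to the targets 5-8 review page.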
@app.route('/save_5/', methods=['GET', 'POST'])
def save_call_5():
final_x_1=[]
final_y_1=[]
x_list=None
y_list=None
tend_f_x_t = None
tend_f_y_t = None
if request.method == 'POST':
firer_id,session_id,detail_no,target_no,set_no,paper_no,x,y,mx1,my1,tendency,grouping_length,firt_x,firt_y,o,p,result,f=prediction_calculation_5()
t1= session.get('tmpi_2',None)
        f_mpix_1 = t1[ : 1 ] if t1 is not None else 0
        f_mpiy_1 = t1[ : 0 ] if t1 is not None else 0
final_x_1 = session.get('x2', None)
final_y_1 = session.get('y2', None)
gp_1_f=session.get('gp_u_2', None)
res_u_1=session.get('res_u_2',None)
tend_f = session.get('tf_u_2', None)
tend_f_x = session.get('tfirer_x2', None)
tend_f_y = session.get('tfirer_y2', None)
tend_f_x_1 = session.get('tfirer_x1_f', None)
tend_f_y_1 = session.get('tfirer_y1_f', None)
if (x==0):
x=0
y=0
else:
x_len=len(x)
y_len=len(y)
x_ss=x[1:x_len-1]
y_ss=y[1:y_len-1]
x_split = x_ss.split(",")
y_split = y_ss.split(",")
x_list=[]
y_list=[]
for x_t in x_split:
x_list.append(float(x_t))
for y_t in y_split:
y_list.append(float(y_t))
print(x_list,file=sys.stderr)
box = savein_db(firer_id,session_id,detail_no,target_no,set_no,paper_no,x_list,y_list,final_x_1,final_y_1)
mpi=savempi_db(detail_no,target_no,paper_no,firer_id,firt_x,firt_y,tendency,session_id,set_no,tend_f,tend_f_x,tend_f_y,tend_f_x_1,tend_f_y_1,f)
gp=savegp_db(firer_id,session_id,detail_no,target_no,set_no,paper_no,grouping_length,gp_1_f,res_u_1,result)
image_save=save_image_5(firer_id)
image = image_record(
date=time.strftime("%x"),
datetimestamp = time.strftime("%Y-%m-%d %H:%M"),
session_id=session_id,
detail_id=detail_no,
firer_id=firer_id,
target_no=target_no,
set_no=set_no,
paper_ref=paper_no,
image_name=image_save
)
db.session.add(image)
db.session.commit()
return redirect(url_for('previous_page_target_5'))
@app.route('/save_6/', methods=['GET', 'POST'])
def save_call_6():
final_x_1=[]
final_y_1=[]
x_list=None
y_list=None
tend_f_x_t = None
tend_f_y_t = None
if request.method == 'POST':
firer_id,session_id,detail_no,target_no,set_no,paper_no,x,y,mx1,my1,tendency,grouping_length,firt_x,firt_y,o,p,result,f=prediction_calculation_6()
t1= session.get('tmpi_2',None)
f_mpix_1 = t1[ : 1 ]
f_mpiy_1=t1[ : 0 ]
final_x_1 = session.get('x2', None)
final_y_1 = session.get('y2', None)
gp_1_f=session.get('gp_u_2', None)
res_u_1=session.get('res_u_2',None)
tend_f = session.get('tf_u_2', None)
tend_f_x = session.get('tfirer_x2', None)
tend_f_y = session.get('tfirer_y2', None)
tend_f_x_1 = session.get('tfirer_x1_f', None)
tend_f_y_1 = session.get('tfirer_y1_f', None)
if (x==0):
x=0
y=0
else:
x_len=len(x)
y_len=len(y)
x_ss=x[1:x_len-1]
y_ss=y[1:y_len-1]
x_split = x_ss.split(",")
y_split = y_ss.split(",")
x_list=[]
y_list=[]
for x_t in x_split:
x_list.append(float(x_t))
for y_t in y_split:
y_list.append(float(y_t))
print(x_list,file=sys.stderr)
box = savein_db(firer_id,session_id,detail_no,target_no,set_no,paper_no,x_list,y_list,final_x_1,final_y_1)
mpi=savempi_db(detail_no,target_no,paper_no,firer_id,firt_x,firt_y,tendency,session_id,set_no,tend_f,tend_f_x,tend_f_y,tend_f_x_1,tend_f_y_1,f)
gp=savegp_db(firer_id,session_id,detail_no,target_no,set_no,paper_no,grouping_length,gp_1_f,res_u_1,result)
image_save=save_image_6(firer_id)
image = image_record(
date=time.strftime("%x"),
datetimestamp = time.strftime("%Y-%m-%d %H:%M"),
session_id=session_id,
detail_id=detail_no,
firer_id=firer_id,
target_no=target_no,
set_no=set_no,
paper_ref=paper_no,
image_name=image_save
)
db.session.add(image)
db.session.commit()
return redirect(url_for('previous_page_target_5'))
@app.route('/save_7/', methods=['GET', 'POST'])
def save_call_7():
final_x_1=[]
final_y_1=[]
x_list=None
y_list=None
tend_f_x_t = None
tend_f_y_t = None
if request.method == 'POST':
firer_id,session_id,detail_no,target_no,set_no,paper_no,x,y,mx1,my1,tendency,grouping_length,firt_x,firt_y,o,p,result,f=prediction_calculation_7()
t1= session.get('tmpi_2',None)
f_mpix_1 = t1[ : 1 ]
f_mpiy_1=t1[ : 0 ]
final_x_1 = session.get('x2', None)
final_y_1 = session.get('y2', None)
gp_1_f=session.get('gp_u_2', None)
res_u_1=session.get('res_u_2',None)
tend_f = session.get('tf_u_2', None)
tend_f_x = session.get('tfirer_x2', None)
tend_f_y = session.get('tfirer_y2', None)
tend_f_x_1 = session.get('tfirer_x1_f', None)
tend_f_y_1 = session.get('tfirer_y1_f', None)
if (x==0):
x=0
y=0
else:
x_len=len(x)
y_len=len(y)
x_ss=x[1:x_len-1]
y_ss=y[1:y_len-1]
x_split = x_ss.split(",")
y_split = y_ss.split(",")
x_list=[]
y_list=[]
for x_t in x_split:
x_list.append(float(x_t))
for y_t in y_split:
y_list.append(float(y_t))
print(x_list,file=sys.stderr)
box = savein_db(firer_id,session_id,detail_no,target_no,set_no,paper_no,x_list,y_list,final_x_1,final_y_1)
mpi=savempi_db(detail_no,target_no,paper_no,firer_id,firt_x,firt_y,tendency,session_id,set_no,tend_f,tend_f_x,tend_f_y,tend_f_x_1,tend_f_y_1,f)
gp=savegp_db(firer_id,session_id,detail_no,target_no,set_no,paper_no,grouping_length,gp_1_f,res_u_1,result)
image_save=save_image_7(firer_id)
image = image_record(
date=time.strftime("%x"),
datetimestamp = time.strftime("%Y-%m-%d %H:%M"),
session_id=session_id,
detail_id=detail_no,
firer_id=firer_id,
target_no=target_no,
set_no=set_no,
paper_ref=paper_no,
image_name=image_save
)
db.session.add(image)
db.session.commit()
return redirect(url_for('previous_page_target_5'))
@app.route('/save_8/', methods=['GET', 'POST'])
def save_call_8():
final_x_1=[]
final_y_1=[]
x_list=None
y_list=None
tend_f_x_t = None
tend_f_y_t = None
if request.method == 'POST':
firer_id,session_id,detail_no,target_no,set_no,paper_no,x,y,mx1,my1,tendency,grouping_length,firt_x,firt_y,o,p,result,f=prediction_calculation_8()
t1= session.get('tmpi_2',None)
f_mpix_1 = t1[ : 1 ]
f_mpiy_1=t1[ : 0 ]
final_x_1 = session.get('x2', None)
final_y_1 = session.get('y2', None)
gp_1_f=session.get('gp_u_2', None)
res_u_1=session.get('res_u_2',None)
tend_f = session.get('tf_u_2', None)
tend_f_x = session.get('tfirer_x2', None)
tend_f_y = session.get('tfirer_y2', None)
tend_f_x_1 = session.get('tfirer_x1_f', None)
tend_f_y_1 = session.get('tfirer_y1_f', None)
if (x==0):
x=0
y=0
else:
x_len=len(x)
y_len=len(y)
x_ss=x[1:x_len-1]
y_ss=y[1:y_len-1]
x_split = x_ss.split(",")
y_split = y_ss.split(",")
x_list=[]
y_list=[]
for x_t in x_split:
x_list.append(float(x_t))
for y_t in y_split:
y_list.append(float(y_t))
print(x_list,file=sys.stderr)
box = savein_db(firer_id,session_id,detail_no,target_no,set_no,paper_no,x_list,y_list,final_x_1,final_y_1)
mpi=savempi_db(detail_no,target_no,paper_no,firer_id,firt_x,firt_y,tendency,session_id,set_no,tend_f,tend_f_x,tend_f_y,tend_f_x_1,tend_f_y_1,f)
gp=savegp_db(firer_id,session_id,detail_no,target_no,set_no,paper_no,grouping_length,gp_1_f,res_u_1,result)
image_save=save_image_8(firer_id)
image = image_record(
date=time.strftime("%x"),
datetimestamp = time.strftime("%Y-%m-%d %H:%M"),
session_id=session_id,
detail_id=detail_no,
firer_id=firer_id,
target_no=target_no,
set_no=set_no,
paper_ref=paper_no,
image_name=image_save
)
db.session.add(image)
db.session.commit()
return redirect(url_for('previous_page_target_5'))
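# savein_db persists one record per shot into both T_Firer_Details and Firer_Details.
# It handles three input shapes: no edited ("final") coordinates, no detected raw
# coordinates, and mismatched list lengths, padding the shorter side with -1 via the
# making_array_* helpers so every row gets both a raw and a final (x, y) pair.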
def savein_db(firer_id,session_id,detail_no,target_no,set_no,paper_no,x,y,final_x,final_y):
try:
print("Save in DB",file=sys.stderr)
print("--------------",file=sys.stderr)
print(final_x,file=sys.stderr)
if(final_x is None):
print("if",file=sys.stderr)
i = 0
while i <len(x):
print(x[i],file=sys.stderr)
detail=T_Firer_Details(
date=time.strftime("%Y-%m-%d"),
datetimestamp = time.strftime("%Y-%m-%d %H:%M"),
session_id=session_id,
detail_id=detail_no,
target_no=target_no,
set_no=set_no,
paper_ref=paper_no,
firer_id=firer_id,
x=x[i],
y=y[i],
final_x=x[i],
final_y=y[i]
)
db.session.add(detail)
db.session.commit()
tdetail=Firer_Details(
date=time.strftime("%Y-%m-%d"),
datetimestamp = time.strftime("%Y-%m-%d %H:%M"),
session_id=session_id,
detail_id=detail_no,
target_no=target_no,
set_no=set_no,
paper_ref=paper_no,
firer_id=firer_id,
x=x[i],
y=y[i],
final_x=x[i],
final_y=y[i]
)
db.session.add(tdetail)
db.session.commit()
i=i+1
else:
print("x",file=sys.stderr)
print(x,file=sys.stderr)
if (x is None):
f_x ,f_y = making_array_null(x,y , len(final_x))
i = 0
while i < len(final_x):
detail1=T_Firer_Details(
date=time.strftime("%Y-%m-%d"),
datetimestamp = time.strftime("%Y-%m-%d %H:%M"),
session_id=session_id,
detail_id=detail_no,
target_no=target_no,
set_no=set_no,
paper_ref=paper_no,
firer_id=firer_id,
x=f_x[i],
y=f_y[i],
final_x=final_x[i][0],
final_y=final_y[i][0]
)
db.session.add(detail1)
db.session.commit()
tdetail1=Firer_Details(
date=time.strftime("%Y-%m-%d"),
datetimestamp = time.strftime("%Y-%m-%d %H:%M"),
session_id=session_id,
detail_id=detail_no,
target_no=target_no,
set_no=set_no,
paper_ref=paper_no,
firer_id=firer_id,
x=f_x[i],
y=f_y[i],
final_x=final_x[i][0],
final_y=final_y[i][0]
)
db.session.add(tdetail1)
db.session.commit()
i=i+1
else:
if(len(final_x)<len(x)):
f_x_f=[]
f_y_f=[]
f_x_f ,f_y_f = making_array_del(final_x, final_y , len(x))
z = 0
while z <len(x):
detail1=T_Firer_Details(
date=time.strftime("%Y-%m-%d"),
datetimestamp = time.strftime("%Y-%m-%d %H:%M"),
session_id=session_id,
detail_id=detail_no,
target_no=target_no,
set_no=set_no,
paper_ref=paper_no,
firer_id=firer_id,
x=x[z],
y=y[z],
final_x=f_x_f[z],
final_y=f_y_f[z]
)
db.session.add(detail1)
db.session.commit()
tdetail1=Firer_Details(
date=time.strftime("%Y-%m-%d"),
datetimestamp = time.strftime("%Y-%m-%d %H:%M"),
session_id=session_id,
detail_id=detail_no,
target_no=target_no,
set_no=set_no,
paper_ref=paper_no,
firer_id=firer_id,
x=x[z],
y=y[z],
final_x=f_x_f[z],
final_y=f_y_f[z]
)
db.session.add(tdetail1)
db.session.commit()
z=z+1
elif(len(x)<len(final_x)):
firer_x=[]
firer_y=[]
firer_x,firer_y =making_array_add(x,y ,len(final_x))
z=0
f_x_f1=[]
f_y_f1=[]
for h in range(len(final_x)):
f_x_f1.append(final_x[h][0])
f_y_f1.append(final_y[h][0])
while z <len(f_y_f1):
detail2=T_Firer_Details(
date=time.strftime("%Y-%m-%d"),
datetimestamp = time.strftime("%Y-%m-%d %H:%M"),
session_id=session_id,
detail_id=detail_no,
target_no=target_no,
set_no=set_no,
paper_ref=paper_no,
firer_id=firer_id,
x=firer_x[z],
y=firer_y[z],
final_x=f_x_f1[z],
final_y=f_y_f1[z]
)
db.session.add(detail2)
db.session.commit()
tdetail2=Firer_Details(
date=time.strftime("%Y-%m-%d"),
datetimestamp = time.strftime("%Y-%m-%d %H:%M"),
session_id=session_id,
detail_id=detail_no,
target_no=target_no,
set_no=set_no,
paper_ref=paper_no,
firer_id=firer_id,
x=firer_x[z],
y=firer_y[z],
final_x=f_x_f1[z],
final_y=f_y_f1[z]
)
db.session.add(tdetail2)
db.session.commit()
z=z+1
else:
z=0
f_x_f1=[]
f_y_f1=[]
for h in range(len(final_x)):
f_x_f1.append(final_x[h][0])
f_y_f1.append(final_y[h][0])
print(type(f_x_f1),f_x_f1[0],file=sys.stderr)
while z <len(x):
detail3=T_Firer_Details(
date=time.strftime("%Y-%m-%d"),
datetimestamp = time.strftime("%Y-%m-%d %H:%M"),
session_id=session_id,
detail_id=detail_no,
target_no=target_no,
set_no=set_no,
paper_ref=paper_no,
firer_id=firer_id,
x=x[z],
y=y[z],
final_x=int(f_x_f1[z]),
final_y=int(f_y_f1[z])
)
db.session.add(detail3)
db.session.commit()
tdetail3=Firer_Details(
date=time.strftime("%Y-%m-%d"),
datetimestamp = time.strftime("%Y-%m-%d %H:%M"),
session_id=session_id,
detail_id=detail_no,
target_no=target_no,
set_no=set_no,
paper_ref=paper_no,
firer_id=firer_id,
x=x[z],
y=y[z],
final_x=int(f_x_f1[z]),
final_y=int(f_y_f1[z])
)
db.session.add(tdetail3)
db.session.commit()
z=z+1
except Exception as e:
return redirect(url_for('error_6'))
return True
def making_array_null(x,y,l):
x1=[]
y1=[]
i=0
for i in range(l):
x1.append(-1)
y1.append(-1)
return x1 , y1
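# save_image_N copies target N's rendered PNG from static/img_dump into static/image_db
# and renames the copy to "<firer_id>_<timestamp>.png", returning the new file name so
# the caller can store it in the image_record table.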
def save_image_1(firer_id):
srcfile = 'E:/FRAS Windows/FRAS_production/static/img_dump/1.png'
dstdir = 'E:/FRAS Windows/FRAS_production/static/image_db'
#srcfile = '/Users/wasifaahmed/Documents/FRAS/Fras_production_v.0.1/FRAS Windows/FRAS Windows/FRAS_production/static/img_dump/1.png'
#dstdir = '/Users/wasifaahmed/Documents/FRAS/Fras_production_v.0.1/FRAS Windows/FRAS Windows/FRAS_production/static/image_db/'
shutil.copy(srcfile, dstdir)
old_file = os.path.join("E:/FRAS Windows/FRAS_production/static/image_db", "1.png")
#old_file = os.path.join('/Users/wasifaahmed/Documents/FRAS/Fras_production_v.0.1/FRAS Windows/FRAS Windows/FRAS_production/static/image_db/', "1.png")
newfilename=""
newfilename+=str(firer_id)
newfilename+="_"
newfilename+=time.strftime("%Y_%m_%d_%H_%M")
newfilename+=".png"
new_file = os.path.join("E:/FRAS Windows/FRAS_production/static/image_db", newfilename)
#new_file = os.path.join("/Users/wasifaahmed/Documents/FRAS/Fras_production_v.0.1/FRAS Windows/FRAS Windows/FRAS_production/static/image_db/", newfilename)
os.rename(old_file, new_file)
return newfilename
def save_image_2(firer_id):
srcfile = 'E:/FRAS Windows/FRAS_production/static/img_dump/2.png'
dstdir = 'E:/FRAS Windows/FRAS_production/static/image_db'
shutil.copy(srcfile, dstdir)
old_file = os.path.join("E:/FRAS Windows/FRAS_production/static/image_db", "2.png")
newfilename=""
newfilename+=str(firer_id)
newfilename+="_"
newfilename+=time.strftime("%Y_%m_%d_%H_%M")
newfilename+=".png"
new_file = os.path.join("E:/FRAS Windows/FRAS_production/static/image_db", newfilename)
os.rename(old_file, new_file)
return newfilename
def save_image_3(firer_id):
srcfile = 'E:/FRAS_production/static/img_dump/3.png'
dstdir = 'E:/FRAS Windows/FRAS_production/static/image_db'
shutil.copy(srcfile, dstdir)
old_file = os.path.join("E:/FRAS Windows/FRAS_production/static/image_db", "3.png")
newfilename=""
newfilename+=str(firer_id)
newfilename+="_"
newfilename+=time.strftime("%Y_%m_%d_%H_%M")
newfilename+=".png"
new_file = os.path.join("E:/FRAS Windows/FRAS_production/static/image_db", newfilename)
os.rename(old_file, new_file)
return newfilename
def save_image_4(firer_id):
srcfile = 'E:/FRAS_production/static/img_dump/4.png'
dstdir = 'E:/FRAS Windows/FRAS_production/static/image_db'
shutil.copy(srcfile, dstdir)
old_file = os.path.join("E:/FRAS Windows/FRAS_production/static/image_db", "4.png")
newfilename=""
newfilename+=str(firer_id)
newfilename+="_"
newfilename+=time.strftime("%Y_%m_%d_%H_%M")
newfilename+=".png"
new_file = os.path.join("E:/FRAS Windows/FRAS_production/static/image_db", newfilename)
os.rename(old_file, new_file)
return newfilename
def save_image_5(firer_id):
srcfile = 'E:/FRAS_production/static/img_dump/5.png'
dstdir = 'E:/FRAS Windows/FRAS_production/static/image_db'
shutil.copy(srcfile, dstdir)
old_file = os.path.join("E:/FRAS Windows/FRAS_production/static/image_db", "5.png")
newfilename=""
newfilename+=str(firer_id)
newfilename+="_"
newfilename+=time.strftime("%Y_%m_%d_%H_%M")
newfilename+=".png"
new_file = os.path.join("E:/FRAS Windows/FRAS_production/static/image_db", newfilename)
os.rename(old_file, new_file)
return newfilename
def save_image_6(firer_id):
srcfile = 'E:/FRAS_production/static/img_dump/6.png'
dstdir = 'E:/FRAS Windows/FRAS_production/static/image_db'
shutil.copy(srcfile, dstdir)
old_file = os.path.join("E:/FRAS Windows/FRAS_production/static/image_db", "6.png")
newfilename=""
newfilename+=str(firer_id)
newfilename+="_"
newfilename+=time.strftime("%Y_%m_%d_%H_%M")
newfilename+=".png"
new_file = os.path.join("E:/FRAS Windows/FRAS_production/static/image_db", newfilename)
os.rename(old_file, new_file)
return newfilename
def save_image_7(firer_id):
srcfile = 'E:/FRAS_production/static/img_dump/7.png'
dstdir = 'E:/FRAS Windows/FRAS_production/static/image_db'
shutil.copy(srcfile, dstdir)
old_file = os.path.join("E:/FRAS Windows/FRAS_production/static/image_db", "7.png")
newfilename=""
newfilename+=str(firer_id)
newfilename+="_"
newfilename+=time.strftime("%Y_%m_%d_%H_%M")
newfilename+=".png"
new_file = os.path.join("E:/FRAS Windows/FRAS_production/static/image_db", newfilename)
os.rename(old_file, new_file)
return newfilename
def save_image_8(firer_id):
srcfile = 'E:/FRAS_production/static/img_dump/8.png'
dstdir = 'E:/FRAS Windows/FRAS_production/static/image_db'
shutil.copy(srcfile, dstdir)
old_file = os.path.join("E:/FRAS Windows/FRAS_production/static/image_db", "8.png")
newfilename=""
newfilename+=str(firer_id)
newfilename+="_"
newfilename+=time.strftime("%Y_%m_%d_%H_%M")
newfilename+=".png"
new_file = os.path.join("E:/FRAS Windows/FRAS_production/static/image_db", newfilename)
os.rename(old_file, new_file)
return newfilename
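# savempi_db stores one MPI (mean point of impact) row per save: raw MPI, edited MPI,
# and the numeric/text tendency fields. It falls back to -1 plus the edited values when
# the raw MPI is missing, and to the raw values when no edited MPI exists.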
def savempi_db(detail_no,target_no,paper_no,firer_id,firt_x,firt_y,tendency,session_id,set_no,tend_f,tend_f_x ,tend_f_y,tend_f_x_1,tend_f_y_1,f):
try:
print("this is tend_f_x",file=sys.stderr)
print(tend_f_x_1,file=sys.stderr)
if(firt_x==0):
mpi= MPI (
date=time.strftime("%Y-%m-%d"),
datetimestamp = time.strftime("%Y-%m-%d %H:%M"),
session_id=session_id,
detail_no=detail_no,
target_no=target_no,
spell_no=set_no,
paper_ref=paper_no,
firer_id=firer_id,
mpi_x=-1,
mpi_y=-1,
f_mpi_x=tend_f_x_1,
f_mpi_y=tend_f_y_1,
tendency=-1,
tendency_f=int(tend_f),
tendency_text=tend_f_x,
tendency_code=tend_f_y
)
db.session.add(mpi)
db.session.commit()
else:
if(tend_f_x_1 is None):
mpi= MPI (
date=time.strftime("%Y-%m-%d"),
datetimestamp = time.strftime("%Y-%m-%d %H:%M"),
session_id=session_id,
detail_no=detail_no,
target_no=target_no,
spell_no=set_no,
paper_ref=paper_no,
firer_id=firer_id,
mpi_x=firt_x[0],
mpi_y=firt_y[0],
f_mpi_x=firt_x[0],
f_mpi_y=firt_y[0],
tendency=int(tendency),
tendency_f=int(tendency),
tendency_text=f,
tendency_code=f
)
db.session.add(mpi)
db.session.commit()
else:
mpi= MPI (
date=time.strftime("%Y-%m-%d"),
datetimestamp = time.strftime("%Y-%m-%d %H:%M"),
session_id=session_id,
detail_no=detail_no,
target_no=target_no,
spell_no=set_no,
paper_ref=paper_no,
firer_id=firer_id,
mpi_x=firt_x[0],
mpi_y=firt_y[0],
f_mpi_x=tend_f_x_1,
f_mpi_y=tend_f_y_1,
tendency=tendency,
tendency_f=int(tend_f),
tendency_text=tend_f_x,
tendency_code=tend_f_y
)
db.session.add(mpi)
db.session.commit()
except Exception as e:
return redirect(url_for('error_6'))
return True
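# savegp_db stores the grouping row: grouping_length is -1 when no length was measured,
# otherwise the measured value; grouping_length_f and result fall back to the unedited
# values when the session holds no user-updated grouping.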
def savegp_db(firer_id,session_id,detail_no,target_no,set_no,paper_no,gp_l,gp_f,result,result_p):
try:
print("gp_l",file=sys.stderr)
print(gp_l,file=sys.stderr)
if (gp_l==""):
gp=Grouping(
date=time.strftime("%Y-%m-%d"),
datetimestamp = time.strftime("%Y-%m-%d %H:%M"),
session_id=session_id,
detail_no=detail_no,
target_no=target_no,
spell_no=set_no,
paper_ref=paper_no,
firer_id=firer_id,
grouping_length=-1,
grouping_length_f=gp_f,
result = result
)
db.session.add(gp)
db.session.commit()
else:
if(gp_f is None):
gp=Grouping(
date=time.strftime("%Y-%m-%d"),
datetimestamp = time.strftime("%Y-%m-%d %H:%M"),
session_id=session_id,
detail_no=detail_no,
target_no=target_no,
spell_no=set_no,
paper_ref=paper_no,
firer_id=firer_id,
grouping_length=gp_l,
grouping_length_f=gp_l,
result = result_p
)
db.session.add(gp)
db.session.commit()
else:
gp=Grouping(
date=time.strftime("%Y-%m-%d"),
datetimestamp = time.strftime("%Y-%m-%d %H:%M"),
session_id=session_id,
detail_no=detail_no,
target_no=target_no,
spell_no=set_no,
paper_ref=paper_no,
firer_id=firer_id,
grouping_length=gp_l,
grouping_length_f=gp_f,
result = result
)
db.session.add(gp)
db.session.commit()
except Exception as e:
return redirect(url_for('error_6'))
return True
@app.errorhandler(500)
def internal_error(error):
return render_template('errors/500.html'), 500
@app.errorhandler(404)
def not_found_error(error):
return render_template('errors/404.html'), 404
@app.route('/duplicate_firer_error/')
def duplicate_firer_error():
return render_template('errors/duplicate.html')
@app.route('/paper_duplicate/')
def paper_duplicate_error():
return render_template('errors/paper_dup.html')
@app.route('/error_duplicate/')
def error_duplicate():
return render_template('errors/error_duplicate.html')
@app.route('/error/')
def error():
return render_template('errors/error_505.html')
@app.route('/error_2/')
def error_2():
return render_template('errors/error2_505.html')
@app.route('/error_102/')
def error_102():
return render_template('errors/error_102.html')
@app.route('/error_31/')
def error_31():
return render_template('errors/error31.html')
@app.route('/error_target_1/')
def error_target_1():
return render_template('errors/error_target_1.html')
@app.route('/error_3/')
def error_3():
return render_template('errors/error3_505.html')
@app.route('/error_4/')
def error_4():
return render_template('errors/error4_505.html')
@app.route('/error_5/')
def error_5():
return render_template('errors/error5_505.html')
@app.route('/error_6/')
def error_6():
return render_template('errors/error6_505.html')
@app.route('/error_7/')
def error_7():
return render_template('errors/error7_505.html')
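# making_array_del flattens the edited coordinate lists (lists of [value] pairs) and
# pads them with -1 up to length l; making_array_add does the same for the plain raw
# coordinate lists. Both keep the raw and edited columns the same length for savein_db.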
def making_array_del(x , y , l):
x_f=[]
y_f=[]
for i in range(len(x)):
x_f.append(x[i][0])
y_f.append(y[i][0])
for j in range(l-len(x)):
x_f.append(-1)
y_f.append(-1)
return x_f , y_f
def making_array_add(x , y , l):
x_1=[]
y_1=[]
for i in range(len(x)):
x_1.append(x[i])
y_1.append(y[i])
for j in range(l-len(x)):
x_1.append(-1)
y_1.append(-1)
return x_1 , y_1
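# firing_tendancy converts an MPI pixel position into a tendency angle (degrees,
# rounded) plus x/y offsets in inches relative to the target origin.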
def firing_tendancy(origin_x, origin_y , x, y):
print("x,y",file=sys.stderr)
print(x,y,file=sys.stderr)
x1 = origin_x-x
y1 = origin_y-y
xfirt=None
yfirt=None
deg = 0
h = math.sqrt(x1**2 + y1**2)
x_dis = x-origin_x
y_dis = y-origin_y
theta = math.degrees(y_dis/h)
if( x_dis > 0 and y_dis < 0 ):
deg = 360 - theta
xfirt=pixeltoinch(x_dis)
yfirt=pixeltoinch(y_dis)
elif (x_dis < 0 and y_dis < 0 ):
deg = 270 - theta
xfirt=pixeltoinch(x_dis)
yfirt=pixeltoinch(y_dis)
elif(x_dis < 0 and y_dis > 0 ):
deg = 180 - theta
xfirt=pixeltoinch(x_dis)
yfirt=pixeltoinch(y_dis)
else :
deg = theta
xfirt=pixeltoinch(x_dis)
yfirt=pixeltoinch(y_dis)
print("Sending xfirt....", file=sys.stderr)
print(xfirt, file=sys.stderr)
print(yfirt, file=sys.stderr)
return (np.round(deg,0) ,xfirt ,yfirt )
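# getfiringtendencytext turns the inch offsets into a human-readable tendency such as
# "Top(..) Right(..)"; offsets within 4.5 inches of the origin on both axes are
# reported as "Center".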
def getfiringtendencytext(f1 ,firt_x,firt_y):
print("Receiving xfirt....", file=sys.stderr)
print(firt_x, file=sys.stderr)
print(firt_y, file=sys.stderr)
fttext=""
ftcode=""
t1=""
t2=""
t1code=""
t2code=""
isbullseye=False
if(abs(firt_x)<=4.5 and abs(firt_y)<=4.5):
isbullseye=True
if firt_x >=0 and firt_y >=0:
t1="Top"
t2="Right"
t1code="T"
t2code="R"
elif firt_x <0 and firt_y >=0:
t1="Top"
t2="Left"
t1code="T"
t2code="L"
elif firt_x <0 and firt_y <0:
t1="Bottom"
t2="Left"
t1code="B"
t2code="L"
else:
t1="Bottom"
t2="Right"
t1code="B"
t2code="R"
if(isbullseye):
ftcode="Center"
fttext = "Center "+"("+str(firt_y)+" , "+str(firt_x)+")"
else:
ftcode=t1code+t2code
fttext = t1+"("+str(firt_y)+") "+t2+"("+str(firt_x)+")"
return fttext,ftcode
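# grouping_length returns the largest pairwise distance between shots (an O(n^2) scan
# over all point pairs), converted to inches with pixeltoinch.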
def grouping_length(xt,yt,x ,y):
d = {}
counter=0
for i in range(len(x)):
for j in range(len(x)):
d[counter]=distance(x[j],y[j],x[i],y[i])
counter+=1
maxdist = 0
for key in d.keys():
if(maxdist<d[key]):
maxdist= d[key]
maxdist_inch = pixeltoinch(maxdist)
return maxdist_inch
def distance (x1,y1,x,y):
dist = 0
xdist = x1 - x
ydist = y1 - y
dist = math.sqrt(xdist**2 + ydist**2)
return dist
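# pixeltoinch scales a pixel distance to inches using the fixed ratio 34/2000, i.e. the
# 2000 px cropped image is assumed to span a 34-inch target.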
def pixeltoinch(maxdist):
inch = (34/2000 *1.0)*maxdist
return np.round(inch,1)
def getresulttext(gpinch):
print(type(gpinch),file=sys.stderr)
print(gpinch,file=sys.stderr)
if gpinch <=10:
return "Pass"
else:
return "W/O"
@app.route('/previous_page_edit_1/')
def previous_page_edit_1():
return render_template('pages/image_edit_previous_1.html')
@app.route('/previous_page_edit_5/')
def previous_page_edit_5():
return render_template('pages/image_edit_previous_5.html')
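# The crop_data_N routes receive four corner points from the browser, derive the
# upper-left and lower-right corners of the crop box, crop and resize the raw camera
# image to 2000x2000, and write it to img_dump as both JPG and PNG. Targets 1 and 2
# additionally persist the crop box in the Crop table for later re-calibration.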
@app.route('/crop_data_1', methods=['GET', 'POST'])
def crop_data_1():
img = Image.open("E:/FRAS Windows/FRAS_production/static/raw_image/CAMERA1_1.JPG")
if request.method == "POST":
data=request.get_json()
point_1=data['data1'][0]
point_2=data['data2'][0]
point_3=data['data3'][0]
point_4=data['data4'][0]
print(point_1,file=sys.stderr)
print(point_2,file=sys.stderr)
print(point_3,file=sys.stderr)
print(point_4,file=sys.stderr)
points=[]
points.append(point_1)
points.append(point_2)
points.append(point_3)
points.append(point_4)
temp_points = []
for p in points:
temp_points.append(p)
l=99999
left1=None
for p in temp_points:
if l > p[0]:
l=p[0]
left1 = p
temp_points2 = []
for p in temp_points:
if(p[0]!=left1[0] and p[1]!=left1[1]):
temp_points2.append(p)
l2=99999
left2=None
for p in temp_points2:
if l2 > p[0]:
l2=p[0]
left2 = p
left = None
print("left1,left2",file=sys.stderr)
print(left1,left2,file=sys.stderr)
if left1[1]>left2[1]:
left = left1
else:
left = left2
r=-1000
right1=None
for p in points:
if r < p[0]:
r = p[0]
right1 = p
temp_points3 = []
for p in points:
if(p[0]!=right1[0] and p[1]!=right1[1]):
temp_points3.append(p)
r2=-1000
right2=None
for p in temp_points3:
if r2 < p[0]:
r2=p[0]
right2 = p
right = None
if right1[1]<right2[1]:
right = right1
else:
right = right2
print("right1,right2",file=sys.stderr)
print(right1,right2,file=sys.stderr)
print("left,right",file=sys.stderr)
print(left,right,file=sys.stderr)
x1=int(left[0])
if(x1>5470):
x1=5470
y1=int(3648.0-(left[1]))
if(y1<0):
y1=0
x2=int(right[0])+80
if(x2>5470):
x2=5470
y2=int(3648.0-(right[1]))
if(y2<0):
y2=0
x1=x1+50
y1=y1+50
x2=x2+50
y2=y2+50
print("x1,y1,x2, y2",file=sys.stderr)
print(x1,y1,x2, y2,file=sys.stderr)
img2 = img.crop((x1, y1, x2, y2))
resize_image=img2.resize((2000, 2000), Image.ANTIALIAS)
resize_image.save('E:/FRAS Windows/FRAS_production/static/img_dump/1.jpg', 'JPEG')
image_png=Image.open('E:/FRAS Windows/FRAS_production/static/img_dump/1.jpg')
#os.remove("E:/FRAS_production/static/img_dump/1.png")
image_png.save('E:/FRAS Windows/FRAS_production/static/img_dump/1.png')
sth = db.session.query(Crop).filter(Crop.target_no==1).scalar()
if sth is None:
crop =Crop(
target_no=1,
x1=x1,
y1=y1,
x2=x2,
y2=y2
)
db.session.add(crop)
db.session.commit()
else:
db.session.query(Crop).filter(Crop.target_no==1).delete()
db.session.commit()
crop =Crop(
target_no=1,
x1=x1,
y1=y1,
x2=x2,
y2=y2
)
db.session.add(crop)
db.session.commit()
return redirect(url_for('previous_page_edit_1'))
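# calibration_N re-crops the raw camera image using the crop box stored in the Crop
# table and rewrites the 2000x2000 JPG/PNG in img_dump, without changing the stored box.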
@app.route('/calibration_1', methods=['GET', 'POST'])
def calibration_1():
data = db.session.query(Crop).filter_by(target_no=1).scalar()
print(data.target_no,file=sys.stderr)
print(data.x1,file=sys.stderr)
x1=data.x1
y1=data.y1
x2=data.x2
y2=data.y2
img = Image.open('E:/FRAS Windows/FRAS_production/static/raw_image/CAMERA1_1.JPG')
img2 = img.crop((x1, y1, x2, y2))
resize_image=img2.resize((2000, 2000), Image.ANTIALIAS)
resize_image.save('E:/FRAS Windows/FRAS_production/static/img_dump/1.jpg', 'JPEG')
image_png=Image.open("E:/FRAS Windows/FRAS_production/static/img_dump/1.jpg")
image_png.save("E:/FRAS Windows/FRAS_production/static/img_dump/1.png")
return redirect(url_for('previous_page_edit_1'))
@app.route('/calibration_2', methods=['GET', 'POST'])
def calibration_2():
data = db.session.query(Crop).filter_by(target_no=2).scalar()
print(data.target_no,file=sys.stderr)
print(data.x1,file=sys.stderr)
x1=data.x1
y1=data.y1
x2=data.x2
y2=data.y2
img = Image.open('E:/FRAS Windows/FRAS_production/static/raw_image/CAMERA2_2.JPG')
img2 = img.crop((x1, y1, x2, y2))
resize_image=img2.resize((2000, 2000), Image.ANTIALIAS)
resize_image.save('E:/FRAS Windows/FRAS_production/static/img_dump/2.jpg', 'JPEG')
image_png=Image.open("E:/FRAS Windows/FRAS_production/static/img_dump/2.jpg")
image_png.save("E:/FRAS Windows/FRAS_production/static/img_dump/2png")
return redirect(url_for('previous_page_edit_1'))
@app.route('/crop_data_2', methods=['GET', 'POST'])
def crop_data_2():
img = Image.open("E:/FRAS Windows/FRAS_production/static/raw_image/CAMERA2_2.JPG")
if request.method == "POST":
data=request.get_json()
point_1=data['data1'][0]
point_2=data['data2'][0]
point_3=data['data3'][0]
point_4=data['data4'][0]
print(point_1,file=sys.stderr)
print(point_2,file=sys.stderr)
print(point_3,file=sys.stderr)
print(point_4,file=sys.stderr)
points=[]
points.append(point_1)
points.append(point_2)
points.append(point_3)
points.append(point_4)
temp_points = []
for p in points:
temp_points.append(p)
l=99999
left1=None
for p in temp_points:
if l > p[0]:
l=p[0]
left1 = p
temp_points2 = []
for p in temp_points:
if(p[0]!=left1[0] and p[1]!=left1[1]):
temp_points2.append(p)
l2=99999
left2=None
for p in temp_points2:
if l2 > p[0]:
l2=p[0]
left2 = p
left = None
print("left1,left2",file=sys.stderr)
print(left1,left2,file=sys.stderr)
if left1[1]>left2[1]:
left = left1
else:
left = left2
r=-1000
right1=None
for p in points:
if r < p[0]:
r = p[0]
right1 = p
temp_points3 = []
for p in points:
if(p[0]!=right1[0] and p[1]!=right1[1]):
temp_points3.append(p)
r2=-1000
right2=None
for p in temp_points3:
if r2 < p[0]:
r2=p[0]
right2 = p
right = None
if right1[1]<right2[1]:
right = right1
else:
right = right2
print("right1,right2",file=sys.stderr)
print(right1,right2,file=sys.stderr)
print("left,right",file=sys.stderr)
print(left,right,file=sys.stderr)
x1=int(left[0])
if(x1>5470):
x1=5470
y1=int(3648.0-(left[1]))
if(y1<0):
y1=0
x2=int(right[0])+80
if(x2>5470):
x2=5470
y2=int(3648.0-(right[1]))
if(y2<0):
y2=0
x1=x1+50
y1=y1+50
x2=x2+50
y2=y2+50
print("x1,y1,x2, y2",file=sys.stderr)
print(x1,y1,x2, y2,file=sys.stderr)
img2 = img.crop((x1, y1, x2, y2))
resize_image=img2.resize((2000, 2000), Image.ANTIALIAS)
resize_image.save('E:/FRAS Windows/FRAS_production/static/img_dump/2.jpg', 'JPEG')
image_png=Image.open('E:/FRAS Windows/FRAS_production/static/img_dump/2.jpg')
#os.remove("E:/FRAS_production/static/img_dump/1.png")
image_png.save('E:/FRAS Windows/FRAS_production/static/img_dump/2.png')
sth = db.session.query(Crop).filter(Crop.target_no==2).scalar()
if sth is None:
crop =Crop(
target_no=2,
x1=x1,
y1=y1,
x2=x2,
y2=y2
)
db.session.add(crop)
db.session.commit()
else:
db.session.query(Crop).filter(Crop.target_no==2).delete()
db.session.commit()
crop =Crop(
target_no=2,
x1=x1,
y1=y1,
x2=x2,
y2=y2
)
db.session.add(crop)
db.session.commit()
return redirect(url_for('previous_page_edit_1'))
@app.route('/crop_data_3', methods=['GET', 'POST'])
def crop_data_3():
img = Image.open("/Users/wasifaahmed/Documents/FRAS/FRAS_production/static/raw_image/CAMERA3_3.JPG")
if request.method == "POST":
data=request.get_json()
point_1=data['data1'][0]
point_2=data['data2'][0]
point_3=data['data3'][0]
point_4=data['data4'][0]
print(point_1,file=sys.stderr)
print(point_2,file=sys.stderr)
print(point_3,file=sys.stderr)
print(point_4,file=sys.stderr)
points=[]
points.append(point_1)
points.append(point_2)
points.append(point_3)
points.append(point_4)
temp_points = []
for p in points:
temp_points.append(p)
l=99999
left1=None
for p in temp_points:
if l > p[0]:
l=p[0]
left1 = p
temp_points2 = []
for p in temp_points:
if(p[0]!=left1[0] and p[1]!=left1[1]):
temp_points2.append(p)
l2=99999
left2=None
for p in temp_points2:
if l2 > p[0]:
l2=p[0]
left2 = p
left = None
print("left1,left2",file=sys.stderr)
print(left1,left2,file=sys.stderr)
if left1[1]>left2[1]:
left = left1
else:
left = left2
r=-1000
right1=None
for p in points:
if r < p[0]:
r = p[0]
right1 = p
temp_points3 = []
for p in points:
if(p[0]!=right1[0] and p[1]!=right1[1]):
temp_points3.append(p)
r2=-1000
right2=None
for p in temp_points3:
if r2 < p[0]:
r2=p[0]
right2 = p
right = None
if right1[1]<right2[1]:
right = right1
else:
right = right2
print("right1,right2",file=sys.stderr)
print(right1,right2,file=sys.stderr)
print("left,right",file=sys.stderr)
print(left,right,file=sys.stderr)
x1=int(left[0])
if(x1>5470):
x1=5470
y1=int(3648.0-(left[1]))
if(y1<0):
y1=0
x2=int(right[0])+80
if(x2>5470):
x2=5470
y2=int(3648.0-(right[1]))
if(y2<0):
y2=0
print("x1,y1,x2, y2",file=sys.stderr)
print(x1,y1,x2, y2,file=sys.stderr)
img2 = img.crop((x1, y1, x2, y2))
resize_image=img2.resize((2000, 2000), Image.ANTIALIAS)
resize_image.save('/Users/wasifaahmed/Documents/FRAS/FRAS_production/static/img_dump/3.jpg', 'JPEG')
image_png=Image.open("/Users/wasifaahmed/Documents/FRAS/FRAS_production/static/img_dump/3.jpg")
image_png.save("/Users/wasifaahmed/Documents/FRAS/FRAS_production/static/img_dump/3.png")
return redirect(url_for('previous_page_edit_1'))
@app.route('/crop_data_4', methods=['GET', 'POST'])
def crop_data_4():
img = Image.open("/Users/wasifaahmed/Documents/FRAS/FRAS_production/static/raw_image/CAMERA4_4.JPG")
if request.method == "POST":
data=request.get_json()
point_1=data['data1'][0]
point_2=data['data2'][0]
point_3=data['data3'][0]
point_4=data['data4'][0]
print(point_1,file=sys.stderr)
print(point_2,file=sys.stderr)
print(point_3,file=sys.stderr)
print(point_4,file=sys.stderr)
points=[]
points.append(point_1)
points.append(point_2)
points.append(point_3)
points.append(point_4)
temp_points = []
for p in points:
temp_points.append(p)
l=99999
left1=None
for p in temp_points:
if l > p[0]:
l=p[0]
left1 = p
temp_points2 = []
for p in temp_points:
if(p[0]!=left1[0] and p[1]!=left1[1]):
temp_points2.append(p)
l2=99999
left2=None
for p in temp_points2:
if l2 > p[0]:
l2=p[0]
left2 = p
left = None
print("left1,left2",file=sys.stderr)
print(left1,left2,file=sys.stderr)
if left1[1]>left2[1]:
left = left1
else:
left = left2
r=-1000
right1=None
for p in points:
if r < p[0]:
r = p[0]
right1 = p
temp_points3 = []
for p in points:
if(p[0]!=right1[0] and p[1]!=right1[1]):
temp_points3.append(p)
r2=-1000
right2=None
for p in temp_points3:
if r2 < p[0]:
r2=p[0]
right2 = p
right = None
if right1[1]<right2[1]:
right = right1
else:
right = right2
print("right1,right2",file=sys.stderr)
print(right1,right2,file=sys.stderr)
print("left,right",file=sys.stderr)
print(left,right,file=sys.stderr)
x1=int(left[0])
if(x1>5470):
x1=5470
y1=int(3648.0-(left[1]))
if(y1<0):
y1=0
x2=int(right[0])+80
if(x2>5470):
x2=5470
y2=int(3648.0-(right[1]))
if(y2<0):
y2=0
print("x1,y1,x2, y2",file=sys.stderr)
print(x1,y1,x2, y2,file=sys.stderr)
img2 = img.crop((x1, y1, x2, y2))
resize_image=img2.resize((2000, 2000), Image.ANTIALIAS)
resize_image.save('/Users/wasifaahmed/Documents/FRAS/FRAS_production/static/img_dump/4.jpg', 'JPEG')
image_png=Image.open("/Users/wasifaahmed/Documents/FRAS/FRAS_production/static/img_dump/4.jpg")
image_png.save("/Users/wasifaahmed/Documents/FRAS/FRAS_production/static/img_dump/4.png")
return redirect(url_for('previous_page_edit_1'))
@app.route('/crop_data_5', methods=['GET', 'POST'])
def crop_data_5():
img = Image.open("/Users/wasifaahmed/Documents/FRAS/FRAS_production/static/raw_image/CAMERA5_5.JPG")
if request.method == "POST":
data=request.get_json()
point_1=data['data1'][0]
point_2=data['data2'][0]
point_3=data['data3'][0]
point_4=data['data4'][0]
print(point_1,file=sys.stderr)
print(point_2,file=sys.stderr)
print(point_3,file=sys.stderr)
print(point_4,file=sys.stderr)
points=[]
points.append(point_1)
points.append(point_2)
points.append(point_3)
points.append(point_4)
temp_points = []
for p in points:
temp_points.append(p)
l=99999
left1=None
for p in temp_points:
if l > p[0]:
l=p[0]
left1 = p
temp_points2 = []
for p in temp_points:
if(p[0]!=left1[0] and p[1]!=left1[1]):
temp_points2.append(p)
l2=99999
left2=None
for p in temp_points2:
if l2 > p[0]:
l2=p[0]
left2 = p
left = None
print("left1,left2",file=sys.stderr)
print(left1,left2,file=sys.stderr)
if left1[1]>left2[1]:
left = left1
else:
left = left2
r=-1000
right1=None
for p in points:
if r < p[0]:
r = p[0]
right1 = p
temp_points3 = []
for p in points:
if(p[0]!=right1[0] and p[1]!=right1[1]):
temp_points3.append(p)
r2=-1000
right2=None
for p in temp_points3:
if r2 < p[0]:
r2=p[0]
right2 = p
right = None
if right1[1]<right2[1]:
right = right1
else:
right = right2
print("right1,right2",file=sys.stderr)
print(right1,right2,file=sys.stderr)
print("left,right",file=sys.stderr)
print(left,right,file=sys.stderr)
x1=int(left[0])
if(x1>5470):
x1=5470
y1=int(3648.0-(left[1]))
if(y1<0):
y1=0
x2=int(right[0])+80
if(x2>5470):
x2=5470
y2=int(3648.0-(right[1]))
if(y2<0):
y2=0
print("x1,y1,x2, y2",file=sys.stderr)
print(x1,y1,x2, y2,file=sys.stderr)
img2 = img.crop((x1, y1, x2, y2))
resize_image=img2.resize((2000, 2000), Image.ANTIALIAS)
resize_image.save('/Users/wasifaahmed/Documents/FRAS/FRAS_production/static/img_dump/5.jpg', 'JPEG')
image_png=Image.open("/Users/wasifaahmed/Documents/FRAS/FRAS_production/static/img_dump/5.jpg")
image_png.save("/Users/wasifaahmed/Documents/FRAS/FRAS_production/static/img_dump/5.png")
return redirect(url_for('previous_page_edit_5'))
@app.route('/crop_data_6', methods=['GET', 'POST'])
def crop_data_6():
img = Image.open("/Users/wasifaahmed/Documents/FRAS/FRAS_production/static/raw_image/CAMERA6_6.JPG")
if request.method == "POST":
data=request.get_json()
point_1=data['data1'][0]
point_2=data['data2'][0]
point_3=data['data3'][0]
point_4=data['data4'][0]
print(point_1,file=sys.stderr)
print(point_2,file=sys.stderr)
print(point_3,file=sys.stderr)
print(point_4,file=sys.stderr)
points=[]
points.append(point_1)
points.append(point_2)
points.append(point_3)
points.append(point_4)
temp_points = []
for p in points:
temp_points.append(p)
l=99999
left1=None
for p in temp_points:
if l > p[0]:
l=p[0]
left1 = p
temp_points2 = []
for p in temp_points:
if(p[0]!=left1[0] and p[1]!=left1[1]):
temp_points2.append(p)
l2=99999
left2=None
for p in temp_points2:
if l2 > p[0]:
l2=p[0]
left2 = p
left = None
print("left1,left2",file=sys.stderr)
print(left1,left2,file=sys.stderr)
if left1[1]>left2[1]:
left = left1
else:
left = left2
r=-1000
right1=None
for p in points:
if r < p[0]:
r = p[0]
right1 = p
temp_points3 = []
for p in points:
if(p[0]!=right1[0] and p[1]!=right1[1]):
temp_points3.append(p)
r2=-1000
right2=None
for p in temp_points3:
if r2 < p[0]:
r2=p[0]
right2 = p
right = None
if right1[1]<right2[1]:
right = right1
else:
right = right2
print("right1,right2",file=sys.stderr)
print(right1,right2,file=sys.stderr)
print("left,right",file=sys.stderr)
print(left,right,file=sys.stderr)
x1=int(left[0])
if(x1>5470):
x1=5470
y1=int(3648.0-(left[1]))
if(y1<0):
y1=0
x2=int(right[0])+80
if(x2>5470):
x2=5470
y2=int(3648.0-(right[1]))
if(y2<0):
y2=0
print("x1,y1,x2, y2",file=sys.stderr)
print(x1,y1,x2, y2,file=sys.stderr)
img2 = img.crop((x1, y1, x2, y2))
resize_image=img2.resize((2000, 2000), Image.ANTIALIAS)
resize_image.save('/Users/wasifaahmed/Documents/FRAS/FRAS_production/static/img_dump/6.jpg', 'JPEG')
image_png=Image.open("/Users/wasifaahmed/Documents/FRAS/FRAS_production/static/img_dump/6.jpg")
image_png.save("/Users/wasifaahmed/Documents/FRAS/FRAS_production/static/img_dump/6.png")
return redirect(url_for('previous_page_edit_5'))
@app.route('/crop_data_7', methods=['GET', 'POST'])
def crop_data_7():
img = Image.open("/Users/wasifaahmed/Documents/FRAS/FRAS_production/static/raw_image/CAMERA7_7.JPG")
if request.method == "POST":
data=request.get_json()
point_1=data['data1'][0]
point_2=data['data2'][0]
point_3=data['data3'][0]
point_4=data['data4'][0]
print(point_1,file=sys.stderr)
print(point_2,file=sys.stderr)
print(point_3,file=sys.stderr)
print(point_4,file=sys.stderr)
points=[]
points.append(point_1)
points.append(point_2)
points.append(point_3)
points.append(point_4)
temp_points = []
for p in points:
temp_points.append(p)
l=99999
left1=None
for p in temp_points:
if l > p[0]:
l=p[0]
left1 = p
temp_points2 = []
for p in temp_points:
if(p[0]!=left1[0] and p[1]!=left1[1]):
temp_points2.append(p)
l2=99999
left2=None
for p in temp_points2:
if l2 > p[0]:
l2=p[0]
left2 = p
left = None
print("left1,left2",file=sys.stderr)
print(left1,left2,file=sys.stderr)
if left1[1]>left2[1]:
left = left1
else:
left = left2
r=-1000
right1=None
for p in points:
if r < p[0]:
r = p[0]
right1 = p
temp_points3 = []
for p in points:
if(p[0]!=right1[0] and p[1]!=right1[1]):
temp_points3.append(p)
r2=-1000
right2=None
for p in temp_points3:
if r2 < p[0]:
r2=p[0]
right2 = p
right = None
if right1[1]<right2[1]:
right = right1
else:
right = right2
print("right1,right2",file=sys.stderr)
print(right1,right2,file=sys.stderr)
print("left,right",file=sys.stderr)
print(left,right,file=sys.stderr)
x1=int(left[0])
if(x1>5470):
x1=5470
y1=int(3648.0-(left[1]))
if(y1<0):
y1=0
x2=int(right[0])+80
if(x2>5470):
x2=5470
y2=int(3648.0-(right[1]))
if(y2<0):
y2=0
print("x1,y1,x2, y2",file=sys.stderr)
print(x1,y1,x2, y2,file=sys.stderr)
img2 = img.crop((x1, y1, x2, y2))
resize_image=img2.resize((2000, 2000), Image.ANTIALIAS)
resize_image.save('/Users/wasifaahmed/Documents/FRAS/FRAS_production/static/img_dump/7.jpg', 'JPEG')
image_png=Image.open("/Users/wasifaahmed/Documents/FRAS/FRAS_production/static/img_dump/7.jpg")
image_png.save("/Users/wasifaahmed/Documents/FRAS/FRAS_production/static/img_dump/7.png")
return redirect(url_for('previous_page_edit_5'))
@app.route('/crop_data_8', methods=['GET', 'POST'])
def crop_data_8():
img = Image.open("/Users/wasifaahmed/Documents/FRAS/FRAS_production/static/raw_image/CAMERA8_8.JPG")
if request.method == "POST":
data=request.get_json()
point_1=data['data1'][0]
point_2=data['data2'][0]
point_3=data['data3'][0]
point_4=data['data4'][0]
print(point_1,file=sys.stderr)
print(point_2,file=sys.stderr)
print(point_3,file=sys.stderr)
print(point_4,file=sys.stderr)
points=[]
points.append(point_1)
points.append(point_2)
points.append(point_3)
points.append(point_4)
temp_points = []
for p in points:
temp_points.append(p)
l=99999
left1=None
for p in temp_points:
if l > p[0]:
l=p[0]
left1 = p
temp_points2 = []
for p in temp_points:
if(p[0]!=left1[0] and p[1]!=left1[1]):
temp_points2.append(p)
l2=99999
left2=None
for p in temp_points2:
if l2 > p[0]:
l2=p[0]
left2 = p
left = None
print("left1,left2",file=sys.stderr)
print(left1,left2,file=sys.stderr)
if left1[1]>left2[1]:
left = left1
else:
left = left2
r=-1000
right1=None
for p in points:
if r < p[0]:
r = p[0]
right1 = p
temp_points3 = []
for p in points:
if(p[0]!=right1[0] and p[1]!=right1[1]):
temp_points3.append(p)
r2=-1000
right2=None
for p in temp_points3:
if r2 < p[0]:
r2=p[0]
right2 = p
right = None
if right1[1]<right2[1]:
right = right1
else:
right = right2
print("right1,right2",file=sys.stderr)
print(right1,right2,file=sys.stderr)
print("left,right",file=sys.stderr)
print(left,right,file=sys.stderr)
x1=int(left[0])
if(x1>5470):
x1=5470
y1=int(3648.0-(left[1]))
if(y1<0):
y1=0
x2=int(right[0])+80
if(x2>5470):
x2=5470
y2=int(3648.0-(right[1]))
if(y2<0):
y2=0
print("x1,y1,x2, y2",file=sys.stderr)
print(x1,y1,x2, y2,file=sys.stderr)
img2 = img.crop((x1, y1, x2, y2))
resize_image=img2.resize((2000, 2000), Image.ANTIALIAS)
resize_image.save('/Users/wasifaahmed/Documents/FRAS/FRAS_production/static/img_dump/8.jpg', 'JPEG')
image_png=Image.open("/Users/wasifaahmed/Documents/FRAS/FRAS_production/static/img_dump/8.jpg")
image_png.save("/Users/wasifaahmed/Documents/FRAS/FRAS_production/static/img_dump/8.png")
return redirect(url_for('previous_page_edit_5'))
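# The /test* routes are called by the plot editor: they recompute the MPI, grouping
# length, firing tendency and pass/fail text from the posted point lists, cache the
# results in the session for the later save_call_* handlers, and return them as JSON.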
@app.route('/test', methods=['GET', 'POST'])
def update():
mp=0
gp_1=0
tyf=0
txf=0
f_1=0
xmpi_1=0
ympi_1=0
j_x=None
j_y=None
j_mp=None
up_res_1=None
mp_inch = []
x1=[]
y1=[]
u_fir_tendency_txt=None
u_fir_tendency_code=None
if request.method == "POST":
data1 = request.get_json()
tx1 =data1['x1']
for le in tx1:
x1.append(le[0])
ty1 = data1['y1']
for le in ty1:
y1.append(le[0])
points = data1['points']
mp = mpi(1,points).tolist()
mp_inch.append(pixeltoinch(mp[0][0]))
mp_inch.append(pixeltoinch(mp[0][1]))
tmpi=mpi(1,points)
#print("Printing from UPDATE...", file=sys.stderr)
#print(tmpi, file=sys.stderr)
xmpi_1 = tmpi[0][0]
ympi_1 = tmpi[0][1]
session['tmpi']=mp
f_1,txf_list,tyf_list =firing_tendancy(1000, 1000 , xmpi_1, ympi_1)
txf=txf_list
tyf=tyf_list
j_x=pd.Series(txf).to_json(orient='values')
j_y=pd.Series(tyf).to_json(orient='values')
print("this is inside upadate",file=sys.stderr)
print(txf,file=sys.stderr)
gp_1 = grouping_length(0 , 0 , x1 , y1)
up_res_1=getresulttext(gp_1)
u_fir_tendency_txt,u_fir_tendency_code = getfiringtendencytext(f_1,txf_list,tyf_list)
session['x1'] = data1['x1']
session ['y1'] = data1['y1']
print("session.get('x1')",file=sys.stderr)
print(session.get('x1'),file=sys.stderr)
session['tf_u_1']=f_1
session['gp_u_1']=gp_1
session ['res_u_1']=up_res_1
session ['tfirer_x1']=u_fir_tendency_txt
session ['tfirer_y1']=u_fir_tendency_code
session ['tfirer_x1_f']=txf
session ['tfirer_y1_f']=tyf
return jsonify(mp = mp_inch ,
gp_1=gp_1,
ten_yu=j_y,
ten_xu=j_x,
result=up_res_1,
u_fir_tendency=u_fir_tendency_txt
)
@app.route('/test_2', methods=['GET', 'POST'])
def update_2():
mp_2=0
gp_2=0
tyf_2=0
txf_2=0
f_2=0
xmpi_2=0
ympi_2=0
j_x_2=None
j_y_2=None
j_mp_2=None
up_res_2=None
mp_inch_2 = []
x2=[]
y2=[]
u_fir_tendency_txt_2=None
u_fir_tendency_code_2=None
if request.method == "POST":
data1 = request.get_json()
tx2 =data1['x1']
for le in tx2:
x2.append(le[0])
print("x2",file=sys.stderr)
print(x2,file=sys.stderr)
ty2 = data1['y1']
for le in ty2:
y2.append(le[0])
points = data1['points']
mp = mpi(1,points).tolist()
mp_inch_2.append(pixeltoinch(mp[0][0]))
mp_inch_2.append(pixeltoinch(mp[0][1]))
tmpi_2=mpi(1,points)
#print(tmpi, file=sys.stderr)
xmpi_1 = tmpi_2[0][0]
ympi_1 = tmpi_2[0][1]
session['tmpi_2']=mp
f_1,txf_list,tyf_list =firing_tendancy(1000, 1000 , xmpi_1, ympi_1)
txf_2=txf_list
tyf_2=tyf_list
j_x_2=pd.Series(txf_2).to_json(orient='values')
j_y_2=pd.Series(tyf_2).to_json(orient='values')
print("calling from update_2" ,file=sys.stderr)
print(txf_2,file=sys.stderr)
gp_2 = grouping_length(0 , 0 , x2 , y2)
up_res_2=getresulttext(gp_2)
u_fir_tendency_txt_2,u_fir_tendency_code_2 = getfiringtendencytext(f_2,txf_list,tyf_list)
session['x2'] = data1['x1']
print(j_x_2, file=sys.stderr)
session ['y2'] = data1['y1']
session['tf_u_2']=f_1
session['gp_u_2']=gp_2
session ['res_u_2']=up_res_2
session ['tfirer_x2']=u_fir_tendency_txt_2
session ['tfirer_y2']=u_fir_tendency_code_2
session ['tfirer_x1_f']=txf_2
session ['tfirer_y1_f']=tyf_2
return jsonify(mp = mp_inch_2 ,
gp_1=gp_2,
ten_yu=j_y_2,
ten_xu=j_x_2,
result=up_res_2,
u_fir_tendency=u_fir_tendency_txt_2
)
@app.route('/test_3', methods=['GET', 'POST'])
def update_3():
mp_2=0
gp_2=0
tyf_2=0
txf_2=0
f_2=0
xmpi_2=0
ympi_2=0
j_x_2=None
j_y_2=None
j_mp_2=None
up_res_2=None
mp_inch_2 = []
x2=[]
y2=[]
u_fir_tendency_txt_2=None
u_fir_tendency_code_2=None
if request.method == "POST":
data1 = request.get_json()
tx2 =data1['x1']
for le in tx2:
x2.append(le[0])
print("x2",file=sys.stderr)
print(x2,file=sys.stderr)
ty2 = data1['y1']
for le in ty2:
y2.append(le[0])
points = data1['points']
mp = mpi(1,points).tolist()
mp_inch_2.append(pixeltoinch(mp[0][0]))
mp_inch_2.append(pixeltoinch(mp[0][1]))
tmpi_2=mpi(1,points)
#print(tmpi, file=sys.stderr)
xmpi_1 = tmpi_2[0][0]
ympi_1 = tmpi_2[0][1]
session['tmpi_2']=mp
f_1,txf_list,tyf_list =firing_tendancy(1000, 1000 , xmpi_1, ympi_1)
txf_2=txf_list
tyf_2=tyf_list
j_x_2=pd.Series(txf_2).to_json(orient='values')
j_y_2=pd.Series(tyf_2).to_json(orient='values')
print("calling from update_2" ,file=sys.stderr)
print(txf_2,file=sys.stderr)
gp_2 = grouping_length(0 , 0 , x2 , y2)
up_res_2=getresulttext(gp_2)
u_fir_tendency_txt_2,u_fir_tendency_code_2 = getfiringtendencytext(f_2,txf_list,tyf_list)
session['x2'] = data1['x1']
print(j_x_2, file=sys.stderr)
session ['y2'] = data1['y1']
session['tf_u_2']=f_1
session['gp_u_2']=gp_2
session ['res_u_2']=up_res_2
session ['tfirer_x2']=u_fir_tendency_txt_2
session ['tfirer_y2']=u_fir_tendency_code_2
session ['tfirer_x1_f']=txf_2
session ['tfirer_y1_f']=tyf_2
return jsonify(mp = mp_inch_2 ,
gp_1=gp_2,
ten_yu=j_y_2,
ten_xu=j_x_2,
result=up_res_2,
u_fir_tendency=u_fir_tendency_txt_2
)
@app.route('/test_4', methods=['GET', 'POST'])
def update_4():
mp_2=0
gp_2=0
tyf_2=0
txf_2=0
f_2=0
xmpi_2=0
ympi_2=0
j_x_2=None
j_y_2=None
j_mp_2=None
up_res_2=None
mp_inch_2 = []
x2=[]
y2=[]
u_fir_tendency_txt_2=None
u_fir_tendency_code_2=None
if request.method == "POST":
data1 = request.get_json()
tx2 =data1['x1']
for le in tx2:
x2.append(le[0])
print("x2",file=sys.stderr)
print(x2,file=sys.stderr)
ty2 = data1['y1']
for le in ty2:
y2.append(le[0])
points = data1['points']
mp = mpi(1,points).tolist()
mp_inch_2.append(pixeltoinch(mp[0][0]))
mp_inch_2.append(pixeltoinch(mp[0][1]))
tmpi_2=mpi(1,points)
#print(tmpi, file=sys.stderr)
xmpi_1 = tmpi_2[0][0]
ympi_1 = tmpi_2[0][1]
session['tmpi_2']=mp
f_1,txf_list,tyf_list =firing_tendancy(1000, 1000 , xmpi_1, ympi_1)
txf_2=txf_list
tyf_2=tyf_list
j_x_2=pd.Series(txf_2).to_json(orient='values')
j_y_2=pd.Series(tyf_2).to_json(orient='values')
print("calling from update_2" ,file=sys.stderr)
print(txf_2,file=sys.stderr)
gp_2 = grouping_length(0 , 0 , x2 , y2)
up_res_2=getresulttext(gp_2)
u_fir_tendency_txt_2,u_fir_tendency_code_2 = getfiringtendencytext(f_2,txf_list,tyf_list)
session['x2'] = data1['x1']
print(j_x_2, file=sys.stderr)
session ['y2'] = data1['y1']
session['tf_u_2']=f_1
session['gp_u_2']=gp_2
session ['res_u_2']=up_res_2
session ['tfirer_x2']=u_fir_tendency_txt_2
session ['tfirer_y2']=u_fir_tendency_code_2
session ['tfirer_x1_f']=txf_2
session ['tfirer_y1_f']=tyf_2
return jsonify(mp = mp_inch_2 ,
gp_1=gp_2,
ten_yu=j_y_2,
ten_xu=j_x_2,
result=up_res_2,
u_fir_tendency=u_fir_tendency_txt_2
)
@app.route('/test_5', methods=['GET', 'POST'])
def update_5():
mp_2=0
gp_2=0
tyf_2=0
txf_2=0
f_2=0
xmpi_2=0
ympi_2=0
j_x_2=None
j_y_2=None
j_mp_2=None
up_res_2=None
mp_inch_2 = []
x2=[]
y2=[]
u_fir_tendency_txt_2=None
u_fir_tendency_code_2=None
if request.method == "POST":
data1 = request.get_json()
tx2 =data1['x1']
for le in tx2:
x2.append(le[0])
print("x2",file=sys.stderr)
print(x2,file=sys.stderr)
ty2 = data1['y1']
for le in ty2:
y2.append(le[0])
points = data1['points']
mp = mpi(1,points).tolist()
mp_inch_2.append(pixeltoinch(mp[0][0]))
mp_inch_2.append(pixeltoinch(mp[0][1]))
tmpi_2=mpi(1,points)
#print(tmpi, file=sys.stderr)
xmpi_1 = tmpi_2[0][0]
ympi_1 = tmpi_2[0][1]
session['tmpi_2']=mp
f_1,txf_list,tyf_list =firing_tendancy(1000, 1000 , xmpi_1, ympi_1)
txf_2=txf_list
tyf_2=tyf_list
j_x_2=pd.Series(txf_2).to_json(orient='values')
j_y_2=pd.Series(tyf_2).to_json(orient='values')
print("calling from update_2" ,file=sys.stderr)
print(txf_2,file=sys.stderr)
gp_2 = grouping_length(0 , 0 , x2 , y2)
up_res_2=getresulttext(gp_2)
u_fir_tendency_txt_2,u_fir_tendency_code_2 = getfiringtendencytext(f_2,txf_list,tyf_list)
session['x2'] = data1['x1']
print(j_x_2, file=sys.stderr)
session ['y2'] = data1['y1']
session['tf_u_2']=f_1
session['gp_u_2']=gp_2
session ['res_u_2']=up_res_2
session ['tfirer_x2']=u_fir_tendency_txt_2
session ['tfirer_y2']=u_fir_tendency_code_2
session ['tfirer_x1_f']=txf_2
session ['tfirer_y1_f']=tyf_2
return jsonify(mp = mp_inch_2 ,
gp_1=gp_2,
ten_yu=j_y_2,
ten_xu=j_x_2,
result=up_res_2,
u_fir_tendency=u_fir_tendency_txt_2
)
@app.route('/test_6', methods=['GET', 'POST'])
def update_6():
mp_2=0
gp_2=0
tyf_2=0
txf_2=0
f_2=0
xmpi_2=0
ympi_2=0
j_x_2=None
j_y_2=None
j_mp_2=None
up_res_2=None
mp_inch_2 = []
x2=[]
y2=[]
u_fir_tendency_txt_2=None
u_fir_tendency_code_2=None
if request.method == "POST":
data1 = request.get_json()
tx2 =data1['x1']
for le in tx2:
x2.append(le[0])
print("x2",file=sys.stderr)
print(x2,file=sys.stderr)
ty2 = data1['y1']
for le in ty2:
y2.append(le[0])
points = data1['points']
mp = mpi(1,points).tolist()
mp_inch_2.append(pixeltoinch(mp[0][0]))
mp_inch_2.append(pixeltoinch(mp[0][1]))
tmpi_2=mpi(1,points)
#print(tmpi, file=sys.stderr)
xmpi_1 = tmpi_2[0][0]
ympi_1 = tmpi_2[0][1]
session['tmpi_2']=mp
f_1,txf_list,tyf_list =firing_tendancy(1000, 1000 , xmpi_1, ympi_1)
txf_2=txf_list
tyf_2=tyf_list
j_x_2=pd.Series(txf_2).to_json(orient='values')
j_y_2= | pd.Series(tyf_2) | pandas.Series |
"""
from 3 files of the official evaluation repo:
dx_mapping_scored.csv, dx_mapping_unscored.csv, weights.csv
"""
import os
from io import StringIO
from typing import Union, Optional, List, Tuple, Sequence, Dict
from numbers import Real
import numpy as np
import pandas as pd
from ...cfg import CFG
__all__ = [
"df_weights",
"df_weights_expanded",
"df_weights_abbr",
"df_weights_fullname",
"dx_mapping_scored",
"dx_mapping_unscored",
"dx_mapping_all",
"equiv_class_dict",
"load_weights",
"get_class",
"get_class_count",
"get_class_weight",
"normalize_class",
"dx_cooccurrence_all",
"dx_cooccurrence_scored",
"get_cooccurrence",
]
# constants
df_weights = pd.read_csv(StringIO(""",164889003,164890007,6374002,426627000,733534002|164909002,713427006|59118001,270492004,713426002,39732003,445118002,164947007,251146004,111975006,698252002,426783006,284470004|63593006,10370003,365413008,427172004|17338001,164917005,47665007,427393009,426177001,427084000,164934002,59931005
164889003,1.0,0.5,0.475,0.3,0.475,0.4,0.3,0.3,0.35,0.35,0.3,0.425,0.45,0.35,0.25,0.3375,0.375,0.425,0.375,0.4,0.35,0.3,0.3,0.375,0.5,0.5
164890007,0.5,1.0,0.475,0.3,0.475,0.4,0.3,0.3,0.35,0.35,0.3,0.425,0.45,0.35,0.25,0.3375,0.375,0.425,0.375,0.4,0.35,0.3,0.3,0.375,0.5,0.5
6374002,0.475,0.475,1.0,0.325,0.475,0.425,0.325,0.325,0.375,0.375,0.325,0.45,0.475,0.375,0.275,0.3625,0.4,0.45,0.4,0.375,0.375,0.325,0.325,0.4,0.475,0.475
426627000,0.3,0.3,0.325,1.0,0.325,0.4,0.5,0.5,0.45,0.45,0.5,0.375,0.35,0.45,0.45,0.4625,0.425,0.375,0.425,0.2,0.45,0.5,0.5,0.425,0.3,0.3
733534002|164909002,0.475,0.475,0.475,0.325,1.0,0.425,0.325,0.325,0.375,0.375,0.325,0.45,0.475,0.375,0.275,0.3625,0.4,0.45,0.4,0.375,0.375,0.325,0.325,0.4,0.475,0.475
713427006|59118001,0.4,0.4,0.425,0.4,0.425,1.0,0.4,0.4,0.45,0.45,0.4,0.475,0.45,0.45,0.35,0.4375,0.475,0.475,0.475,0.3,0.45,0.4,0.4,0.475,0.4,0.4
270492004,0.3,0.3,0.325,0.5,0.325,0.4,1.0,0.5,0.45,0.45,0.5,0.375,0.35,0.45,0.45,0.4625,0.425,0.375,0.425,0.2,0.45,0.5,0.5,0.425,0.3,0.3
713426002,0.3,0.3,0.325,0.5,0.325,0.4,0.5,1.0,0.45,0.45,0.5,0.375,0.35,0.45,0.45,0.4625,0.425,0.375,0.425,0.2,0.45,0.5,0.5,0.425,0.3,0.3
39732003,0.35,0.35,0.375,0.45,0.375,0.45,0.45,0.45,1.0,0.5,0.45,0.425,0.4,0.5,0.4,0.4875,0.475,0.425,0.475,0.25,0.5,0.45,0.45,0.475,0.35,0.35
445118002,0.35,0.35,0.375,0.45,0.375,0.45,0.45,0.45,0.5,1.0,0.45,0.425,0.4,0.5,0.4,0.4875,0.475,0.425,0.475,0.25,0.5,0.45,0.45,0.475,0.35,0.35
164947007,0.3,0.3,0.325,0.5,0.325,0.4,0.5,0.5,0.45,0.45,1.0,0.375,0.35,0.45,0.45,0.4625,0.425,0.375,0.425,0.2,0.45,0.5,0.5,0.425,0.3,0.3
251146004,0.425,0.425,0.45,0.375,0.45,0.475,0.375,0.375,0.425,0.425,0.375,1.0,0.475,0.425,0.325,0.4125,0.45,0.475,0.45,0.325,0.425,0.375,0.375,0.45,0.425,0.425
111975006,0.45,0.45,0.475,0.35,0.475,0.45,0.35,0.35,0.4,0.4,0.35,0.475,1.0,0.4,0.3,0.3875,0.425,0.475,0.425,0.35,0.4,0.35,0.35,0.425,0.45,0.45
698252002,0.35,0.35,0.375,0.45,0.375,0.45,0.45,0.45,0.5,0.5,0.45,0.425,0.4,1.0,0.4,0.4875,0.475,0.425,0.475,0.25,0.5,0.45,0.45,0.475,0.35,0.35
426783006,0.25,0.25,0.275,0.45,0.275,0.35,0.45,0.45,0.4,0.4,0.45,0.325,0.3,0.4,1.0,0.4125,0.375,0.325,0.375,0.15,0.4,0.45,0.45,0.375,0.25,0.25
284470004|63593006,0.3375,0.3375,0.3625,0.4625,0.3625,0.4375,0.4625,0.4625,0.4875,0.4875,0.4625,0.4125,0.3875,0.4875,0.4125,1.0,0.4625,0.4125,0.4625,0.2375,0.4875,0.4625,0.4625,0.4625,0.3375,0.3375
10370003,0.375,0.375,0.4,0.425,0.4,0.475,0.425,0.425,0.475,0.475,0.425,0.45,0.425,0.475,0.375,0.4625,1.0,0.45,0.5,0.275,0.475,0.425,0.425,0.5,0.375,0.375
365413008,0.425,0.425,0.45,0.375,0.45,0.475,0.375,0.375,0.425,0.425,0.375,0.475,0.475,0.425,0.325,0.4125,0.45,1.0,0.45,0.325,0.425,0.375,0.375,0.45,0.425,0.425
427172004|17338001,0.375,0.375,0.4,0.425,0.4,0.475,0.425,0.425,0.475,0.475,0.425,0.45,0.425,0.475,0.375,0.4625,0.5,0.45,1.0,0.275,0.475,0.425,0.425,0.5,0.375,0.375
164917005,0.4,0.4,0.375,0.2,0.375,0.3,0.2,0.2,0.25,0.25,0.2,0.325,0.35,0.25,0.15,0.2375,0.275,0.325,0.275,1.0,0.25,0.2,0.2,0.275,0.4,0.4
47665007,0.35,0.35,0.375,0.45,0.375,0.45,0.45,0.45,0.5,0.5,0.45,0.425,0.4,0.5,0.4,0.4875,0.475,0.425,0.475,0.25,1.0,0.45,0.45,0.475,0.35,0.35
427393009,0.3,0.3,0.325,0.5,0.325,0.4,0.5,0.5,0.45,0.45,0.5,0.375,0.35,0.45,0.45,0.4625,0.425,0.375,0.425,0.2,0.45,1.0,0.5,0.425,0.3,0.3
426177001,0.3,0.3,0.325,0.5,0.325,0.4,0.5,0.5,0.45,0.45,0.5,0.375,0.35,0.45,0.45,0.4625,0.425,0.375,0.425,0.2,0.45,0.5,1.0,0.425,0.3,0.3
427084000,0.375,0.375,0.4,0.425,0.4,0.475,0.425,0.425,0.475,0.475,0.425,0.45,0.425,0.475,0.375,0.4625,0.5,0.45,0.5,0.275,0.475,0.425,0.425,1.0,0.375,0.375
164934002,0.5,0.5,0.475,0.3,0.475,0.4,0.3,0.3,0.35,0.35,0.3,0.425,0.45,0.35,0.25,0.3375,0.375,0.425,0.375,0.4,0.35,0.3,0.3,0.375,1.0,0.5
59931005,0.5,0.5,0.475,0.3,0.475,0.4,0.3,0.3,0.35,0.35,0.3,0.425,0.45,0.35,0.25,0.3375,0.375,0.425,0.375,0.4,0.35,0.3,0.3,0.375,0.5,1.0"""), index_col=0)
df_weights.index = df_weights.index.map(str)
def expand_equiv_classes(df:pd.DataFrame, sep:str="|") -> pd.DataFrame:
""" finished, checked,
expand `df` so that rows/columns whose labels join several equivalent classes with `sep` are split into one row/column per class, all sharing the original values
Parameters
----------
df: DataFrame,
the dataframe to be split
sep: str, default "|",
separator of equivalent classes
Returns
-------
df_out: DataFrame,
the expanded DataFrame
"""
# check whether df is symmetric
if not (df.columns == df.index).all() or not (df.values.T == df.values).all():
raise ValueError("the input DataFrame (matrix) is not symmetric")
df_out = df.copy()
col_row = df_out.columns.tolist()
# df_sep = "\|" if sep == "|" else sep
new_cols = []
for c in col_row:
for new_c in c.split(sep)[1:]:
new_cols.append(new_c)
df_out[new_c] = df_out[c].values
new_r = new_c
df_out.loc[new_r] = df_out.loc[df_out.index.str.contains(new_r)].values[0]
col_row = [c.split(sep)[0] for c in col_row] + new_cols
df_out.columns = col_row
df_out.index = col_row
return df_out
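# Minimal illustration (added here, not part of the original files) of what
# expand_equiv_classes does: a symmetric frame with one merged label "A|B" gains
# separate "A" and "B" rows/columns carrying the values of the original "A|B" entry.
#   >>> toy = pd.DataFrame([[1.0, 0.5], [0.5, 1.0]], index=["A|B", "C"], columns=["A|B", "C"])
#   >>> expand_equiv_classes(toy).loc["B", "C"]
#   0.5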
df_weights_expanded = expand_equiv_classes(df_weights)
dx_mapping_scored = pd.read_csv(StringIO("""Dx,SNOMEDCTCode,Abbreviation,CPSC,CPSC_Extra,StPetersburg,PTB,PTB_XL,Georgia,Chapman_Shaoxing,Ningbo,Total,Notes
atrial fibrillation,164889003,AF,1221,153,2,15,1514,570,1780,0,5255,
atrial flutter,164890007,AFL,0,54,0,1,73,186,445,7615,8374,
bundle branch block,6374002,BBB,0,0,1,20,0,116,0,385,522,
bradycardia,426627000,Brady,0,271,11,0,0,6,0,7,295,
complete left bundle branch block,733534002,CLBBB,0,0,0,0,0,0,0,213,213,We score 733534002 and 164909002 as the same diagnosis
complete right bundle branch block,713427006,CRBBB,0,113,0,0,542,28,0,1096,1779,We score 713427006 and 59118001 as the same diagnosis.
1st degree av block,270492004,IAVB,722,106,0,0,797,769,247,893,3534,
incomplete right bundle branch block,713426002,IRBBB,0,86,0,0,1118,407,0,246,1857,
left axis deviation,39732003,LAD,0,0,0,0,5146,940,382,1163,7631,
left anterior fascicular block,445118002,LAnFB,0,0,0,0,1626,180,0,380,2186,
left bundle branch block,164909002,LBBB,236,38,0,0,536,231,205,35,1281,We score 733534002 and 164909002 as the same diagnosis
low qrs voltages,251146004,LQRSV,0,0,0,0,182,374,249,794,1599,
nonspecific intraventricular conduction disorder,698252002,NSIVCB,0,4,1,0,789,203,235,536,1768,
sinus rhythm,426783006,NSR,918,4,0,80,18092,1752,1826,6299,28971,
premature atrial contraction,284470004,PAC,616,73,3,0,398,639,258,1054,3041,We score 284470004 and 63593006 as the same diagnosis.
pacing rhythm,10370003,PR,0,3,0,0,296,0,0,1182,1481,
poor R wave Progression,365413008,PRWP,0,0,0,0,0,0,0,638,638,
premature ventricular contractions,427172004,PVC,0,188,0,0,0,0,0,1091,1279,We score 427172004 and 17338001 as the same diagnosis.
prolonged pr interval,164947007,LPR,0,0,0,0,340,0,12,40,392,
prolonged qt interval,111975006,LQT,0,4,0,0,118,1391,57,337,1907,
qwave abnormal,164917005,QAb,0,1,0,0,548,464,235,828,2076,
right axis deviation,47665007,RAD,0,1,0,0,343,83,215,638,1280,
right bundle branch block,59118001,RBBB,1857,1,2,0,0,542,454,195,3051,We score 713427006 and 59118001 as the same diagnosis.
sinus arrhythmia,427393009,SA,0,11,2,0,772,455,0,2550,3790,
sinus bradycardia,426177001,SB,0,45,0,0,637,1677,3889,12670,18918,
sinus tachycardia,427084000,STach,0,303,11,1,826,1261,1568,5687,9657,
supraventricular premature beats,63593006,SVPB,0,53,4,0,157,1,0,9,224,We score 284470004 and 63593006 as the same diagnosis.
t wave abnormal,164934002,TAb,0,22,0,0,2345,2306,1876,5167,11716,
t wave inversion,59931005,TInv,0,5,1,0,294,812,157,2720,3989,
ventricular premature beats,17338001,VPB,0,8,0,0,0,357,294,0,659,We score 427172004 and 17338001 as the same diagnosis."""))
dx_mapping_scored = dx_mapping_scored.fillna("")
dx_mapping_scored["SNOMEDCTCode"] = dx_mapping_scored["SNOMEDCTCode"].apply(str)
dx_mapping_scored["CUSPHNFH"] = dx_mapping_scored["Chapman_Shaoxing"].values + dx_mapping_scored["Ningbo"].values
dx_mapping_scored = dx_mapping_scored["Dx,SNOMEDCTCode,Abbreviation,CPSC,CPSC_Extra,StPetersburg,PTB,PTB_XL,Georgia,CUSPHNFH,Chapman_Shaoxing,Ningbo,Total,Notes".split(",")]
dx_mapping_unscored = pd.read_csv(StringIO("""Dx,SNOMEDCTCode,Abbreviation,CPSC,CPSC_Extra,StPetersburg,PTB,PTB_XL,Georgia,Chapman_Shaoxing,Ningbo,Total
accelerated atrial escape rhythm,233892002,AAR,0,0,0,0,0,0,0,16,16
abnormal QRS,164951009,abQRS,0,0,0,0,3389,0,0,0,3389
atrial escape beat,251187003,AED,0,0,0,0,0,0,0,17,17
accelerated idioventricular rhythm,61277005,AIVR,0,0,0,0,0,0,0,14,14
accelerated junctional rhythm,426664006,AJR,0,0,0,0,0,19,0,12,31
suspect arm ecg leads reversed,251139008,ALR,0,0,0,0,0,12,0,0,12
acute myocardial infarction,57054005,AMI,0,0,6,0,0,0,0,49,55
acute myocardial ischemia,413444003,AMIs,0,1,0,0,0,1,0,0,2
anterior ischemia,426434006,AnMIs,0,0,0,0,44,281,0,0,325
anterior myocardial infarction,54329005,AnMI,0,62,0,0,354,0,0,57,473
atrial bigeminy,251173003,AB,0,0,3,0,0,0,3,0,6
atrial fibrillation and flutter,195080001,AFAFL,0,39,0,0,0,2,0,0,41
atrial hypertrophy,195126007,AH,0,2,0,0,0,60,0,0,62
atrial pacing pattern,251268003,AP,0,0,0,0,0,52,0,0,52
atrial rhythm,106068003,ARH,0,0,0,0,0,0,0,215,215
atrial tachycardia,713422000,ATach,0,15,0,0,0,28,121,176,340
av block,233917008,AVB,0,5,0,0,0,74,166,78,323
atrioventricular dissociation,50799005,AVD,0,0,0,0,0,0,0,59,59
atrioventricular junctional rhythm,29320008,AVJR,0,6,0,0,0,0,0,139,145
atrioventricular node reentrant tachycardia,251166008,AVNRT,0,0,0,0,0,0,16,0,16
atrioventricular reentrant tachycardia,233897008,AVRT,0,0,0,0,0,0,8,18,26
blocked premature atrial contraction,251170000,BPAC,0,2,3,0,0,0,0,62,67
brugada,418818005,BRU,0,0,0,0,0,0,0,5,5
brady tachy syndrome,74615001,BTS,0,1,1,0,0,0,0,0,2
chronic atrial fibrillation,426749004,CAF,0,1,0,0,0,0,0,0,1
countercolockwise rotation,251199005,CCR,0,0,0,0,0,0,162,0,162
clockwise or counterclockwise vectorcardiographic loop,61721007,CVCL/CCVCL,0,0,0,0,0,0,0,653,653
cardiac dysrhythmia,698247007,CD,0,0,0,16,0,0,0,0,16
complete heart block,27885002,CHB,0,27,0,0,16,8,1,75,127
congenital incomplete atrioventricular heart block,204384007,CIAHB,0,0,0,2,0,0,0,0,2
coronary heart disease,53741008,CHD,0,0,16,21,0,0,0,0,37
chronic myocardial ischemia,413844008,CMI,0,161,0,0,0,0,0,0,161
clockwise rotation,251198002,CR,0,0,0,0,0,0,76,0,76
diffuse intraventricular block,82226007,DIB,0,1,0,0,0,0,0,0,1
early repolarization,428417006,ERe,0,0,0,0,0,140,22,344,506
fusion beats,13640000,FB,0,0,7,0,0,0,2,114,123
fqrs wave,164942001,FQRS,0,0,0,0,0,0,3,0,3
heart failure,84114007,HF,0,0,0,7,0,0,0,0,7
heart valve disorder,368009,HVD,0,0,0,6,0,0,0,0,6
high t-voltage,251259000,HTV,0,1,0,0,0,0,0,0,1
indeterminate cardiac axis,251200008,ICA,0,0,0,0,156,0,0,0,156
2nd degree av block,195042002,IIAVB,0,21,0,0,14,23,8,58,124
mobitz type II atrioventricular block,426183003,IIAVBII,0,0,0,0,0,0,0,7,7
inferior ischaemia,425419005,IIs,0,0,0,0,219,451,0,0,670
incomplete left bundle branch block,251120003,ILBBB,0,42,0,0,77,86,0,6,211
inferior ST segment depression,704997005,ISTD,0,1,0,0,0,0,0,0,1
idioventricular rhythm,49260003,IR,0,0,2,0,0,0,0,0,2
junctional escape,426995002,JE,0,4,0,0,0,5,15,60,84
junctional premature complex,251164006,JPC,0,2,0,0,0,0,1,10,13
junctional tachycardia,426648003,JTach,0,2,0,0,0,4,0,24,30
left atrial abnormality,253352002,LAA,0,0,0,0,0,72,0,0,72
left atrial enlargement,67741000119109,LAE,0,1,0,0,427,870,0,1,1299
left atrial hypertrophy,446813000,LAH,0,40,0,0,0,0,0,8,48
lateral ischaemia,425623009,LIs,0,0,0,0,142,903,0,0,1045
left posterior fascicular block,445211001,LPFB,0,0,0,0,177,25,0,5,207
left ventricular hypertrophy,164873001,LVH,0,158,10,0,2359,1232,15,632,4406
left ventricular high voltage,55827005,LVHV,0,0,0,0,0,0,1295,4106,5401
left ventricular strain,370365005,LVS,0,1,0,0,0,0,0,0,1
myocardial infarction,164865005,MI,0,376,9,368,5261,7,40,83,6144
myocardial ischemia,164861001,MIs,0,384,0,0,2175,0,0,0,2559
mobitz type i wenckebach atrioventricular block,54016002,MoI,0,0,3,0,0,0,6,25,34
nonspecific st t abnormality,428750005,NSSTTA,0,1290,0,0,381,1883,1158,0,4712
old myocardial infarction,164867002,OldMI,0,1168,0,0,0,0,0,0,1168
paroxysmal atrial fibrillation,282825002,PAF,0,0,1,1,0,0,0,0,2
prolonged P wave,251205003,PPW,0,0,0,0,0,0,0,106,106
paroxysmal supraventricular tachycardia,67198005,PSVT,0,0,3,0,24,0,0,0,27
paroxysmal ventricular tachycardia,425856008,PVT,0,0,15,0,0,0,0,109,124
p wave change,164912004,PWC,0,0,0,0,0,0,95,47,142
right atrial abnormality,253339007,RAAb,0,0,0,0,0,14,0,0,14
r wave abnormal,164921003,RAb,0,1,0,0,0,10,0,0,11
right atrial hypertrophy,446358003,RAH,0,18,0,0,99,0,3,33,153
right atrial high voltage,67751000119106,RAHV,0,0,0,0,0,0,8,28,36
rapid atrial fibrillation,314208002,RAF,0,0,0,2,0,0,0,0,2
right ventricular hypertrophy,89792004,RVH,0,20,0,0,126,86,4,106,342
sinus atrium to atrial wandering rhythm,17366009,SAAWR,0,0,0,0,0,0,7,0,7
sinoatrial block,65778007,SAB,0,9,0,0,0,0,0,5,14
sinus arrest,5609005,SARR,0,0,0,0,0,0,0,33,33
sinus node dysfunction,60423000,SND,0,0,2,0,0,0,0,0,2
shortened pr interval,49578007,SPRI,0,3,0,0,0,2,0,23,28
decreased qt interval,77867006,SQT,0,1,0,0,0,0,0,2,3
s t changes,55930002,STC,0,1,0,0,770,6,0,4232,5009
st depression,429622005,STD,869,57,4,0,1009,38,402,1266,3645
st elevation,164931005,STE,220,66,4,0,28,134,176,0,628
st interval abnormal,164930006,STIAb,0,481,2,0,0,992,2,799,2276
supraventricular bigeminy,251168009,SVB,0,0,1,0,0,0,0,0,1
supraventricular tachycardia,426761007,SVT,0,3,1,0,27,32,587,137,787
transient ischemic attack,266257000,TIA,0,0,7,0,0,0,0,0,7
tall p wave,251223006,TPW,0,0,0,0,0,0,0,215,215
u wave abnormal,164937009,UAb,0,1,0,0,0,0,22,114,137
ventricular bigeminy,11157007,VBig,0,5,9,0,82,2,3,0,101
ventricular ectopics,164884008,VEB,700,0,49,0,1154,41,0,0,1944
ventricular escape beat,75532003,VEsB,0,3,1,0,0,0,7,49,60
ventricular escape rhythm,81898007,VEsR,0,1,0,0,0,1,0,96,98
ventricular fibrillation,164896001,VF,0,10,0,25,0,3,0,59,97
ventricular flutter,111288001,VFL,0,1,0,0,0,0,0,7,8
ventricular hypertrophy,266249003,VH,0,5,0,13,30,71,0,0,119
ventricular pre excitation,195060002,VPEx,0,6,0,0,0,2,12,0,20
ventricular pacing pattern,251266004,VPP,0,0,0,0,0,46,0,0,46
paired ventricular premature complexes,251182009,VPVC,0,0,23,0,0,0,0,0,23
ventricular tachycardia,164895002,VTach,0,1,1,10,0,0,0,0,12
ventricular trigeminy,251180001,VTrig,0,4,4,0,20,1,8,0,37
wandering atrial pacemaker,195101003,WAP,0,0,0,0,0,7,2,0,9
wolff parkinson white pattern,74390002,WPW,0,0,4,2,80,2,4,68,160"""))
dx_mapping_unscored["SNOMEDCTCode"] = dx_mapping_unscored["SNOMEDCTCode"].apply(str)
dx_mapping_unscored["CUSPHNFH"] = dx_mapping_unscored["Chapman_Shaoxing"].values + dx_mapping_unscored["Ningbo"].values
dx_mapping_unscored = dx_mapping_unscored["Dx,SNOMEDCTCode,Abbreviation,CPSC,CPSC_Extra,StPetersburg,PTB,PTB_XL,Georgia,CUSPHNFH,Chapman_Shaoxing,Ningbo,Total".split(",")]
dms = dx_mapping_scored.copy()
dms["scored"] = True
dmn = dx_mapping_unscored.copy()
dmn["Notes"] = ""
dmn["scored"] = False
dx_mapping_all = pd.concat([dms, dmn], ignore_index=True).fillna("")
df_weights_snomed = df_weights_expanded # alias
snomed_ct_code_to_abbr = \
CFG({row["SNOMEDCTCode"]:row["Abbreviation"] for _,row in dx_mapping_all.iterrows()})
abbr_to_snomed_ct_code = CFG({v:k for k,v in snomed_ct_code_to_abbr.items()})
df_weights_abbr = df_weights_expanded.copy()
df_weights_abbr.columns = \
df_weights_abbr.columns.map(lambda i: snomed_ct_code_to_abbr.get(i, i))
# df_weights_abbr.columns.map(lambda i: snomed_ct_code_to_abbr[i])
df_weights_abbr.index = \
df_weights_abbr.index.map(lambda i: snomed_ct_code_to_abbr.get(i, i))
# df_weights_abbr.index.map(lambda i: snomed_ct_code_to_abbr[i])
df_weights_abbreviations = df_weights.copy() # corresponding to weights_abbreviations.csv
df_weights_abbreviations.columns = \
df_weights_abbreviations.columns.map(lambda i: "|".join([snomed_ct_code_to_abbr.get(item, item) for item in i.split("|")]))
# df_weights_abbreviations.columns.map(lambda i: "|".join([snomed_ct_code_to_abbr[item] for item in i.split("|")]))
df_weights_abbreviations.index = \
df_weights_abbreviations.index.map(lambda i: "|".join([snomed_ct_code_to_abbr.get(item, item) for item in i.split("|")]))
# df_weights_abbreviations.index.map(lambda i: "|".join([snomed_ct_code_to_abbr[item] for item in i.split("|")]))
snomed_ct_code_to_fullname = \
CFG({row["SNOMEDCTCode"]:row["Dx"] for _,row in dx_mapping_all.iterrows()})
fullname_to_snomed_ct_code = CFG({v:k for k,v in snomed_ct_code_to_fullname.items()})
df_weights_fullname = df_weights_expanded.copy()
df_weights_fullname.columns = \
df_weights_fullname.columns.map(lambda i: snomed_ct_code_to_fullname.get(i, i))
# df_weights_fullname.columns.map(lambda i: snomed_ct_code_to_fullname[i])
df_weights_fullname.index = \
df_weights_fullname.index.map(lambda i: snomed_ct_code_to_fullname.get(i, i))
# df_weights_fullname.index.map(lambda i: snomed_ct_code_to_fullname[i])
abbr_to_fullname = \
CFG({row["Abbreviation"]:row["Dx"] for _,row in dx_mapping_all.iterrows()})
fullname_to_abbr = CFG({v:k for k,v in abbr_to_fullname.items()})
# equiv_class_dict = CFG({ # from unofficial phase, deprecated
# "CRBBB": "RBBB",
# "SVPB": "PAC",
# "VPB": "PVC",
# "713427006": "59118001",
# "63593006": "284470004",
# "17338001": "427172004",
# "complete right bundle branch block": "right bundle branch block",
# "supraventricular premature beats": "premature atrial contraction",
# "ventricular premature beats": "premature ventricular contractions",
# })
equiv_class_dict = {}
for c in df_weights.columns:
if "|" not in c:
continue
v, k = c.split("|")
equiv_class_dict[k] = v
equiv_class_dict[snomed_ct_code_to_abbr[k]] = snomed_ct_code_to_abbr[v]
equiv_class_dict[snomed_ct_code_to_fullname[k]] = snomed_ct_code_to_fullname[v]
# functions
def load_weights(classes:Optional[Sequence[Union[int,str]]]=None,
equivalent_classes:Optional[Union[Dict[str,str], List[List[str]]]]=None,
return_fmt:str="np") -> Union[np.ndarray, pd.DataFrame]:
""" NOT finished, NOT checked,
load the weight matrix of the `classes`
Parameters
----------
classes: sequence of str or int, optional,
the classes (abbr. or SNOMEDCTCode) to load their weights,
if not given, weights of all classes in `dx_mapping_scored` will be loaded
equivalent_classes: dict or list, optional,
list or dict of equivalent classes,
if not specified, defaults to `equiv_class_dict`
return_fmt: str, default "np",
"np" or "pd", the values in the form of a 2d array or a DataFrame
Returns
-------
mat: 2d array or DataFrame,
the weight matrix of the `classes`
"""
if classes:
l_nc = [normalize_class(c, ensure_scored=True) for c in classes]
assert len(set(l_nc)) == len(classes), "`classes` has duplicates!"
mat = df_weights_abbr.loc[l_nc,l_nc]
else:
mat = df_weights_abbr.copy()
if return_fmt.lower() == "np":
mat = mat.values
    elif return_fmt.lower() == "pd":
        if classes:
            # map columns and indices back to the original input format
            mat.columns = list(map(str, classes))
            mat.index = list(map(str, classes))
else:
raise ValueError(f"format of `{return_fmt}` is not supported!")
return mat
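# Usage sketch (hypothetical class selection): load the weight sub-matrix for atrial
# fibrillation ("AF", SNOMEDCTCode 164889003) and sinus bradycardia ("SB"), either as
# a plain numpy array or as a DataFrame keyed by the original inputs.
def _demo_load_weights():
    w_np = load_weights(["AF", "SB"])                           # 2x2 numpy array, diagonal 1.0
    w_pd = load_weights(["164889003", "SB"], return_fmt="pd")   # DataFrame indexed by the inputs
    return w_np, w_pd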
def normalize_class(c:Union[str,int], ensure_scored:bool=False) -> str:
""" finished, checked,
normalize the class name to its abbr.,
facilitating the computation of the `load_weights` function
Parameters
----------
c: str or int,
abbr. or SNOMEDCTCode of the class
ensure_scored: bool, default False,
ensure that the class is a scored class,
if True, `ValueError` would be raised if `c` is not scored
Returns
-------
nc: str,
the abbr. of the class
"""
nc = snomed_ct_code_to_abbr.get(str(c), str(c))
if ensure_scored and nc not in df_weights_abbr.columns:
raise ValueError(f"class `{c}` not among the scored classes")
return nc
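# Examples (values taken from the mapping tables above):
#   normalize_class("164889003")  -> "AF"
#   normalize_class("AF")         -> "AF"   # already an abbreviation, returned unchanged
#   normalize_class("233892002", ensure_scored=True)  # raises ValueError, AAR is not scored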
def get_class(snomed_ct_code:Union[str,int]) -> Dict[str,str]:
""" finished, checked,
look up the abbreviation and the full name of an ECG arrhythmia,
given its SNOMEDCTCode
Parameters
----------
snomed_ct_code: str or int,
the SNOMEDCTCode of the arrhythmia
Returns
-------
arrhythmia_class: dict,
containing `abbr` the abbreviation and `fullname` the full name of the arrhythmia
"""
arrhythmia_class = {
"abbr": snomed_ct_code_to_abbr[str(snomed_ct_code)],
"fullname": snomed_ct_code_to_fullname[str(snomed_ct_code)],
}
return arrhythmia_class
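# Example (values taken from the mapping tables above):
#   get_class(59118001) -> {"abbr": "RBBB", "fullname": "right bundle branch block"}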
def get_class_count(tranches:Union[str, Sequence[str]],
exclude_classes:Optional[Sequence[str]]=None,
scored_only:bool=False,
normalize:bool=True,
threshold:Optional[Real]=0,
fmt:str="a") ->Dict[str, int]:
""" finished, checked,
Parameters
----------
tranches: str or sequence of str,
tranches to count classes, can be combinations of "A", "B", "C", "D", "E", "F", "G"
exclude_classes: sequence of str, optional,
        abbreviations or SNOMEDCTCodes of classes to be excluded from counting
    scored_only: bool, default False,
if True, only scored classes are counted
normalize: bool, default True,
collapse equivalent classes into one,
used only when `scored_only` = True
threshold: real number,
minimum ratio (0-1) or absolute number (>1) of a class to be counted
fmt: str, default "a",
the format of the names of the classes in the returned dict,
can be one of the following (case insensitive):
- "a", abbreviations
- "f", full names
- "s", SNOMEDCTCode
Returns
-------
class_count: dict,
key: class in the format of `fmt`
value: count of a class in `tranches`
"""
assert threshold >= 0
tranche_names = CFG({
"A": "CPSC",
"B": "CPSC_Extra",
"C": "StPetersburg",
"D": "PTB",
"E": "PTB_XL",
"F": "Georgia",
"G": "CUSPHNFH",
})
tranche_names = [tranche_names[t] for t in tranches]
_exclude_classes = [normalize_class(c) for c in (exclude_classes or [])]
df = dx_mapping_scored.copy() if scored_only else dx_mapping_all.copy()
class_count = CFG()
for _, row in df.iterrows():
key = row["Abbreviation"]
val = row[tranche_names].values.sum()
if val == 0:
continue
if key in _exclude_classes:
continue
if normalize and scored_only:
key = equiv_class_dict.get(key, key)
if key in _exclude_classes:
continue
if key in class_count.keys():
class_count[key] += val
else:
class_count[key] = val
tmp = CFG()
tot_count = sum(class_count.values())
_threshold = threshold if threshold >= 1 else threshold * tot_count
if fmt.lower() == "s":
for key, val in class_count.items():
if val < _threshold:
continue
tmp[abbr_to_snomed_ct_code[key]] = val
class_count = tmp.copy()
elif fmt.lower() == "f":
for key, val in class_count.items():
if val < _threshold:
continue
tmp[abbr_to_fullname[key]] = val
class_count = tmp.copy()
else:
class_count = {key: val for key, val in class_count.items() if val >= _threshold}
del tmp
return class_count
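# Usage sketch: scored-class counts in the CPSC tranche ("A"), with equivalent classes
# folded together (e.g. RBBB counts are reported under CRBBB) and classes covering less
# than 5% of the tranche labels dropped. With the tables above this should yield roughly
# {"AF": 1221, "IAVB": 722, "NSR": 918, "PAC": 616, "CRBBB": 1857}.
def _demo_get_class_count():
    return get_class_count(tranches="A", scored_only=True, normalize=True, threshold=0.05)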
def get_class_weight(tranches:Union[str, Sequence[str]],
exclude_classes:Optional[Sequence[str]]=None,
scored_only:bool=False,
normalize:bool=True,
threshold:Optional[Real]=0,
fmt:str="a",
                     min_weight:Real=0.5) -> Dict[str, Real]:
""" finished, checked,
Parameters
----------
tranches: str or sequence of str,
        tranches to count classes, can be combinations of "A", "B", "C", "D", "E", "F", "G"
exclude_classes: sequence of str, optional,
        abbreviations or SNOMEDCTCodes of classes to be excluded from counting
    scored_only: bool, default False,
if True, only scored classes are counted
normalize: bool, default True,
collapse equivalent classes into one,
used only when `scored_only` = True
threshold: real number,
minimum ratio (0-1) or absolute number (>1) of a class to be counted
fmt: str, default "a",
the format of the names of the classes in the returned dict,
can be one of the following (case insensitive):
- "a", abbreviations
- "f", full names
- "s", SNOMEDCTCode
min_weight: real number, default 0.5,
minimum value of the weight of all classes,
or equivalently the weight of the largest class
Returns
-------
class_weight: dict,
key: class in the format of `fmt`
value: weight of a class in `tranches`
"""
class_count = get_class_count(
tranches=tranches,
exclude_classes=exclude_classes,
scored_only=scored_only,
normalize=normalize,
threshold=threshold,
fmt=fmt,
)
class_weight = CFG({
key: sum(class_count.values()) / val for key, val in class_count.items()
})
class_weight = CFG({
key: min_weight * val / min(class_weight.values()) for key, val in class_weight.items()
})
return class_weight
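# Usage sketch: inverse-frequency weights for the same selection; the most frequent class
# gets `min_weight` (0.5 by default) and a class half as frequent gets a weight of ~1.0.
def _demo_get_class_weight():
    return get_class_weight(tranches="A", scored_only=True, normalize=True, threshold=0.05)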
# extra statistics
dx_cooccurrence_all_fp = "./dx_cooccurrence_all.csv"
if os.path.isfile(dx_cooccurrence_all_fp):
dx_cooccurrence_all = | pd.read_csv(dx_cooccurrence_all_fp, index_col=0) | pandas.read_csv |
import pytest
from ..dataset import (
magnitude_and_scale,
get_type,
_try_import,
indent,
get_df_type,
cast_and_clean_df,
sql_dataset,
)
import pandas as pd
import numpy as np
import datetime
import pyodbc
import requests
CMD_DROP_TEST_TABLE_IF_EXISTS = "IF OBJECT_ID('test_table', 'U') IS NOT NULL DROP TABLE test_table;"
CMD_CREATE_TEST_TABLE = """
CREATE TABLE test_table (
[dt] datetime NULL,
[dt2] date NOT NULL,
[uid] nvarchar(10) NOT NULL,
[strcol] nvarchar(max) NOT NULL,
[name] nvarchar(10) NULL,
[empty_col] nvarchar(100) NULL,
[float] decimal(22,3) NULL,
[float_k] decimal(22,3) NULL,
[float_m] decimal(22,13) NULL,
[float_b] decimal(22,9) NULL,
[float_na] decimal(22,3) NULL,
[bit] bit NULL,
[bit_na] bit NULL,
[tinyint] tinyint NULL,
[tinyint_na] tinyint NULL,
[smallint] smallint NOT NULL,
[smallint_na] smallint NULL,
[int] int NOT NULL,
[int_na] int NULL,
[bigint] bigint NULL,
[bigint_na] bigint NULL,
[bool] bit NULL,
[bool_na] bit NULL,
[empty_str_col] nvarchar(100) NULL
);
"""
expected_schema = [
['dt', 'datetime', [], True, ''],
['dt2', 'date', [], False, ''],
['uid', 'nvarchar', [10], False, ''],
['strcol', 'nvarchar', ['max'], False, ''],
['name', 'nvarchar', [10], True, ''],
['empty_col', 'nvarchar', [100], True, ''],
['float', 'decimal', [22,3], True, ''],
['float_k', 'decimal', [22,3], True, ''],
['float_m', 'decimal', [22,13], True, ''],
['float_b', 'decimal', [22,9], True, ''],
['float_na', 'decimal', [22,3], True, ''],
['bit', 'bit', [], True, ''],
['bit_na', 'bit', [], True, ''],
['tinyint', 'tinyint', [], True, ''],
['tinyint_na', 'tinyint', [], True, ''],
['smallint', 'smallint', [], False, ''],
['smallint_na', 'smallint', [], True, ''],
['int', 'int', [], False, ''],
['int_na', 'int', [], True, ''],
['bigint', 'bigint', [], True, ''],
['bigint_na', 'bigint', [], True, ''],
['bool', 'bit', [], True, ''],
['bool_na', 'bit', [], True, ''],
['empty_str_col', 'nvarchar', [100], True, ''],
]
# dataset.magnitude_and_scale
def test_magnitude_and_scale_int():
mag, scale = magnitude_and_scale(pd.Series([1, 2, 3]).astype(int))
assert mag == 1
assert scale == 0
def test_magnitude_and_scale_float_type_int():
mag, scale = magnitude_and_scale(pd.Series([123.0, 1.0, 1234.0, np.nan]))
assert mag == 4
assert scale == 0
def test_magnitude_and_scale_float_with_inf():
mag, scale = magnitude_and_scale(pd.Series([1.0, 2.0, np.inf, -np.inf]))
assert mag == 1
assert scale == 0
def test_magnitude_and_scale_zero():
mag, scale = magnitude_and_scale(pd.Series([0]))
assert mag == 1
assert scale == 0
def test_magnitude_and_scale_float():
mag, scale = magnitude_and_scale(pd.Series([123.1234, 12345.1234567, 12.1234567800]))
assert mag == 5
assert scale == 8
def test_magnitude_and_scale_only_frac_part():
mag, scale = magnitude_and_scale(pd.Series([0.12345, 0.123456, 0.123]))
assert mag == 1
assert scale == 6
def test_magnitude_and_scale_empty_raises_error():
with pytest.raises(ValueError) as e_info:
mag, scale = magnitude_and_scale(pd.Series([], dtype='float64'))
def test_magnitude_and_scale_nan_raises_error():
with pytest.raises(ValueError) as e_info:
mag, scale = magnitude_and_scale(pd.Series([np.nan]))
def test_magnitude_and_scale_inf_raises_error():
with pytest.raises(ValueError) as e_info:
mag, scale = magnitude_and_scale(pd.Series([np.inf]))
# dataset.get_type
def test_get_type_decimal():
dtype, params, has_null, comment = get_type(pd.Series([1.1, 2.1, 3.0]))
assert dtype == 'decimal'
assert params == [2, 1]
assert has_null == False
assert comment == ''
dtype, params, has_null, comment = get_type(pd.Series([123.1234, 12345.1234567, 12.1234567800]))
assert dtype == 'decimal'
assert params == [19, 12]
assert has_null == False
assert comment == ''
dtype, params, has_null, comment = get_type(pd.Series([0.12345, 0.123456, 0.123]))
assert dtype == 'decimal'
assert params == [10, 9]
assert has_null == False
assert comment == ''
def test_get_type_decimal_na_inf():
dtype, params, has_null, comment = get_type(pd.Series([1.1, 2.1, 3.0, np.nan]))
assert dtype == 'decimal'
assert params == [2, 1]
assert has_null == True
assert comment == ''
dtype, params, has_null, comment = get_type(pd.Series([1.1, 2.1, 3.0, np.nan, np.inf]))
assert dtype == 'decimal'
assert params == [2, 1]
assert has_null == True
assert comment == ''
def test_get_type_str():
dtype, params, has_null, comment = get_type(pd.Series(['123']))
assert dtype == 'nvarchar'
assert params == [6]
assert has_null == False
assert comment == ''
dtype, params, has_null, comment = get_type( | pd.Series(['a' * 1000]) | pandas.Series |
import pandas as pd
import numpy as np
import requests
import warnings
import scipy as sp
from scipy import stats
try:
import sklearn
except ImportError:
sklearn = False
else:
from sklearn.decomposition import PCA
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from . import filters, process
from .utils import get_protein_id
def correlation(df, rowvar=False):
"""
Calculate column-wise Pearson correlations using ``numpy.ma.corrcoef``
Input data is masked to ignore NaNs when calculating correlations. Data is returned as
a Pandas ``DataFrame`` of column_n x column_n dimensions, with column index copied to
both axes.
:param df: Pandas DataFrame
:return: Pandas DataFrame (n_columns x n_columns) of column-wise correlations
"""
# Create a correlation matrix for all correlations
# of the columns (filled with na for all values)
df = df.copy()
maskv = np.ma.masked_where(np.isnan(df.values), df.values)
cdf = np.ma.corrcoef(maskv, rowvar=False)
cdf = pd.DataFrame(np.array(cdf))
cdf.columns = df.columns
cdf.index = df.columns
cdf = cdf.sort_index(level=0, axis=1)
cdf = cdf.sort_index(level=0)
return cdf
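# Usage sketch (toy data, not from the original module): NaNs are masked rather than
# dropping whole rows, so the partially observed column "c" still gets pairwise
# correlations with "a" and "b".
def _demo_correlation():
    df = pd.DataFrame({
        "a": [1.0, 2.0, 3.0, 4.0],
        "b": [2.1, 3.9, 6.2, 7.8],
        "c": [1.0, np.nan, 2.0, 2.5],
    })
    cdf = correlation(df)
    # cdf is a 3x3 DataFrame with the column labels on both axes and 1.0 on the diagonal
    return cdf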
def pca(df, n_components=2, mean_center=False, **kwargs):
"""
Principal Component Analysis, based on `sklearn.decomposition.PCA`
Performs a principal component analysis (PCA) on the supplied dataframe, selecting the first ``n_components`` components
in the resulting model. The model scores and weights are returned.
For more information on PCA and the algorithm used, see the `scikit-learn documentation <http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html>`_.
:param df: Pandas ``DataFrame`` to perform the analysis on
:param n_components: ``int`` number of components to select
:param mean_center: ``bool`` mean center the data before performing PCA
:param kwargs: additional keyword arguments to `sklearn.decomposition.PCA`
:return: scores ``DataFrame`` of PCA scores n_components x n_samples
weights ``DataFrame`` of PCA weights n_variables x n_components
"""
if not sklearn:
        raise ImportError('This library depends on scikit-learn (sklearn) to perform PCA analysis')
from sklearn.decomposition import PCA
df = df.copy()
# We have to zero fill, nan errors in PCA
df[ np.isnan(df) ] = 0
if mean_center:
mean = np.mean(df.values, axis=0)
df = df - mean
pca = PCA(n_components=n_components, **kwargs)
pca.fit(df.values.T)
scores = pd.DataFrame(pca.transform(df.values.T)).T
scores.index = ['Principal Component %d (%.2f%%)' % ( (n+1), pca.explained_variance_ratio_[n]*100 ) for n in range(0, scores.shape[0])]
scores.columns = df.columns
weights = pd.DataFrame(pca.components_).T
weights.index = df.index
weights.columns = ['Weights on Principal Component %d' % (n+1) for n in range(0, weights.shape[1])]
return scores, weights
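# Usage sketch: in this module's convention samples are columns (the data is transposed
# before fitting), so `scores` is n_components x n_samples and `weights` is
# n_variables x n_components.
def _demo_pca(df):
    scores, weights = pca(df, n_components=2, mean_center=True)
    pc1 = scores.iloc[0]   # per-sample coordinates on PC1; the label includes % variance explained
    return pc1, weights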
def plsda(df, a, b, n_components=2, mean_center=False, scale=True, **kwargs):
"""
Partial Least Squares Discriminant Analysis, based on `sklearn.cross_decomposition.PLSRegression`
Performs a binary group partial least squares discriminant analysis (PLS-DA) on the supplied
dataframe, selecting the first ``n_components``.
Sample groups are defined by the selectors ``a`` and ``b`` which are used to select columns
from the supplied dataframe. The result model is applied to the entire dataset,
projecting non-selected samples into the same space.
For more information on PLS regression and the algorithm used, see the `scikit-learn documentation <http://scikit-learn.org/stable/modules/generated/sklearn.cross_decomposition.PLSRegression.html>`_.
:param df: Pandas ``DataFrame`` to perform the analysis on
:param a: Column selector for group a
:param b: Column selector for group b
:param n_components: ``int`` number of components to select
:param mean_center: ``bool`` mean center the data before performing PLS regression
:param kwargs: additional keyword arguments to `sklearn.cross_decomposition.PLSRegression`
:return: scores ``DataFrame`` of PLSDA scores n_components x n_samples
weights ``DataFrame`` of PLSDA weights n_variables x n_components
"""
if not sklearn:
        raise ImportError('This library depends on scikit-learn (sklearn) to perform PLS-DA')
from sklearn.cross_decomposition import PLSRegression
df = df.copy()
# We have to zero fill, nan errors in PLSRegression
df[ np.isnan(df) ] = 0
if mean_center:
mean = np.mean(df.values, axis=0)
df = df - mean
sxa, _ = df.columns.get_loc_level(a)
sxb, _ = df.columns.get_loc_level(b)
dfa = df.iloc[:, sxa]
dfb = df.iloc[:, sxb]
dff = pd.concat([dfa, dfb], axis=1)
y = np.ones(dff.shape[1])
y[np.arange(dfa.shape[1])] = 0
plsr = PLSRegression(n_components=n_components, scale=scale, **kwargs)
plsr.fit(dff.values.T, y)
# Apply the generated model to the original data
x_scores = plsr.transform(df.values.T)
scores = pd.DataFrame(x_scores.T)
scores.index = ['Latent Variable %d' % (n+1) for n in range(0, scores.shape[0])]
scores.columns = df.columns
weights = | pd.DataFrame(plsr.x_weights_) | pandas.DataFrame |
"""
-------------------------------------------------------
Duplicated Bug Report Detection
-------------------------------------------------------
Copyright (c) 2020.
Author: <NAME>
Email: <EMAIL>
https://github.com/ghasemieh
__Updated__ = 1/29/20, 6:35 AM.
-------------------------------------------------------
"""
import pymongo
import time
from flask import Flask, render_template, request, redirect
from pandas import merge, DataFrame
import os
from Modules import postgres as ps, similarity_models as sm
from Modules.Bugzilla_API import API_data_extract_2
from Modules.text_processing import preprocessing
app = Flask(__name__) # Present on the website
ps.create_table() # create the table if is not existed
with open('current_bug_id.txt', 'r') as f:
current_bug_id = f.read() # Set bug id pointer
data_df = | DataFrame() | pandas.DataFrame |
from typing import List, Tuple, Union
from pathlib import Path
from preprocess.peopleStatusDataGenerator import peopleStatusDataGenerator
from utils.aioLogger import aioLogger
from preprocess.peopleStatusCsvSelector import peopleStatusCsvSelector
import pandas as pd
from pandas_profiling import ProfileReport
class peopleStatusRawDataPreprocessor:
def __init__(self, root_folder: Path, normalize: bool) -> None:
self.normalize = normalize
self.logger = aioLogger(self.__class__.__name__).logger
self.root_folder = root_folder
self.csv_selector = peopleStatusCsvSelector(self.root_folder)
self.id = root_folder.name
self.get_and_read_files()
def get_and_read_files(self):
files = self.csv_selector.get_matched_files()
if files:
self.files = files
self._read_files()
else:
self.logger.warning(f"get and read file from {self.root_folder} ERROR !")
def _read_files(self):
dfs = []
for file in self.files:
dfs.append(
peopleStatusDataGenerator(
csv_file=file, normalize=self.normalize
).df_output
)
self.df = pd.concat(dfs, ignore_index=True)
def create_txt_file_for_cut_position(self, file_stem: str):
"""this is used to temp get cut position manually
:param file_stem: [file name without extention]
:type file_stem: str
"""
file = self.root_folder / f"{file_stem}.txt"
if not file.exists():
with open(file, mode="w") as f:
f.writelines(f",\n")
f.writelines(f",\n")
f.writelines(f",\n")
@staticmethod
def save_data_profile(df: pd.DataFrame, title: str):
# config_default.yaml to have correlation charts // config_minimal.yaml without correlation.
profile = ProfileReport(df, config_file=r".\config\config_default.yaml")
save_folder = Path(r"./quick_ds_report")
save_folder.mkdir(exist_ok=True, parents=True)
profile.to_file(save_folder / f"{title}.html")
def remove_some_correct_before_data():
excel_path = Path(f"./data/cartana_status_data_points.xlsx")
df = pd.read_excel(excel_path, header=0)
df_before = df[df["status_2"] == "correct_before"]
index_to_remove = df_before.sample(int(len(df_before) * 0.8)).index
df_reduced = df.drop(index=index_to_remove)
df_reduced.to_excel(
source_folder.parent.joinpath("cartana_status_data_points_reduced.xlsx"),
index=False,
)
if __name__ == "__main__":
source_folder = Path(r"D:\Github\aio\data\Cartana")
# for folder in list(source_folder.iterdir())[:2]:
# rawDataPreprocessorCartANA(folder)
dfs = []
for folder in list(source_folder.iterdir()):
dfs.append(peopleStatusRawDataPreprocessor(folder, normalize=True).df)
df_final = | pd.concat(dfs, ignore_index=True) | pandas.concat |
"""Test suite for packet building."""
import pandas as pd
from unittest import TestCase
from os.path import join, dirname
from capalyzer.packet_parser import DataTableFactory
from capalyzer.packet_parser.data_utils import group_small_cols
PACKET_DIR = join(dirname(__file__), 'built_packet')
def basic_test_runner(tester, name, nrows=2, **kwargs):
"""Check that we can build a table."""
table_factory = DataTableFactory(PACKET_DIR)
tbl = getattr(table_factory, name)(**kwargs)
if nrows >= 0:
tester.assertEqual(tbl.shape[0], nrows)
class TestPacketParser(TestCase):
"""Test suite for packet building."""
def test_make_taxonomy(self):
"""Test that we can build a taxonomy table."""
basic_test_runner(self, 'taxonomy')
def test_group_small_cols(self):
"""Test that we can build a taxonomy table."""
taxa = DataTableFactory(PACKET_DIR).taxonomy()
taxa = group_small_cols(taxa, top=2)
self.assertEqual(taxa.shape[1], 3)
def test_subsample_taxonomy(self):
"""Test that we can build a taxonomy table."""
basic_test_runner(self, 'taxonomy', nrows=6, niter=3, normalize='subsample')
def test_make_core_taxa(self):
"""Test that we can build a taxonomy table."""
basic_test_runner(self, 'core_taxa', nrows=-1)
def test_make_taxa_long(self):
"""Test that we can build a taxonomy table from longform."""
basic_test_runner(self, 'taxonomy', rank='all')
def test_make_amr(self):
"""Test we can make AMR table."""
basic_test_runner(self, 'amrs', nrows=0)
def test_make_pathways(self):
"""Test we can make pathways table."""
basic_test_runner(self, 'pathways')
def test_make_pathways_with_coverage_min(self):
"""Test we can make pathways table."""
basic_test_runner(self, 'pathways', coverage_min=0.5)
def test_make_ags(self):
"""Test we can make AGS vec."""
table_factory = DataTableFactory(PACKET_DIR)
table_factory.ags()
def test_make_hmp(self):
"""Test we can make HMP table."""
table_factory = DataTableFactory(PACKET_DIR)
table_factory.hmp()
def test_make_macrobes(self):
"""Test we can make macrobe table."""
basic_test_runner(self, 'macrobes')
def test_read_props(self):
"""Test we can make read prop table."""
basic_test_runner(self, 'read_props')
def test_taxa_alpha_div(self):
"""Test we can make alpha div vec."""
table_factory = DataTableFactory(PACKET_DIR)
entropy = table_factory.taxa_alpha_diversity()
self.assertTrue((entropy > 0).all())
def test_taxa_alpha_div_genus(self):
"""Test we can make alpha div vec."""
table_factory = DataTableFactory(PACKET_DIR)
entropy = table_factory.taxa_alpha_diversity(rank='genus')
self.assertTrue((entropy > 0).all())
def test_taxa_chao1(self):
"""Test we can make alpha div vec."""
table_factory = DataTableFactory(PACKET_DIR)
chao1 = table_factory.taxa_alpha_diversity(metric='chao1', rarefy=1000 * 1000)
self.assertTrue((chao1 > 0).all())
def test_taxa_beta_div(self):
"""Test we can make beta div table."""
basic_test_runner(self, 'taxa_beta_diversity')
def test_taxa_rarefaction(self):
table_factory = DataTableFactory(PACKET_DIR)
rarefied = table_factory.taxa_rarefaction(ns=[1, 2, 3, 4], nsample=2)
self.assertEqual(rarefied.shape[1], 2)
self.assertEqual(rarefied.shape[0], 10)
def test_metadata_filter_general(self):
"""Test that a basic table is metadata filtered."""
metadata = | pd.DataFrame({'foo': {'haib18CEM5332_HMGTJCCXY_SL342402': 1}}) | pandas.DataFrame |
# pylint: disable=C0103
from __future__ import absolute_import
import pandas as pd
import re
import sys
import unicodedata
import codecs
import xmltodict as xd
from dateutil import parser
import arrow
import gzip
import numpy as np
import string
from six import unichr
from lxml import etree
from docopt import docopt
ns1 = 'http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2'
ns2 = 'http://www.garmin.com/xmlschemas/ActivityExtension/v2'
import unicodedata
def remove_control_characters(s):
return "".join(ch for ch in s if unicodedata.category(ch)[0]!="C")
def strip_control_characters(input):
if input:
# unicode invalid characters
RE_XML_ILLEGAL = u'([\u0000-\u0008\u000b-\u000c\u000e-\u001f\ufffe-\uffff])' + \
u'|' + \
u'([%s-%s][^%s-%s])|([^%s-%s][%s-%s])|([%s-%s]$)|(^[%s-%s])' % \
(unichr(0xd800),unichr(0xdbff),unichr(0xdc00),unichr(0xdfff),
unichr(0xd800),unichr(0xdbff),unichr(0xdc00),unichr(0xdfff),
unichr(0xd800),unichr(0xdbff),unichr(0xdc00),unichr(0xdfff),
)
input = re.sub(RE_XML_ILLEGAL, "", input)
#input = "".join(ch for ch in input if unicodedata.category(ch)[0]!="C")
# ascii control characters
#input = re.sub(r"[\x01-\x1F\x7F]", "", input)
return input
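# Example: XML-illegal control characters are removed while printable text is kept:
#   strip_control_characters("abc\x01\x0bdef") -> "abcdef"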
def tcx_getdict(path):
extension = path[-3:].lower()
if extension == '.gz':
with gzip.open(path,'r') as f:
input = f.read()
input = strip_control_characters(input)
d = xd.parse(input)
else:
with open(path, 'r') as f:
input = f.read()
input = strip_control_characters(input)
d = xd.parse(input)
return d['TrainingCenterDatabase']
def tcxgetactivities(d):
try:
return d['Activities']['Activity']
except KeyError:
return None
def tcxactivitygetid(d):
try:
return d['Id']
except KeyError:
return None
def tcxactivitygetlaps(d):
try:
return d['Lap']
except KeyError:
return None
except TypeError:
try:
return d[0]['Lap']
except KeyError:
return None
def tcxlapgettrack(d):
try:
return d['Track']
except KeyError:
return None
def tcxtrackgettrackpoint(d):
try:
return d['Trackpoint']
except KeyError:
return None
except TypeError:
try:
return d[0]['Trackpoint']
except KeyError:
return None
def getvalue(x,key):
try:
return x[key]
except TypeError:
return np.nan
def tcxtrack_getdata(track):
trackpoints = tcxtrackgettrackpoint(track)
df = pd.DataFrame(trackpoints)
datetime = df['Time'].apply(lambda x: parser.parse(x, fuzzy=True))
df['timestamp'] = datetime.apply(
lambda x: arrow.get(x).timestamp()+arrow.get(x).microsecond/1.e6
)
try:
#df['latitude'] = df['Position'].apply(lambda x: x['LatitudeDegrees'])
#df['longitude'] = df['Position'].apply(lambda x: x['LongitudeDegrees'])
df['latitude'] = df['Position'].apply(
lambda x: getvalue(x,'LatitudeDegrees'))
df['longitude'] = df['Position'].apply(
lambda x: getvalue(x,'LongitudeDegrees'))
except KeyError:
pass
except TypeError:
pass
for key in df.keys():
v = df[key].dropna().values
try:
if len(v) and 'Value' in v[0]:
l = df[key].apply(pd.Series)
df[key] = l['Value']
except TypeError:
pass
if key == 'Extensions':
extensionsdf = df[key].apply(pd.Series)
thekeys = list(extensionsdf.keys())
for counter, key in enumerate(thekeys):
if key:
df['extension'+str(counter)] = key
l = extensionsdf[key].apply(pd.Series)
if 'Extensions' in list(l.keys()):
#print 'aap'
l = l.apply(pd.Series)['Extensions'].apply(pd.Series)
for kk in l.keys():
if kk != 0 and 'xmlns' not in kk:
df[kk] = l[kk]
return df
def tcxtodf(path):
data = tcx_getdict(path)
activity = tcxgetactivities(data)
laps = tcxactivitygetlaps(activity)
try:
track = tcxlapgettrack(laps)
df = tcxtrack_getdata(track)
except TypeError:
df = | pd.DataFrame() | pandas.DataFrame |
from trefle.fitness_functions.output_thresholder import round_to_cls
from sklearn.metrics import confusion_matrix
import libraries.measures_calculation
import pandas as pd
import numpy as np
def getConfusionMatrixValues(y_true, y_pred):
"""
    return the confusion matrix values (tn, fp, fn, tp)
:param y_true: True labels
:param y_pred: Labels predicted by the algorithm
:type y_true: [[int]] - required
:type y_pred: [[int]] - required
    :return: The confusion matrix counts (tn, fp, fn, tp)
    :rtype: tuple of int
"""
y_pred_bin = round_to_cls(y_pred, n_classes=2)
tn, fp, fn, tp = confusion_matrix(y_true, y_pred_bin).ravel()
return tn, fp, fn, tp
def calculateMean(dictionary_values):
for key in dictionary_values:
dictionary_values[key] = np.mean(dictionary_values[key])
return dictionary_values
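# Example: calculateMean({"acc": [0.8, 0.9], "f1": [0.7, 0.8]}) -> {"acc": 0.85, "f1": 0.75}
# (the input dict is modified in place and also returned)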
def calculateMeasures(df_values_experiments, vec_measures):
dict_measures = { i : [] for i in vec_measures }
for index, row in df_values_experiments.iterrows():
tn, fp, fn, tp = row['tn'], row['fp'], row['fn'], row['tp']
if 'acc' in vec_measures:
acc = libraries.measures_calculation.calculateAccuracy(tn, fp, fn, tp)
dict_measures['acc'].append(acc)
if 'f1' in vec_measures:
f1 = libraries.measures_calculation.calculateF1(tn, fp, fn, tp)
dict_measures['f1'].append(f1)
if 'sen' in vec_measures:
sen = libraries.measures_calculation.calculateSensitivity(tn, fp, fn, tp)
dict_measures['sen'].append(sen)
if 'spe' in vec_measures:
spe = libraries.measures_calculation.calculateSpecificity(tn, fp, fn, tp)
dict_measures['spe'].append(spe)
dict_measures = calculateMean(dict_measures)
return dict_measures
def treatmentResultsValues(data_frame, parameter_a_name:str, parameter_b_name:str, vec_measures:list):
df1 = data_frame.iloc[:,0:2]
df1 = df1.drop_duplicates()
#Get all different configurations
qty_experiments = df1.shape[0]
#Start tu calculate
param_a_designation = 'param_a'
param_b_designation = 'param_b'
list_dict = []
for index, row in df1.iterrows():
df_experiment = data_frame.query("{0} == {1} and {2} == {3}".format(param_a_designation, row[0], param_b_designation, row[1]))
results = calculateMeasures(df_experiment,vec_measures)
list_dict.append(results)
results_dataframe = | pd.DataFrame(list_dict) | pandas.DataFrame |
from __future__ import division
import pytest
import numpy as np
from datetime import timedelta
from pandas import (
Interval, IntervalIndex, Index, isna, notna, interval_range, Timestamp,
Timedelta, compat, date_range, timedelta_range, DateOffset)
from pandas.compat import lzip
from pandas.tseries.offsets import Day
from pandas._libs.interval import IntervalTree
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
import pandas as pd
@pytest.fixture(scope='class', params=['left', 'right', 'both', 'neither'])
def closed(request):
return request.param
@pytest.fixture(scope='class', params=[None, 'foo'])
def name(request):
return request.param
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples(
[(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self, closed='right'):
return IntervalIndex.from_breaks(range(11), closed=closed)
def create_index_with_nan(self, closed='right'):
mask = [True, False] + [True] * 8
return IntervalIndex.from_arrays(
np.where(mask, np.arange(10), np.nan),
np.where(mask, np.arange(1, 11), np.nan), closed=closed)
def test_constructors(self, closed, name):
left, right = Index([0, 1, 2, 3]), Index([1, 2, 3, 4])
ivs = [Interval(l, r, closed=closed) for l, r in lzip(left, right)]
expected = IntervalIndex._simple_new(
left=left, right=right, closed=closed, name=name)
result = IntervalIndex(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_arrays(
left.values, right.values, closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
lzip(left, right), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = Index(ivs, name=name)
assert isinstance(result, IntervalIndex)
tm.assert_index_equal(result, expected)
# idempotent
tm.assert_index_equal(Index(expected), expected)
tm.assert_index_equal(IntervalIndex(expected), expected)
result = IntervalIndex.from_intervals(expected)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(
expected.values, name=expected.name)
tm.assert_index_equal(result, expected)
left, right = expected.left, expected.right
result = IntervalIndex.from_arrays(
left, right, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
expected.to_tuples(), closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
breaks = expected.left.tolist() + [expected.right[-1]]
result = IntervalIndex.from_breaks(
breaks, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('data', [[np.nan], [np.nan] * 2, [np.nan] * 50])
def test_constructors_nan(self, closed, data):
# GH 18421
expected_values = np.array(data, dtype=object)
expected_idx = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_idx.closed == closed
tm.assert_numpy_array_equal(expected_idx.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks([np.nan] + data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
@pytest.mark.parametrize('data', [
[],
np.array([], dtype='int64'),
np.array([], dtype='float64'),
np.array([], dtype=object)])
def test_constructors_empty(self, data, closed):
# GH 18421
expected_dtype = data.dtype if isinstance(data, np.ndarray) else object
expected_values = np.array([], dtype=object)
expected_index = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_index.empty
assert expected_index.closed == closed
assert expected_index.dtype.subtype == expected_dtype
tm.assert_numpy_array_equal(expected_index.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = | IntervalIndex.from_intervals(data) | pandas.IntervalIndex.from_intervals |
import json
import os
import zipfile
from io import StringIO
import pandas as pd
import uuid
import shutil
from wsgiref.util import FileWrapper
import datetime
# import the logging library
import logging
# Get an instance of a logger
logger = logging.getLogger(__name__)
from django.db import connections
from django.http import HttpResponse, FileResponse
from django.shortcuts import render
from rest_framework import viewsets
from django.core.files.storage import default_storage
from django.core.files.base import ContentFile
from django.core.mail import send_mail
from django.conf import settings
from django.http import JsonResponse
from rest_framework.parsers import BaseParser
from rest_framework.exceptions import ParseError
from rest_framework.views import APIView
from rest_framework.response import Response
from django.views import View
from celery.result import AsyncResult
from api.security import ISpyBSafeQuerySet
from api.utils import get_params, get_highlighted_diffs
from viewer.models import (
Molecule,
Protein,
Compound,
Target,
ActionType,
SessionProject,
SessionActions,
Snapshot,
SnapshotActions,
ComputedMolecule,
ComputedSet,
CSetKeys,
NumericalScoreValues,
ScoreDescription,
File,
TagCategory,
MoleculeTag,
SessionProjectTag,
DownloadLinks
)
from viewer import filters
from .forms import CSetForm, UploadKeyForm, CSetUpdateForm, TSetForm
from .tasks import *
from .discourse import create_discourse_post, list_discourse_posts_for_topic, check_discourse_user
from viewer.serializers import (
MoleculeSerializer,
ProteinSerializer,
CompoundSerializer,
TargetSerializer,
MolImageSerialzier,
CmpdImageSerialzier,
ProtMapInfoSerialzer,
ProtPDBInfoSerialzer,
ProtPDBBoundInfoSerialzer,
VectorsSerializer,
GraphSerializer,
ActionTypeSerializer,
SessionProjectWriteSerializer,
SessionProjectReadSerializer,
SessionActionsSerializer,
SnapshotReadSerializer,
SnapshotWriteSerializer,
SnapshotActionsSerializer,
FileSerializer,
ComputedSetSerializer,
ComputedMoleculeSerializer,
NumericalScoreSerializer,
ScoreDescriptionSerializer,
TextScoreSerializer,
ComputedMolAndScoreSerializer,
DiscoursePostWriteSerializer,
DictToCsvSerializer,
TagCategorySerializer,
MoleculeTagSerializer,
SessionProjectTagSerializer,
TargetMoleculesSerializer,
DownloadStructuresSerializer
)
# filepaths mapping for writing associated files to the zip archive.
_ZIP_FILEPATHS = {
'pdb_info': ('pdbs'),
'bound_info': ('bound'),
'cif_info': ('cifs'),
'mtz_info': ('mtzs'),
'map_info': ('maps'),
'sigmaa_info': ('maps'),
'diff_info': ('maps'),
'event_info': ('maps'),
'trans_matrix_info': ('trans'),
'sdf_info': ('sdfs'),
'single_sdf_file': ('sdfs'),
'metadata_info': (''),
'smiles_info': (''),
}
class VectorsView(ISpyBSafeQuerySet):
""" DjagnoRF view for vectors
Methods
-------
url:
api/vector
queryset:
`viewer.models.Molecule.objects.filter()`
filter fields:
- `viewer.models.Molecule.prot_id` - ?prot_id=<int>
- `viewer.models.Molecule.cmpd_id` - ?cmpd_id=<int>
- `viewer.models.Molecule.smiles` - ?smiles=<str>
- `viewer.models.Molecule.prot_id__target_id` - ?target_id=<int>
returns:
vectors for a given molecule generated by `frag.network.generate.get_3d_vects_for_mol()` (JSON)
"""
queryset = Molecule.objects.filter()
serializer_class = VectorsSerializer
filter_permissions = "prot_id__target_id__project_id"
filter_fields = ("prot_id", "cmpd_id", "smiles", "prot_id__target_id", "mol_groups")
class GraphView(ISpyBSafeQuerySet):
""" DjagnoRF view for graph
Methods
-------
url:
api/graph
queryset:
`viewer.models.Molecule.objects.filter()`
filter fields:
- `viewer.models.Molecule.prot_id` - ?prot_id=<int>
- `viewer.models.Molecule.cmpd_id` - ?cmpd_id=<int>
- `viewer.models.Molecule.smiles` - ?smiles=<str>
- `viewer.models.Molecule.prot_id__target_id` - ?target_id=<int>
- `viewer.models.Molecule.mol_groups` - ?mol_groups=<int>,<int>
returns:
graph network results for given molecule from `frag.network.query.get_full_graph()` (JSON)
example output:
.. code-block:: javascript
"results": [
{
"id": 385,
"graph": {
"CC(=O)Nc1cnccc1[Xe]_1_DELETION": {
"vector": "CC(O)NC1CCCCC1[101Xe]",
"addition": [
{
"change": "C[101Xe]",
"end": "CC(=O)Nc1cccnc1",
"compound_ids": [
"REAL:PV-001793547821",
"MOLPORT:000-165-661"
]
}
]
},
"C[Xe].NC(=O)C[Xe]_2_LINKER": {
"vector": "C[101Xe].NC(O)C[100Xe]",
"addition": [
{
"change": "CNC1CC([100Xe])C(O)C1[101Xe]",
"end": "CN=C1SC(CC(N)=O)C(=O)N1C",
"compound_ids": [
"MOLPORT:000-680-640"
]
},
{
"change": "[100Xe]C1CCCCC1[101Xe]",
"end": "CC1CCCCN1CC(N)=O",
"compound_ids": [
"REAL:Z54751033",
"MOLPORT:001-599-191"
]
}
]
},
"Cc1ccnc(Cl)c1[Xe]_2_REPLACE": {
"vector": "CC1CCCC(Cl)C1[100Xe]",
"addition": [
{
"change": "CC(O)N[100Xe]",
"end": "Cc1ccnc(Cl)c1",
"compound_ids": [
"MOLPORT:000-140-635"
]
}
]
}
}
}
]
"""
queryset = Molecule.objects.filter()
serializer_class = GraphSerializer
filter_permissions = "prot_id__target_id__project_id"
filter_fields = ("prot_id", "cmpd_id", "smiles", "prot_id__target_id", "mol_groups")
class MolImageView(ISpyBSafeQuerySet):
""" DjagnoRF view for molecule images
Methods
-------
url:
api/molimg
queryset:
`viewer.models.Molecule.objects.filter()`
filter fields:
- `viewer.models.Molecule.prot_id` - ?prot_id=<int>
- `viewer.models.Molecule.cmpd_id` - ?cmpd_id=<int>
- `viewer.models.Molecule.smiles` - ?smiles=<str>
- `viewer.models.Molecule.prot_id__target_id` - ?target_id=<int>
- `viewer.models.Molecule.mol_groups` - ?mol_groups=<int>,<int>
returns:
SVG image text for query molecule generated by `api.utils.draw_mol()` (JSON)
example output:
.. code-block:: javascript
"results": [
{"id": 13912,
"mol_image": "<?xml version='1.0' encoding='iso-8859-1'?><svg version='1.1' nk'..."}]
"""
queryset = Molecule.objects.filter()
serializer_class = MolImageSerialzier
filter_permissions = "prot_id__target_id__project_id"
filter_fields = ("prot_id", "cmpd_id", "smiles", "prot_id__target_id", "mol_groups")
class CompoundImageView(ISpyBSafeQuerySet):
""" DjagnoRF view for compound images
Methods
-------
url:
api/cmpdimg
queryset:
`viewer.models.Compound.objects.filter()`
filter fields:
- `viewer.models.Molecule.smiles` - ?smiles=<str>
returns:
SVG image text for query compound generated by `api.utils.draw_mol()` (JSON)
example output:
.. code-block:: javascript
"results": [
{"id": 13912,
"mol_image": "<?xml version='1.0' encoding='iso-8859-1'?><svg version='1.1' nk'..."}]
"""
queryset = Compound.objects.filter()
serializer_class = CmpdImageSerialzier
filter_permissions = "project_id"
filter_fields = ("smiles",)
class ProteinMapInfoView(ISpyBSafeQuerySet):
""" DjagnoRF view to retrieve map info (file) for a given protein
Methods
-------
url:
api/protmap
queryset:
`viewer.models.Protein.objects.filter()`
filter fields:
- `viewer.models.Protein.code` - ?code=<str>
- `viewer.models.Protein.target_id` - ?target_id=<int>
- `viewer.models.Protein.prot_type` - ?prot_type=<str>
returns:
If a map file has been uploaded for the protein `map_info.path.read()` (JSON)
"""
queryset = Protein.objects.filter()
serializer_class = ProtMapInfoSerialzer
filter_permissions = "target_id__project_id"
filter_fields = ("code", "target_id", "target_id__title", "prot_type")
class ProteinPDBInfoView(ISpyBSafeQuerySet):
""" DjagnoRF view to retrieve apo pdb info (file) for a given protein
Methods
-------
url:
api/protpdb
queryset:
`viewer.models.Protein.objects.filter()`
filter fields:
- `viewer.models.Protein.code` - ?code=<str>
- `viewer.models.Protein.target_id` - ?target_id=<int>
- `viewer.models.Protein.prot_type` - ?prot_type=<str>
returns: JSON
- id: id of the protein object
- pdb_data: If a pdb file has been uploaded for the protein `bound_info.path.read()`
- prot_type: type of protein (e.g. AP for apo - see docs for model)
example output:
.. code-block:: javascript
"results": [
{
"id": 27387,
"pdb_data": "REMARK warning: chains may be ommitted for alignment REMARK ...",
"prot_type": "AP"
},]
"""
queryset = Protein.objects.filter()
serializer_class = ProtPDBInfoSerialzer
filter_permissions = "target_id__project_id"
filter_fields = ("code", "target_id", "target_id__title", "prot_type")
class ProteinPDBBoundInfoView(ISpyBSafeQuerySet):
""" DjagnoRF view to retrieve bound pdb info (file) for a given protein
Methods
-------
url:
api/protpdbbound
queryset:
`viewer.models.Protein.objects.filter()`
filter fields:
- `viewer.models.Protein.code` - ?code=<str>
- `viewer.models.Protein.target_id` - ?target_id=<int>
- `viewer.models.Protein.prot_type` - ?prot_type=<str>
returns: JSON
- id: id of the protein object
- pdb_data: If a pdb file has been uploaded for the protein `bound_info.path.read()`
- prot_type: type of protein (e.g. AP for apo - see docs for model)
example output:
.. code-block:: javascript
"results": [
{
"id": 27387,
"pdb_data": "REMARK warning: chains may be ommitted for alignment REMARK ...",
"prot_type": "AP"
},]
"""
queryset = Protein.objects.filter()
serializer_class = ProtPDBBoundInfoSerialzer
filter_permissions = "target_id__project_id"
filter_fields = ("code", "target_id", "target_id__title", "prot_type")
class TargetView(ISpyBSafeQuerySet):
""" DjagnoRF view to retrieve info about targets
Methods
-------
url:
api/targets
queryset:
`viewer.models.Target.objects.filter()`
filter fields:
- `viewer.models.Target.title` - ?title=<str>
returns: JSON
- id: id of the target object
- title: name of the target
- project_id: list of the ids of the projects the target is linked to
- protein_set: list of the ids of the protein sets the target is linked to
- template_protein: the template protein displayed in fragalysis front-end for this target
- metadata: link to the metadata file for the target if it was uploaded
- zip_archive: link to the zip archive of the uploaded data
example output:
.. code-block:: javascript
"results": [
{
"id": 62,
"title": "Mpro",
"project_id": [
2
],
"protein_set": [
29281,
29274,
29259,
29305,
...,
],
"template_protein": "/media/pdbs/Mpro-x10417_0_apo.pdb",
"metadata": "http://fragalysis.diamond.ac.uk/media/metadata/metadata_2FdP5OJ.csv",
"zip_archive": "http://fragalysis.diamond.ac.uk/media/targets/Mpro.zip"
}
]
"""
queryset = Target.objects.filter()
serializer_class = TargetSerializer
filter_permissions = "project_id"
filter_fields = ("title",)
class MoleculeView(ISpyBSafeQuerySet):
""" DjagnoRF view to retrieve info about molecules
Methods
-------
url:
api/molecules
queryset:
`viewer.models.Molecule.objects.filter()`
filter fields:
- `viewer.models.Molecule.prot_id` - ?prot_id=<int>
- `viewer.models.Molecule.cmpd_id` - ?cmpd_id=<int>
- `viewer.models.Molecule.smiles` - ?smiles=<string>
- `viewer.models.Molecule.prot_id__target_id` - ?target_id=<int>
- `viewer.models.Molecule.mol_type` - ?mol_type=<str>
- `viewer.models.Molecule.mol_groups` - ?mol_groups=<int>,<int>
returns: JSON
- id: id of the target object
- smiles: smiles string of the molecule
- cmpd_id: id of the related 2D compound object
- prot_id: id of the related protein object
- protein_code: code of the related protein object
- mol_type: type of molecule - see Molecule model docs
- molecule_protein: filepath of the apo protein structure for the molecule
- lig_id: residue label for the ligand
- chain_id: chain in the pdb file that the ligand belongs to
- sdf_info: 3D coordinates of the molecule in MDL file format
- x_com: x-coordinate for molecule centre of mass
- y_com: y-coordinate for molecule centre of mass
- z_com: z-coordinate for molecule centre of mass
- mw: molecular weight
- logp: LogP
- tpsa: Topological Polar Surface Area
- ha: heavy atom count
- hacc: hydrogen-bond acceptors
- hdon: hydrogen-bond donors
- rots: number of rotatable bonds
- rings: number of rings
- velec: number of valence electrons
example output:
.. code-block:: javascript
"results": [
{
"id": 13912,
"smiles": "CN(C)c1ccc(C(=O)Nc2ccccn2)cc1",
"cmpd_id": 796,
"prot_id": 13923,
"protein_code": "NUDT7A_Crude-x2226_2",
"mol_type": "PR",
"molecule_protein": "/media/pdbs/NUDT7A_Crude-x2226_2_apo_x5GxiLq.pdb",
"lig_id": "LIG",
"chain_id": "Z",
"sdf_info": " RDKit 3D 18 19 0 0 0 0 0 0 0 0999...",
"x_com": null,
"y_com": null,
"z_com": null,
"mw": 241.12,
"logp": 2.4,
"tpsa": 45.23,
"ha": 18,
"hacc": 3,
"hdon": 1,
"rots": 3,
"rings": 2,
"velec": 92
},]
"""
queryset = Molecule.objects.filter()
serializer_class = MoleculeSerializer
filter_permissions = "prot_id__target_id__project_id"
filter_fields = (
"prot_id",
"prot_id__code",
"cmpd_id",
"smiles",
"prot_id__target_id",
"prot_id__target_id__title",
"mol_type",
"mol_groups",
)
class CompoundView(ISpyBSafeQuerySet):
""" DjagnoRF view for compound info
Methods
-------
url:
api/compounds
queryset:
`viewer.models.Compound.objects.filter()`
filter fields:
- `viewer.models.Compound.smiles` - ?smiles=<str>
returns:
- id: id for compound object
- inchi: inchi key for compound
- smiles: smiles string for compound
- mol_log_p: LogP for compound
- mol_wt: molecular weight of the compound
- num_h_acceptors: number of hydrogen-bond acceptors
- num_h_donors: number of hydrogen-bond donors
example output:
.. code-block:: javascript
"results": [
{
"id": 1,
"inchi": "InChI=1S/C9H15NOS/c1-7(11)5-10-6-9-4-3-8(2)12-9/h3-4,7,10-11H,5-6H2,1-2H3/t7-/m0/s1",
"smiles": "Cc1ccc(CNC[C@H](C)O)s1",
"mol_log_p": 1.52692,
"mol_wt": 185.0874351,
"num_h_acceptors": 3,
"num_h_donors": 2
},]
"""
queryset = Compound.objects.filter()
serializer_class = CompoundSerializer
filter_permissions = "project_id"
filter_fields = ("smiles", "current_identifier", "inchi", "long_inchi")
class ProteinView(ISpyBSafeQuerySet):
""" DjagnoRF view to retrieve bound pdb info (file) for a given protein
Methods
-------
url:
api/proteins
queryset:
`viewer.models.Protein.objects.filter()`
filter fields:
- `viewer.models.Protein.code` - ?code=<str>
- `viewer.models.Protein.target_id` - ?target_id=<int>
- `viewer.models.Protein.prot_type` - ?prot_type=<str>
returns: JSON
- id: id of the protein object
- code: the code/name of the protein
- target_id: the id of the related target object
- prot_type: the type of protein (e.g. AP for apo)
- pdb_info: link to the apo pdb file
- bound_info: link to the bound pdb file
- mtz_info: link to the mtz file
- map_info: link to the map file
- cif_info: link to the cif file
example output:
.. code-block:: javascript
"results": [
{
"id": 14902,
"code": "XX02KALRNA-x1376_1",
"target_id": 51,
"prot_type": "AP",
"pdb_info": "http://fragalysis.diamond.ac.uk/media/pdbs/XX02KALRNA-x1376_1_apo_9VSCvR8.pdb",
"bound_info": "http://fragalysis.diamond.ac.uk/media/bound/XX02KALRNA-x1376_1_bound_6xmXkUm.pdb",
"mtz_info": null,
"map_info": null,
"cif_info": null
},]
"""
queryset = Protein.objects.filter()
serializer_class = ProteinSerializer
filter_permissions = "target_id__project_id"
filter_fields = ("code", "target_id", "target_id__title", "prot_type")
def react(request):
"""
:param request:
:return: viewer/react page with context
"""
discourse_api_key = settings.DISCOURSE_API_KEY
context = {}
context['discourse_available'] = 'false'
if discourse_api_key:
context['discourse_available'] = 'true'
# If user is authenticated and a discourse api key is available, then check discourse to
# see if user is set up and set up flag in context.
user = request.user
if user.is_authenticated:
context['discourse_host'] = ''
context['user_present_on_discourse'] = 'false'
if discourse_api_key:
context['discourse_host'] = settings.DISCOURSE_HOST
error, error_message, user_id = check_discourse_user(user)
if user_id:
context['user_present_on_discourse'] = 'true'
return render(request, "viewer/react_temp.html", context)
# Upload Compound set functions
# email cset upload key
def cset_key(request):
""" View to render and control viewer/generate-key.html - a page allowing an upload key to be generated for a user
allowing upload of computed sets
Methods
-------
allowed requests:
- GET: renders form
- POST: generates an upload key, emails it to the user, and informs the user that this will happen
url:
viewer/cset_key
template:
viewer/generate-key.html
request params:
- contact_email (django.forms.FormField): user contact email
context:
- form (`django.Forms.form`): instance of `viewer.forms.UploadKeyForm`
- message (str): A message rendered in the template. Informs the user that their upload key will be emailed
"""
form = UploadKeyForm()
if request.method == 'POST':
form = UploadKeyForm(request.POST)
email = request.POST['contact_email']
new_key = CSetKeys()
new_key.user = email
new_key.save()
key_value = new_key.uuid
subject = 'Fragalysis: upload compound set key'
message = 'Your upload key is: ' + str(
key_value) + ' store it somewhere safe. Only one key will be issued per user'
email_from = settings.EMAIL_HOST_USER
recipient_list = [email, ]
send_mail(subject, message, email_from, recipient_list)
msg = 'Your key will be emailed to: <b>' + email + '</b>'
return render(request, 'viewer/generate-key.html', {'form': form, 'message': msg})
return render(request, 'viewer/generate-key.html', {'form': form, 'message': ''})
def save_pdb_zip(pdb_file):
zf = zipfile.ZipFile(pdb_file)
zip_lst = zf.namelist()
zfile = {}
zfile_hashvals = {}
print(zip_lst)
for filename in zip_lst:
# only handle pdb files
if filename.split('.')[-1] == 'pdb':
f = filename.split('/')[0]
save_path = os.path.join(settings.MEDIA_ROOT, 'tmp', f)
if default_storage.exists(f):
rand_str = uuid.uuid4().hex
pdb_path = default_storage.save(save_path.replace('.pdb', f'-{rand_str}.pdb'), ContentFile(zf.read(filename)))
# Test if Protein object already exists
# code = filename.split('/')[-1].replace('.pdb', '')
# test_pdb_code = filename.split('/')[-1].replace('.pdb', '')
# test_prot_objs = Protein.objects.filter(code=test_pdb_code)
#
# if len(test_prot_objs) > 0:
# # make a unique pdb code as not to overwrite existing object
# rand_str = uuid.uuid4().hex
# test_pdb_code = f'{code}#{rand_str}'
# zfile_hashvals[code] = rand_str
#
# fn = test_pdb_code + '.pdb'
#
# pdb_path = default_storage.save('tmp/' + fn,
# ContentFile(zf.read(filename)))
else:
pdb_path = default_storage.save(save_path, ContentFile(zf.read(filename)))
test_pdb_code = pdb_path.split('/')[-1].replace('.pdb', '')
zfile[test_pdb_code] = pdb_path
# Close the zip file
if zf:
zf.close()
return zfile, zfile_hashvals
def save_tmp_file(myfile):
""" Save file in temporary location for validation/upload processing
"""
name = myfile.name
path = default_storage.save('tmp/' + name, ContentFile(myfile.read()))
tmp_file = str(os.path.join(settings.MEDIA_ROOT, path))
return tmp_file
class UpdateCSet(View):
""" View to allow addition of new molecules/pdb files to an existing Computed Set
Methods
-------
allowed requests:
- GET: renders form
- POST: starts the `add_cset_mols` celery task to add the provided molecules/pdb files to the chosen existing computed set
url:
viewer/upload_cset
template:
viewer/update-cset.html
request params:
- target_name (`django.forms.CharField`): Name of the existing fragalysis target the computed set belongs to
- sdf_file (`django.forms.FileField`): SDF file of the computed molecules to add to the existing computed set
- pdb_zip (`django.forms.FileField`): zip file of apo pdb files referenced in the ref_pdb field for molecules in sdf_file (optional)
- update_set: name of the existing computed set that the new molecules should be added to
context:
- form (`django.Forms.form`): instance of `viewer.forms.CSetUpdateForm`
- update_task_id (str): celery task id for the update step
- update_task_status (str): celery task status for the update step
"""
def get(self, request):
form = CSetUpdateForm()
existing_sets = ComputedSet.objects.all()
return render(request, 'viewer/update-cset.html', {'form': form, 'sets': existing_sets})
def post(self, request):
check_services()
zfile = None
form = CSetUpdateForm(request.POST, request.FILES)
context = {}
if form.is_valid():
# get all of the variables needed from the form
myfile = request.FILES['sdf_file']
target = request.POST['target_name']
# get update choice
update_set = request.POST['update_set']
if 'pdb_zip' in list(request.FILES.keys()):
pdb_file = request.FILES['pdb_zip']
else:
pdb_file = None
# if there is a zip file of pdbs, check it for .pdb files, and ignore others
if pdb_file:
zfile, zfile_hashvals = save_pdb_zip(pdb_file)
# save uploaded sdf to tmp storage
tmp_file = save_tmp_file(myfile)
task_update = add_cset_mols.s(cset=update_set, target=target, sdf_file=tmp_file, zfile=zfile).apply_async()
context = {}
context['update_task_id'] = task_update.id
context['update_task_status'] = task_update.status
# Update client side with task id and status
return render(request, 'viewer/update-cset.html', context)
context['form'] = form
return render(request, 'viewer/update-cset.html', context)
class UploadCSet(View):
""" View to render and control viewer/upload-cset.html - a page allowing upload of computed sets. Validation and
upload tasks are defined in `viewer.compound_set_upload`, `viewer.sdf_check` and `viewer.tasks` and the task
response handling is done by `viewer.views.ValidateTaskView` and `viewer.views.UploadTaskView`
Methods
-------
allowed requests:
- GET: renders form
- POST: validates and optionally uploads the computed set that the user provides via the template form
url:
viewer/upload_cset
template:
viewer/upload-cset.html
request params:
- target_name (`django.forms.CharField`): Name of the existing fragalysis target to add the computed set to
- sdf_file (`django.forms.FileField`): SDF file of all computed molecules to upload for the computed set
- pdb_zip (`django.forms.FileField`): zip file of apo pdb files referenced in the ref_pdb field for molecules in sdf_file (optional)
- submit_choice (`django.forms.CharField`): 0 to validate, 1 to validate and upload
- upload_key (`django.forms.CharField`): user's unique upload key, generated by `viewer.views.cset_key`
context:
- form (`django.Forms.form`): instance of `viewer.forms.CSetForm`
- validate_task_id (str): celery task id for validation step
- validate_task_status (str): celery task status for validation step
- upload_task_id (str): celery task id for upload step
- upload_task_status (str): celery task status for upload step
"""
def get(self, request):
# test = TargetView().get_queryset(request=request)
# targets = request.get('/api/targets/')
# int(targets)
form = CSetForm()
existing_sets = ComputedSet.objects.all()
return render(request, 'viewer/upload-cset.html', {'form': form, 'sets': existing_sets})
def post(self, request):
check_services()
zfile = None
zfile_hashvals = None
zf = None
cset = None
form = CSetForm(request.POST, request.FILES)
context = {}
if form.is_valid():
# get the upload key
# key = request.POST['upload_key']
# all_keys = CSetKeys.objects.all()
# if it's not valid, return a message
# if key not in [str(key.uuid) for key in all_keys]:
# html = "<br><p>You either didn't provide an upload key, or it wasn't valid. Please try again
# (email <EMAIL> to obtain an upload key)</p>"
# return render(request, 'viewer/upload-cset.html', {'form': form, 'table': html})
# get all of the variables needed from the form
myfile = request.FILES['sdf_file']
target = request.POST['target_name']
choice = request.POST['submit_choice']
# get update choice
update_set = request.POST['update_set']
if 'pdb_zip' in list(request.FILES.keys()):
pdb_file = request.FILES['pdb_zip']
else:
pdb_file = None
# save uploaded sdf and zip to tmp storage
tmp_sdf_file = save_tmp_file(myfile)
if pdb_file:
tmp_pdb_file = save_tmp_file(pdb_file)
else:
tmp_pdb_file = None
# Settings for if validate option selected
if str(choice) == '0':
# Start celery task
task_validate = validate_compound_set.delay(tmp_sdf_file, target=target, zfile=tmp_pdb_file, update=update_set)
context = {}
context['validate_task_id'] = task_validate.id
context['validate_task_status'] = task_validate.status
# Update client side with task id and status
return render(request, 'viewer/upload-cset.html', context)
# if it's an upload, run the compound set task
if str(choice) == '1':
# Start chained celery tasks. NB first function passes tuple
# to second function - see tasks.py
task_upload = (
validate_compound_set.s(tmp_sdf_file, target=target, zfile=tmp_pdb_file, update=update_set) | process_compound_set.s()).apply_async()
context = {}
context['upload_task_id'] = task_upload.id
context['upload_task_status'] = task_upload.status
# Update client side with task id and status
return render(request, 'viewer/upload-cset.html', context)
context['form'] = form
return render(request, 'viewer/upload-cset.html', context)
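# Illustrative sketch of the celery chaining pattern used in UploadCSet.post above: the first task's return
# value becomes the first positional argument of the second task, which is why the validation task returns a
# tuple that the processing task unpacks (see tasks.py). The task names below are hypothetical stand-ins and
# are left commented out so they are not registered as real tasks in this module.
# from celery import shared_task
#
# @shared_task
# def _example_validate(sdf_path):
#     # ... validation work ...
#     return (sdf_path, True)              # tuple handed on to the next task in the chain
#
# @shared_task
# def _example_process(validation_result):
#     sdf_path, ok = validation_result     # receives the tuple returned by the previous task
#     return ok
#
# chained = (_example_validate.s("/tmp/example.sdf") | _example_process.s()).apply_async()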
# End Upload Compound set functions
# Upload Target datasets functions
class UploadTSet(View):
""" View to render and control viewer/upload-tset.html - a page allowing upload of computed sets. Validation and
upload tasks are defined in `viewer.target_set_upload`, `viewer.sdf_check` and `viewer.tasks` and the task
response handling is done by `viewer.views.ValidateTaskView` and `viewer.views.UploadTaskView`
Methods
-------
allowed requests:
- GET: renders form
- POST: validates and optionally uploads the target dataset that the user provides via the template form
url:
viewer/upload_tset
template:
viewer/upload-tset.html
request params:
- target_name (`django.forms.CharField`): Name of the target the dataset belongs to
- target_zip (`django.forms.FileField`): zip file of the target dataset
- submit_choice (`django.forms.CharField`): 0 to validate, 1 to validate and upload
- proposal_ref: proposal/visit reference passed to the validation and upload tasks
- contact_email: email address notified when validation/upload completes
context:
- form (`django.Forms.form`): instance of `viewer.forms.TSetForm`
- validate_task_id (str): celery task id for validation step
- validate_task_status (str): celery task status for validation step
- upload_task_id (str): celery task id for upload step
- upload_task_status (str): celery task status for upload step
"""
def get(self, request):
# Only authenticated users can upload files - this can be switched off in settings.py.
user = self.request.user
if not user.is_authenticated and settings.AUTHENTICATE_UPLOAD:
context = {}
context['error_message'] \
= 'Only authenticated users can upload files - please navigate to landing page and Login'
logger.info('- UploadTSet.get - authentication error')
return render(request, 'viewer/upload-tset.html', context)
contact_email = ''
if user.is_authenticated and settings.AUTHENTICATE_UPLOAD:
contact_email = user.email
form = TSetForm(initial={'contact_email': contact_email})
return render(request, 'viewer/upload-tset.html', {'form': form})
def post(self, request):
logger.info('+ UploadTSet.post')
context = {}
# Only authenticated users can upload files - this can be switched off in settings.py.
user = self.request.user
if not user.is_authenticated and settings.AUTHENTICATE_UPLOAD:
context['error_message'] \
= 'Only authenticated users can upload files - please navigate to landing page and Login'
logger.info('- UploadTSet.post - authentication error')
return render(request, 'viewer/upload-tset.html', context)
# Check celery/redis is up and running
check_services()
form = TSetForm(request.POST, request.FILES)
if form.is_valid():
# get all of the variables needed from the form
target_file = request.FILES['target_zip']
target_name = request.POST['target_name']
choice = request.POST['submit_choice']
proposal_ref = request.POST['proposal_ref']
contact_email = request.POST['contact_email']
# Create /code/media/tmp if it does not exist
media_root = settings.MEDIA_ROOT
tmp_folder = os.path.join(media_root, 'tmp')
if not os.path.isdir(tmp_folder):
os.mkdir(tmp_folder)
path = default_storage.save('tmp/' + 'NEW_DATA.zip', ContentFile(target_file.read()))
new_data_file = str(os.path.join(settings.MEDIA_ROOT, path))
# Settings for if validate option selected
if str(choice) == '0':
# Start celery task
task_validate = validate_target_set.delay(new_data_file, target=target_name, proposal=proposal_ref,
email=contact_email)
context = {}
context['validate_task_id'] = task_validate.id
context['validate_task_status'] = task_validate.status
# Update client side with task id and status
logger.info('- UploadTSet.post.choice == 0')
return render(request, 'viewer/upload-tset.html', context)
# if it's an upload, run the validate followed by the upload target set task
if str(choice) == '1':
# Start chained celery tasks. NB first function passes tuple
# to second function - see tasks.py
task_upload = (validate_target_set.s(new_data_file, target=target_name, proposal=proposal_ref,
email=contact_email) | process_target_set.s()).apply_async()
context = {}
context['upload_task_id'] = task_upload.id
context['upload_task_status'] = task_upload.status
# Update client side with task id and status
logger.info('- UploadTSet.post.choice == 1')
return render(request, 'viewer/upload-tset.html', context)
context['form'] = form
logger.info('- UploadTSet.post')
return render(request, 'viewer/upload-tset.html', context)
# End Upload Target datasets functions
def email_task_completion(contact_email, message_type, target_name, target_path=None, task_id=None):
""" Notifiy user of upload completion
"""
logger.info('+ email_notify_task_completion: ' + message_type + ' ' + target_name)
email_from = settings.EMAIL_HOST_USER
if contact_email == '' or not email_from:
# Only send email if configured.
return
if message_type == 'upload-success':
subject = 'Fragalysis: Target: '+target_name+' Uploaded'
message = 'The upload of your target data is complete. Your target is available at: ' \
+ str(target_path)
elif message_type == 'validate-success':
subject = 'Fragalysis: Target: '+target_name+' Validation'
message = 'Your data was validated. It can now be uploaded using the upload option.'
else:
# Validation failure
subject = 'Fragalysis: Target: ' + target_name + ' Validation/Upload Failed'
message = 'The validation/upload of your target data did not complete successfully. ' \
'Please navigate to the following link to check the errors: validate_task/' + str(task_id)
recipient_list = [contact_email, ]
logger.info('+ email_notify_task_completion email_from: ' + email_from )
logger.info('+ email_notify_task_completion subject: ' + subject )
logger.info('+ email_notify_task_completion message: ' + message )
logger.info('+ email_notify_task_completion contact_email: ' + contact_email )
# Send email - this should not prevent returning to the screen in the case of error.
send_mail(subject, message, email_from, recipient_list, fail_silently=True)
logger.info('- email_notify_task_completion')
return
# Task functions common between Compound Sets and Target Set pages.
class ValidateTaskView(View):
""" View to handle dynamic loading of validation results from `viewer.tasks.validate` - the validation of files
uploaded to viewer/upload_cset or a target set by a user at viewer/upload_tset
Methods
-------
allowed requests:
- GET: takes a task id, checks its status and returns the status, plus the result if the task is complete
url:
validate_task/<validate_task_id>
template:
viewer/upload-cset.html or viewer/upload-tset.html
"""
def get(self, request, validate_task_id):
""" Get method for `ValidateTaskView`. Takes a validate task id, checks it's status and returns the status,
and result if the task is complete
Parameters
----------
request: request
Context sent by `UploadCSet` or `UploadTset`
validate_task_id: str
task id provided by `UploadCSet` or `UploadTset`
Returns
-------
response_data: JSON
response data (dict) in JSON format:
- if status = 'RUNNING':
- validate_task_status (str): task.status
- validate_task_id (str): task.id
- if status = 'FAILURE':
- validate_task_status (str): task.status
- validate_task_id (str): task.id
- validate_traceback (str): task.traceback
- if status = 'SUCCESS':
- validate_task_status (str): task.status
- validate_task_id (str): task.id
- html (str): html of task outcome - success message or html table of errors & fail message
"""
logger.info('+ ValidateTaskView.get')
task = AsyncResult(validate_task_id)
response_data = {'validate_task_status': task.status,
'validate_task_id': task.id}
if task.status == 'FAILURE':
logger.info('+ ValidateTaskView.get.FAILURE')
result = task.traceback
response_data['validate_traceback'] = str(result)
return JsonResponse(response_data)
# Check if results ready
if task.status == "SUCCESS":
logger.info('+ ValidateTaskView.get.SUCCESS')
results = task.get()
# NB get tuple from validate task
process_type = results[1]
validate_dict = results[2]
validated = results[3]
if validated:
response_data['html'] = 'Your data was validated. \n It can now be uploaded using the upload option.'
response_data['validated'] = 'Validated'
if process_type == 'tset':
target_name = results[5]
contact_email = results[8]
email_task_completion(contact_email, 'validate-success', target_name)
return JsonResponse(response_data)
if not validated:
# set pandas options to display all column data
pd.set_option('display.max_colwidth', None)  # show full column contents (None replaces the deprecated -1)
table = pd.DataFrame.from_dict(validate_dict)
import pandas as pd, numpy as np
train = pd.read_csv('../input/train.csv')
import pandas as pd
import numpy as np
import copy
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import cross_val_score, train_test_split, GridSearchCV
from sklearn.feature_selection import mutual_info_classif, SelectKBest
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn.neighbors import KNeighborsClassifier
from datetime import datetime
from os import listdir
from os.path import isfile, join
import sys
import math
from sklearn.metrics import accuracy_score, f1_score
import re
from Extractor import get_word_length_matrix, get_word_length_matrix_with_interval, get_average_word_length, \
get_word_length_matrix_with_margin, get_char_count, get_digits, get_sum_digits, get_word_n_grams, \
get_char_affix_n_grams, get_char_word_n_grams, get_char_punct_n_grams, get_pos_tags_n_grams, get_bow_matrix, \
get_yules_k, get_special_char_matrix, get_function_words, get_pos_tags, get_sentence_end_start, \
get_flesch_reading_ease_vector, get_sentence_count, get_word_count
from sklearn.preprocessing import StandardScaler, Normalizer
# Chapter 7.1.1. method to trim features with a low column sum, e.g. n-grams that occur fewer than 5 times in total
def trim_df_sum_feature(par_df, par_n):
par_df = par_df.fillna(value=0)
columns = par_df.columns.to_numpy()
data_array = par_df.to_numpy(dtype=float)
sum_arr = data_array.sum(axis=0)
# reduce n if 0 features would be returned
while len(par_df.columns) - len(np.where(sum_arr < par_n)[0]) == 0:
par_n -= 1
positions = list(np.where(sum_arr < par_n))
columns = np.delete(columns, positions)
data_array = np.delete(data_array, positions, axis=1)
return pd.DataFrame(data=data_array, columns=columns)
# Chapter 7.1.1. method to trim features with low occurrence over all articles
def trim_df_by_occurrence(par_df, n):
df_masked = par_df.notnull().astype('int')
word_rate = df_masked.sum()
columns = []
filtered_bow = pd.DataFrame()
for i in range(0, len(word_rate)):
if word_rate[i] > n:
columns.append(word_rate.index[i])
for c in columns:
filtered_bow[c] = par_df[c]
return filtered_bow
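# Illustrative, self-contained toy example of trim_df_by_occurrence: "occurrence" is the number of articles
# (rows) in which a feature has a non-null entry, so columns with at most n non-null values are dropped.
# The data below is invented for the example.
def _example_trim_by_occurrence():
    toy = pd.DataFrame({'frequent': [1, 2, 3], 'rare': [np.nan, np.nan, 1]})
    # 'frequent' occurs in 3 articles, 'rare' in only 1 -> with n=1 only 'frequent' is kept
    return trim_df_by_occurrence(toy, 1)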
# Chapter 7.1.1. Process of filtering out features with low occurrence and saving the filtered features to new files
def filter_low_occurrence():
df_bow = pd.read_csv("daten/raw/bow.csv", sep=',', encoding="utf-8", nrows=2500)
print(f"BOW before: {len(df_bow.columns)}")
df_bow = trim_df_by_occurrence(df_bow, 1)
print(f"BOW after: {len(df_bow.columns)}")
df_bow.to_csv(f"daten/2_filter_low_occurrence/bow.csv", index=False)
for n in range(2, 7):
word_n_gram = pd.read_csv(f"daten/raw/word_{n}_gram.csv", sep=',', encoding="utf-8", nrows=2500)
print(f"Word_{n}_gram before: {len(word_n_gram.columns)}")
word_n_gram = trim_df_by_occurrence(word_n_gram, 1)
print(f"Word_{n}_gram after: {len(word_n_gram.columns)}")
word_n_gram.to_csv(f"daten/2_filter_low_occurrence/word_{n}_gram.csv", index=False)
for n in range(2, 6):
char_affix_n_gram = pd.read_csv(f"daten/trimmed_occ_greater_one/char_affix_{n}_gram_1.csv", sep=',',
encoding="utf-8", nrows=2500)
print(f"char_affix_{n}_gram before: {len(char_affix_n_gram.columns)}")
char_affix_n_gram = trim_df_sum_feature(char_affix_n_gram, 5)
print(f"char_affix_{n}_gram after: {len(char_affix_n_gram.columns)}")
char_affix_n_gram.to_csv(f"daten/2_filter_low_occurrence/char_affix_{n}_gram.csv", index=False)
char_word_n_gram = pd.read_csv(f"daten/trimmed_occ_greater_one/char_word_{n}_gram_1.csv", sep=',',
encoding="utf-8", nrows=2500)
print(f"char_word_{n}_gram before: {len(char_word_n_gram.columns)}")
char_word_n_gram = trim_df_sum_feature(char_word_n_gram, 5)
print(f"char_word_{n}_gram after: {len(char_word_n_gram.columns)}")
char_word_n_gram.to_csv(f"daten/2_filter_low_occurrence/char_word_{n}_gram.csv", index=False)
char_punct_n_gram = pd.read_csv(f"daten/trimmed_occ_greater_one/char_punct_{n}_gram_1.csv", sep=',',
encoding="utf-8", nrows=2500)
print(f"char_punct_{n}_gram before: {len(char_punct_n_gram.columns)}")
char_punct_n_gram = trim_df_sum_feature(char_punct_n_gram, 5)
print(f"char_punct_{n}_gram after: {len(char_punct_n_gram.columns)}")
char_punct_n_gram.to_csv(f"daten/2_filter_low_occurrence/char_punct_{n}_gram.csv", index=False)
df_f_word = pd.read_csv("daten/raw/function_words.csv", sep=',', encoding="utf-8", nrows=2500)
print(f"Function Words before: {len(df_f_word.columns)}")
df_f_word = trim_df_by_occurrence(df_f_word, 1)
print(f"Function Words after: {len(df_f_word.columns)}")
df_f_word.to_csv(f"daten/2_filter_low_occurrence/function_words.csv", index=False)
for n in range(2, 6):
pos_tags_n_gram = pd.read_csv(f"daten/raw/pos_tag_{n}_gram.csv", sep=',', encoding="utf-8", nrows=2500)
print(f"pos_tag_{n}_gram before: {len(pos_tags_n_gram.columns)}")
pos_tags_n_gram = trim_df_by_occurrence(pos_tags_n_gram, 1)
print(f"pos_tag_{n}_gram after: {len(pos_tags_n_gram.columns)}")
pos_tags_n_gram.to_csv(f"daten/2_filter_low_occurrence/pos_tag_{n}_gram.csv", index=False)
# Chapter 7.1.2. method to filter words based on document frequency
def trim_df_by_doc_freq(par_df, par_doc_freq):
df_masked = par_df.notnull().astype('int')
word_rate = df_masked.sum() / len(par_df)
columns = []
filtered_bow = pd.DataFrame()
for i in range(0, len(word_rate)):
if word_rate[i] < par_doc_freq:
columns.append(word_rate.index[i])
for c in columns:
filtered_bow[c] = par_df[c]
return filtered_bow
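# Illustrative toy example of trim_df_by_doc_freq: a column is kept only if its document frequency
# (share of articles with a non-null entry) is below par_doc_freq. The data below is invented.
def _example_trim_by_doc_freq():
    toy = pd.DataFrame({'the': [1, 2, 1, 3], 'rare_word': [np.nan, 1, np.nan, np.nan]})
    # 'the' appears in 100% of the articles and is dropped with a 0.5 threshold,
    # 'rare_word' appears in 25% of them and is kept
    return trim_df_by_doc_freq(toy, 0.5)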
# Chapter 7.1.2 Process of filtering out features with high document frequency and saving the filtered features to new files
def filter_high_document_frequency():
# Filter words with high document frequency
df_bow = pd.read_csv("daten/2_filter_low_occurrence/bow.csv", sep=',', encoding="utf-8", nrows=2500)
print(f"BOW before: {len(df_bow.columns)}")
df_bow = trim_df_by_doc_freq(df_bow, 0.5)
print(f"BOW after: {len(df_bow.columns)}")
df_bow.to_csv(f"daten/3_fiter_high_frequency/bow.csv", index=False)
df_f_word = pd.read_csv("daten/2_filter_low_occurrence/function_words.csv", sep=',', encoding="utf-8", nrows=2500)
print(f"Function Word before: {len(df_f_word.columns)}")
df_f_word = trim_df_by_doc_freq(df_f_word, 0.5)
print(f"Function Word after: {len(df_f_word.columns)}")
df_f_word.to_csv(f"daten/3_fiter_high_frequency/function_words.csv", index=False)
for n in range(2, 7):
word_n_gram = pd.read_csv(f"daten/2_filter_low_occurrence/word_{n}_gram.csv", sep=',', encoding="utf-8",
nrows=2500)
print(f"Word_{n}_gram before: {len(word_n_gram.columns)}")
word_n_gram = trim_df_by_doc_freq(word_n_gram, 0.5)
print(f"Word_{n}_gram after: {len(word_n_gram.columns)}")
word_n_gram.to_csv(f"daten/3_fiter_high_frequency/word_{n}_gram.csv", index=False)
# Chapter 7.1.4. get the relative frequency based on a length metric (char, word, sentence)
def get_rel_frequency(par_df_count, par_df_len_metric_vector):
df_rel_freq = pd.DataFrame(columns=par_df_count.columns)
for index, row in par_df_count.iterrows():
df_rel_freq = df_rel_freq.append(row.div(par_df_len_metric_vector[index]))
return df_rel_freq
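# Note: get_rel_frequency above builds its result row by row via DataFrame.append, which is slow and deprecated
# in newer pandas. A vectorized sketch producing the same result is shown below; it assumes the length-metric
# vector is index-aligned with the count frame, as it is in individual_relative_frequency().
def _example_rel_frequency_vectorized(par_df_count, par_df_len_metric_vector):
    # divide every row of the count frame by the matching entry of the length-metric vector
    return par_df_count.div(par_df_len_metric_vector, axis=0)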
# Chapter 7.1.4. Whole process of the chapter: compute the individual relative frequency of each feature,
# compare the correlation with the article length for the absolute and the relative variant,
# and save the features with the computed relative frequency to new files
def individual_relative_frequency():
df_len_metrics = pd.read_csv(f"daten/1_raw/length_metrics.csv", sep=',', encoding="utf-8", nrows=2500)
# different metrics for individual relative frequencies
metrics = ['word_count', 'char_count', 'sentence_count']
for m in metrics:
# The csv is placed in a folder based on the metric for the individual relative frequency
path = f'daten/4_relative_frequency/{m}'
files = [f for f in listdir(path) if isfile(join(path, f))]
for f in files:
x = pd.read_csv(f"daten/4_relative_frequency/{m}/{f}",
sep=',', encoding="utf-8", nrows=2500).fillna(value=0)
x_rel = get_rel_frequency(x, df_len_metrics[m])
# Save the CSV with relative frequency
x_rel.to_csv(
f"daten/4_relative_frequency/{f.split('.')[0]}"
f"_rel.csv", index=False)
# Correlation is always between the metrics and the word_count
x['word_count'] = df_len_metrics['word_count']
x_rel['word_count'] = df_len_metrics['word_count']
# correlation is computed only on the training part of a 60/40 train/test split
x_train, x_test = train_test_split(x, test_size=0.4, random_state=42)
x_train_rel, x_test_rel = train_test_split(x_rel, test_size=0.4, random_state=42)
# Calculate the mean correlation with the word count
print(f"{f}_abs: {x_train.corr(method='pearson', min_periods=1)['word_count'].iloc[:-1].mean()}")
print(f"{f}_rel: {x_train_rel.corr(method='pearson', min_periods=1)['word_count'].iloc[:-1].mean()}")
# Chapter 7.2.1 First step of the iterative filter: Rank the features
def sort_features_by_score(par_x, par_y, par_select_metric):
# Get a sorted ranking of all features by the selected metric
selector = SelectKBest(par_select_metric, k='all')
selector.fit(par_x, par_y)
# Sort the features by their score
return pd.DataFrame(dict(feature_names=par_x.columns, scores=selector.scores_)).sort_values('scores',
ascending=False)
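# Illustrative toy run of sort_features_by_score with mutual information: the data is synthetic and only meant
# to show the shape of the returned ranking (a frame with 'feature_names' sorted by descending 'scores').
def _example_feature_ranking():
    x_toy = pd.DataFrame({'informative': [0, 0, 1, 1, 0, 1],
                          'noise': [1, 0, 1, 0, 1, 0]})
    y_toy = pd.Series([0, 0, 1, 1, 0, 1])
    return sort_features_by_score(x_toy, y_toy, mutual_info_classif)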
# Chapter 7.2.1 method to get the best percentile for GNB
def get_best_percentile_gnb(par_x_train, par_y_train, par_iter, par_df_sorted_features, step):
result_list = []
gnb = GaussianNB()
best_perc_round = par_iter - 1  # if no better point is found, fall back to the highest number of features (-1 because the result list is 0-indexed)
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if len(par_y_train.index) / len(np.unique(par_y_train.values).tolist()) < 10:
cv = int(len(par_y_train.index) / len(np.unique(par_y_train.values).tolist())) - 1
else:
cv = 10
for perc_features in np.arange(step, par_iter + 1, step):
start_time = datetime.now()
# 1%*i best features to keep and create new dataframe with those only
number_of_features = int(perc_features * (len(par_x_train.columns) / 100))
# minimum one feature
number_of_features = 1 if number_of_features < 1 else number_of_features
feature_list = par_df_sorted_features['feature_names'][: number_of_features].tolist()
x_new_training = copy.deepcopy(par_x_train[feature_list])
# GNB Training
result_list.append(
cross_val_score(gnb, x_new_training, par_y_train, cv=cv, n_jobs=-1, scoring='accuracy').mean())
# Compares the accuracy with the 5 following points => needs 6 points minimum
if len(result_list) > 5:
# list starts to count at 0, subtract one more from len
difference_list_p2p = [result_list[p + 1] - result_list[p] for p in
range(len(result_list) - 6, len(result_list) - 1)]
difference_list_1p = [result_list[p + 1] - result_list[len(result_list) - 6] for p in
range(len(result_list) - 6, len(result_list) - 1)]
# Find the best percent if each of the 5 following points is no higher than its predecessor, or all of them
# deviate by <= 0.5% from the first of the six points, or all of them are more than 2% below that first point
if all(point_y <= 0 for point_y in difference_list_p2p) or \
all(-0.005 <= point_y <= 0.005 for point_y in difference_list_1p) or \
all(point_y < -0.02 for point_y in difference_list_1p):
# the best percent corresponds to the point six positions from the end of the result list
best_perc_round = len(result_list) - 6
break
# Console Output
print(f"GNB Round {perc_features / step}: {datetime.now() - start_time}")
# Optimization of the best percent
# If any point with a lower percent is higher, it is the new optimum
if any(point_y > result_list[best_perc_round] for point_y in result_list[:len(result_list) - 5]):
best_perc_round = result_list.index(max(result_list[:len(result_list) - 5]))
# Tradeoff of 1% accuracy for a smaller percentage of features
# As long as there is a lower maximum within 1% accuracy that uses at least 2% fewer features
better_perc_exists = True
best_accuracy_tradeoff = result_list[best_perc_round] - 0.01
# If fewer than 2% of rounds are left for the tradeoff, there is no better percent
if best_perc_round - int(2 / step) < 0:
better_perc_exists = False
while better_perc_exists:
earliest_pos = best_perc_round - int(2 / step)
# if it is less than 0, clamp to 0 (a negative index would slice from the end of the list)
earliest_pos = 0 if earliest_pos < 0 else earliest_pos
if any(point_y > best_accuracy_tradeoff for point_y in result_list[:earliest_pos]):
best_perc_round = result_list.index(max(result_list[:earliest_pos]))
else:
better_perc_exists = False
# the best percent of the features is calculated by the percent start plus the rounds * step
best_perc = step + step * best_perc_round
print(best_perc)
return best_perc, best_perc_round, result_list
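# Small, self-contained illustration of the early-stopping rule shared by the three get_best_percentile_*
# functions: once six accuracies are available, the last five differences are inspected and the search stops
# when the curve has stopped improving, flattened, or clearly degraded. The accuracy values in the usage
# comment are invented for the demonstration.
def _example_early_stopping_rule(result_list):
    # mirrors the criterion used above; returns True if the percentile search would stop here
    if len(result_list) <= 5:
        return False
    difference_list_p2p = [result_list[p + 1] - result_list[p]
                           for p in range(len(result_list) - 6, len(result_list) - 1)]
    difference_list_1p = [result_list[p + 1] - result_list[len(result_list) - 6]
                          for p in range(len(result_list) - 6, len(result_list) - 1)]
    return (all(point_y <= 0 for point_y in difference_list_p2p)
            or all(-0.005 <= point_y <= 0.005 for point_y in difference_list_1p)
            or all(point_y < -0.02 for point_y in difference_list_1p))
# e.g. _example_early_stopping_rule([0.60, 0.72, 0.71, 0.70, 0.70, 0.69, 0.68]) returns True,
# because the five accuracies after 0.72 never improve on their predecessor.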
# Chapter 7.2.1 method to get the best percentile for SVC
def get_best_percentile_svc(par_x_train, par_y_train, par_iter, par_df_sorted_features, step):
result_list = []
# Parameter for SVC
param_grid_svc = {'C': (0.001, 0.01, 0.1, 1, 10),
'kernel': ('linear', 'poly', 'rbf'),
'gamma': ('scale', 'auto')}
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if len(par_y_train.index) / len(np.unique(par_y_train.values).tolist()) < 10:
cv = int(len(par_y_train.index) / len(np.unique(par_y_train.values).tolist())) - 1
else:
cv = 10
best_perc_round = par_iter - 1  # if no better point is found, fall back to the highest number of features (-1 because the result list is 0-indexed)
for perc_features in np.arange(step, par_iter + 1, step):
start_time = datetime.now()
# 1%*i best features to keep and create new dataframe with those only
number_of_features = int(perc_features * (len(par_x_train.columns) / 100))
# minimum one feature
number_of_features = 1 if number_of_features < 1 else number_of_features
feature_list = par_df_sorted_features['feature_names'][: number_of_features].tolist()
x_new_training = copy.deepcopy(par_x_train[feature_list])
# SVC Test
grid_search = GridSearchCV(svm.SVC(), param_grid_svc, cv=cv, n_jobs=-1, scoring='accuracy')
grid_results = grid_search.fit(x_new_training, par_y_train)
result_list.append(grid_results.best_score_)
# Compares the accuracy with the 5 following points => needs 6 points minimum
if len(result_list) > 5:
# list starts to count at 0, subtract one more from len
difference_list_p2p = [result_list[p + 1] - result_list[p] for p in
range(len(result_list) - 6, len(result_list) - 1)]
difference_list_1p = [result_list[p + 1] - result_list[len(result_list) - 6] for p in
range(len(result_list) - 6, len(result_list) - 1)]
# Find the best percent if each of the 5 following points is no higher than its predecessor, or all of them
# deviate by <= 0.5% from the first of the six points, or all of them are more than 2% below that first point
if all(point_y <= 0 for point_y in difference_list_p2p) or \
all(-0.005 <= point_y <= 0.005 for point_y in difference_list_1p) or \
all(point_y < -0.02 for point_y in difference_list_1p):
# the best percent corresponds to the point six positions from the end of the result list
best_perc_round = len(result_list) - 6
break
# Console Output
print(f"SVC Round {perc_features / step}: {datetime.now() - start_time}")
# Optimization of the best percent
# If any point with a lower percent is higher, it is the new optimum
if any(point_y > result_list[best_perc_round] for point_y in result_list[:len(result_list) - 5]):
best_perc_round = result_list.index(max(result_list[:len(result_list) - 5]))
# Tradeoff of 1% accuracy for a smaller percentage of features
# As long as there is a lower maximum within 1% accuracy that uses at least 2% fewer features
better_perc_exists = True
best_accuracy_tradeoff = result_list[best_perc_round] - 0.01
# If fewer than 2% of rounds are left for the tradeoff, there is no better percent
if best_perc_round - int(2 / step) < 0:
better_perc_exists = False
while better_perc_exists:
earliest_pos = best_perc_round - int(2 / step)
# if it is less than 0, clamp to 0 (a negative index would slice from the end of the list)
earliest_pos = 0 if earliest_pos < 0 else earliest_pos
if any(point_y > best_accuracy_tradeoff for point_y in result_list[:earliest_pos]):
best_perc_round = result_list.index(max(result_list[:earliest_pos]))
else:
better_perc_exists = False
# the best percent of the features is calculated by the percent start plus the rounds * step
best_perc = step + step * best_perc_round
print(best_perc)
return best_perc, best_perc_round, result_list
# Chapter 7.2.1 method to get the best percentile for KNN
def get_best_percentile_knn(par_x_train, par_y_train, par_iter, par_df_sorted_features, step):
result_list = []
best_perc_round = par_iter - 1  # if no better point is found, fall back to the highest number of features (-1 because the result list is 0-indexed)
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if len(par_y_train.index) / len(np.unique(par_y_train.values).tolist()) < 10:
cv = int(len(par_y_train.index) / len(np.unique(par_y_train.values).tolist())) - 1
else:
cv = 10
for perc_features in np.arange(step, par_iter + 1, step):
start_time = datetime.now()
# 1%*i best features to keep and create new dataframe with those only
number_of_features = int(perc_features * (len(par_x_train.columns) / 100))
# minimum one feature
number_of_features = 1 if number_of_features < 1 else number_of_features
feature_list = par_df_sorted_features['feature_names'][: number_of_features].tolist()
x_new_training = copy.deepcopy(par_x_train[feature_list])
# Parameter for KNN
# Some values from 3 up to the square root of the number of samples, in steps of 13
neighbors = [i for i in range(3, int(math.sqrt(len(x_new_training.index))), 13)]
neighbors += [1, 3, 5, 11, 19, 36]
if int(math.sqrt(len(x_new_training.index))) not in neighbors:  # check the same value that is appended below
neighbors.append(int(math.sqrt(len(x_new_training.index))))
# No more neighbors than samples - 2
neighbors = [x for x in neighbors if x < len(x_new_training.index) - 2]
# remove duplicates
neighbors = list(set(neighbors))
param_grid_knn = {'n_neighbors': neighbors,
'weights': ['uniform', 'distance'],
'metric': ['euclidean', 'manhattan']}
# KNN Training
grid_search = GridSearchCV(KNeighborsClassifier(), param_grid_knn, cv=cv, n_jobs=-1, scoring='accuracy')
grid_results = grid_search.fit(x_new_training, par_y_train)
result_list.append(grid_results.best_score_)
# Compares the accuracy with the 5 following points => needs 6 points minimum
if len(result_list) > 5:
# list starts to count at 0, subtract one more from len
difference_list_p2p = [result_list[p + 1] - result_list[p] for p in
range(len(result_list) - 6, len(result_list) - 1)]
difference_list_1p = [result_list[p + 1] - result_list[len(result_list) - 6] for p in
range(len(result_list) - 6, len(result_list) - 1)]
# Find the best percent if each of the 5 following points is no higher than its predecessor, or all of them
# deviate by <= 0.5% from the first of the six points, or all of them are more than 2% below that first point
if all(point_y <= 0 for point_y in difference_list_p2p) or \
all(-0.005 <= point_y <= 0.005 for point_y in difference_list_1p) or \
all(point_y < -0.02 for point_y in difference_list_1p):
# the best percent corresponds to the point six positions from the end of the result list
best_perc_round = len(result_list) - 6
break
# Console Output
print(f"KNN Round {perc_features / step}: {datetime.now() - start_time}")
# Optimization of the best percent
# If any point with a lower percent is higher, it is the new optimum
if any(point_y > result_list[best_perc_round] for point_y in result_list[:len(result_list) - 5]):
best_perc_round = result_list.index(max(result_list[:len(result_list) - 5]))
# Tradeoff of 1% accuracy for a smaller percentage of features
# As long as there is a lower maximum within 1% accuracy that uses at least 2% fewer features
better_perc_exists = True
best_accuracy_tradeoff = result_list[best_perc_round] - 0.01
# If fewer than 2% of rounds are left for the tradeoff, there is no better percent
if best_perc_round - int(2 / step) < 0:
better_perc_exists = False
while better_perc_exists:
earliest_pos = best_perc_round - int(2 / step)
# if it is less than 0, clamp to 0 (a negative index would slice from the end of the list)
earliest_pos = 0 if earliest_pos < 0 else earliest_pos
if any(point_y >= best_accuracy_tradeoff for point_y in result_list[:earliest_pos]):
best_perc_round = result_list.index(max(result_list[:earliest_pos]))
else:
better_perc_exists = False
# the best percent of the features is calculated by the percent start plus the rounds * step
best_perc = step + step * best_perc_round
print(best_perc)
return best_perc, best_perc_round, result_list
# Chapter 7.2.1 Filter the feature based on the estimated best percentile and save it into a new file
def print_filter_feature_percentile(par_path, par_df_sorted_features, par_percent, par_x, par_file_name):
# keep par_percent percent of the features: (number of columns / 100) * par_percent
number_features = round(par_percent * (len(par_x.columns) / 100))
# keep at least one feature if the computed number is less than 1
number_features = 1 if number_features < 1 else number_features
feature_list = par_df_sorted_features['feature_names'][:number_features].tolist()
# print the name of the features in a file
original_stdout = sys.stdout
with open(f'{par_path}selected_features/{par_file_name}_filtered.txt', 'w', encoding="utf-8") as f:
sys.stdout = f
print(f"Features: {len(feature_list)}")
print(f"{feature_list}")
sys.stdout = original_stdout
# select the best features from the original dataset
par_x[feature_list].to_csv(f"{par_path}csv_after_filter/{par_file_name}_filtered.csv", index=False)
# Chapter 7.2.1 Complete process of the iterative Filter
def iterative_filter_process(par_path, par_df, par_num_texts, par_num_authors):
y = par_df['label_encoded']
path = f'{par_path}csv_before_filter'
files = [f for f in listdir(path) if isfile(join(path, f))]
# Filter the files for author and text numbers if 'all' is not set.
if par_num_authors != "all":
r = re.compile(f"a{par_num_authors}_")
files = list(filter(r.match, files))
if par_num_texts != "all":
r = re.compile(f".*t{par_num_texts}_")
files = list(filter(r.match, files))
step_perc = 1.0
for f in files:
filename = f.split(".")[0]
print(f)
x = pd.read_csv(f"{par_path}csv_before_filter/{f}", sep=',', encoding="utf-8", nrows=2500)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42, stratify=y)
# Get sorted features
df_sorted_features = sort_features_by_score(x_train, y_train, mutual_info_classif)
# Calculate the best percentiles of the data for the different classifier
best_perc_gnb, best_round_gnb, result_list_gnb = get_best_percentile_gnb(x_train, y_train, 50,
df_sorted_features, step_perc)
best_perc_svc, best_round_svc, result_list_svc = get_best_percentile_svc(x_train, y_train, 50,
df_sorted_features, step_perc)
best_perc_knn, best_round_knn, result_list_knn = get_best_percentile_knn(x_train, y_train, 50,
df_sorted_features, step_perc)
# select the best features from the original dataset
print_filter_feature_percentile(par_path, df_sorted_features, best_perc_gnb, x, "gnb_" + filename)
print_filter_feature_percentile(par_path, df_sorted_features, best_perc_svc, x, "svc_" + filename)
print_filter_feature_percentile(par_path, df_sorted_features, best_perc_knn, x, "knn_" + filename)
# print best perc to a file
original_stdout = sys.stdout
with open(f'{par_path}best_perc/{filename}.txt', 'w') as f:
sys.stdout = f
print(f"best_perc_gnb: ({best_perc_gnb}|{result_list_gnb[best_round_gnb]})\n"
f"best_perc_svc: ({best_perc_svc}|{result_list_svc[best_round_svc]})\n"
f"best_perc_knn: ({best_perc_knn}|{result_list_knn[best_round_knn]})")
sys.stdout = original_stdout
# draw diagram
len_list = [len(result_list_gnb), len(result_list_svc), len(result_list_knn)]
plt.plot([i * step_perc for i in range(1, len(result_list_gnb) + 1)], result_list_gnb, 'r-', label="gnb")
plt.plot(best_perc_gnb, result_list_gnb[best_round_gnb], 'rx')
plt.plot([i * step_perc for i in range(1, len(result_list_svc) + 1)], result_list_svc, 'g-', label="svc")
plt.plot(best_perc_svc, result_list_svc[best_round_svc], 'gx')
plt.plot([i * step_perc for i in range(1, len(result_list_knn) + 1)], result_list_knn, 'b-', label="knn")
plt.plot(best_perc_knn, result_list_knn[best_round_knn], 'bx')
plt.axis([step_perc, (max(len_list) + 1) * step_perc, 0, 1])
plt.xlabel('Daten in %')
plt.ylabel('Genauigkeit')
plt.legend()
plt.savefig(f"{par_path}/diagrams/{filename}")
plt.cla()
# print accuracy to file
df_percent = pd.DataFrame(data=[i * step_perc for i in range(1, max(len_list) + 1)], columns=['percent'])
df_gnb = pd.DataFrame(data=result_list_gnb, columns=['gnb'])
df_svc = pd.DataFrame(data=result_list_svc, columns=['svc'])
df_knn = pd.DataFrame(data=result_list_knn, columns=['knn'])
df_accuracy = pd.concat([df_percent, df_gnb, df_svc, df_knn], axis=1)
df_accuracy = df_accuracy.fillna(value="")
df_accuracy.to_csv(f'{par_path}accuracy/{filename}_filtered.csv', index=False)
# Chapter 8.1. and later, basically the process of the iterative filter only with the svc classifier
def iterative_filter_process_svm(par_path, par_df, par_num_texts, par_num_authors):
y = par_df['label_encoded']
path = f'{par_path}csv_before_filter'
files = [f for f in listdir(path) if isfile(join(path, f))]
# Filter the files for author and text numbers if 'all' is not set.
if par_num_authors != "all":
r = re.compile(f"a{par_num_authors}_")
files = list(filter(r.match, files))
if par_num_texts != "all":
r = re.compile(f".*t{par_num_texts}_")
files = list(filter(r.match, files))
step_perc = 1.0
for f in files:
filename = f.split(".")[0]
print(f)
x = pd.read_csv(f"{par_path}csv_before_filter/{f}", sep=',', encoding="utf-8", nrows=2500)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42, stratify=y)
# Get sorted features
df_sorted_features = sort_features_by_score(x_train, y_train, mutual_info_classif)
# Calculate the best percentiles of the data for svc
best_perc_svc, best_round_svc, result_list_svc = get_best_percentile_svc(x_train, y_train, 50,
df_sorted_features, step_perc)
# select the best features from the original dataset
print_filter_feature_percentile(par_path, df_sorted_features, best_perc_svc, x, filename)
# print best perc to a file
original_stdout = sys.stdout
with open(f'{par_path}best_perc/{filename}.txt', 'w') as out_f:
sys.stdout = out_f
print(f"best_perc_svc: ({best_perc_svc}|{result_list_svc[best_round_svc]})\n")
sys.stdout = original_stdout
# draw diagram
plt.plot([i * step_perc for i in range(1, len(result_list_svc) + 1)], result_list_svc, 'g-', label="svc")
plt.plot(best_perc_svc, result_list_svc[best_round_svc], 'gx')
plt.axis([step_perc, (len(result_list_svc) + 1) * step_perc, 0, 1])
plt.xlabel('Daten in %')
plt.ylabel('Genauigkeit')
plt.legend()
plt.savefig(f"{par_path}/diagrams/{filename}")
plt.cla()
# print accuracy to file
df_percent = pd.DataFrame(data=[i * step_perc for i in range(1, len(result_list_svc) + 1)], columns=['percent'])
df_svc = pd.DataFrame(data=result_list_svc, columns=['svc'])
df_accuracy = pd.concat([df_percent, df_svc], axis=1)
df_accuracy = df_accuracy.fillna(value="")
df_accuracy.to_csv(f'{par_path}accuracy/{filename}_filtered.csv', index=False)
# Chapter 7.2.1. Get the accuracy of the features before the iterative filter, results in table 18
def get_accuracy_before_iterative_filter():
gnb_result_list, svc_result_list, knn_result_list, gnb_time_list, svc_time_list, knn_time_list \
= [], [], [], [], [], []
y = pd.read_csv("musikreviews_balanced_authors.csv", sep=',', encoding="utf-8", nrows=2500)['label_encoded']
path = f'daten/5_iterative_filter/csv_before_filter'
files = [f for f in listdir(path) if isfile(join(path, f))]
gnb = GaussianNB()
param_grid_svc = {'C': (0.001, 0.01, 0.1, 1, 10),
'kernel': ('linear', 'poly', 'rbf'),
'gamma': ('scale', 'auto')}
# Get the feature names for the table
feature_list = [re.search("(.+?(?=_rel))", f).group(1) for f in files]
for f in files:
print(f)
x = pd.read_csv(f"daten/5_iterative_filter/csv_before_filter/{f}", sep=',', encoding="utf-8", nrows=2500)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42)
# GNB fit
start_time = datetime.now()
gnb.fit(x_train, y_train)
# score on test data
score = accuracy_score(gnb.predict(x_test), y_test)
time_taken = datetime.now() - start_time
print(f"GNB test score for {f}: {score}")
print(f"GNB time for {f}: {time_taken}")
gnb_result_list.append(score)
gnb_time_list.append(time_taken)
# SVC parameter optimization
grid_search = GridSearchCV(svm.SVC(), param_grid_svc, cv=10, n_jobs=-1, scoring='accuracy')
grid_results = grid_search.fit(x_train, y_train)
svc = svm.SVC(C=grid_results.best_params_['C'], gamma=grid_results.best_params_['gamma'],
kernel=grid_results.best_params_['kernel'])
start_time = datetime.now()
# fit on train data
svc.fit(x_train, y_train)
# predict test data
score = accuracy_score(svc.predict(x_test), y_test)
time_taken = datetime.now() - start_time
print(f"SVC test score for {f}: {score}")
print(f"SVC time for {f}: {time_taken}")
svc_result_list.append(score)
svc_time_list.append(time_taken)
# Parameter for KNN
# Some values from 3 up to the square root of the number of features, in steps of 13
neighbors = [i for i in range(3, int(math.sqrt(len(x.columns))), 13)]
neighbors += [5, 11, 19, 36]
if int(math.sqrt(len(x.columns))) not in neighbors:
neighbors.append(int(math.sqrt(len(x.columns))))
param_grid_knn = {'n_neighbors': neighbors,
'weights': ['uniform', 'distance'],
'metric': ['euclidean', 'manhattan']}
# KNN parameter optimization
grid_search = GridSearchCV(KNeighborsClassifier(), param_grid_knn, cv=10, n_jobs=-1, scoring='accuracy')
grid_results = grid_search.fit(x_train, y_train)
knn = KNeighborsClassifier(n_neighbors=grid_results.best_params_['n_neighbors'],
metric=grid_results.best_params_['metric'],
weights=grid_results.best_params_['weights'])
# fit on train data
knn.fit(x_train, y_train)
# KNN predict test data
start_time = datetime.now()
# predict test data
score = accuracy_score(knn.predict(x_test), y_test)
time_taken = datetime.now() - start_time
print(f"KNN test score for {f}: {score}")
print(f"KNN time for {f}: {time_taken}")
knn_result_list.append(score)
knn_time_list.append(time_taken)
# create dataframe with the scores and times
df_results = pd.DataFrame()
df_results['feature'] = feature_list
df_results['score_gnb'] = gnb_result_list
df_results['time_gnb'] = gnb_time_list
df_results['score_svc'] = svc_result_list
df_results['time_svc'] = svc_time_list
df_results['score_knn'] = knn_result_list
df_results['time_knn'] = knn_time_list
return df_results
# Chapter 7.2.1. Get the accuracy of the features after the iterative filter, results in table 18
def get_accuracy_after_iterative_filter():
df_gnb_result = pd.DataFrame(columns=['feature', 'score_gnb', 'time_gnb'])
df_svc_result = pd.DataFrame(columns=['feature', 'score_svc', 'time_svc'])
df_knn_result = pd.DataFrame(columns=['feature', 'score_knn', 'time_knn'])
y = pd.read_csv("musikreviews_balanced_authors.csv", sep=',', encoding="utf-8", nrows=2500)['label_encoded']
# path = f'daten/5_iterative_filter/csv_after_filter'
path = f'daten/5_iterative_filter/5_iterative_filter/csv_after_filter'
files = [f for f in listdir(path) if isfile(join(path, f))]
gnb = GaussianNB()
param_grid_svc = {'C': (0.001, 0.01, 0.1, 1, 10),
'kernel': ('linear', 'poly', 'rbf'),
'gamma': ('scale', 'auto')}
for f in files:
print(f)
# Get the feature name for the table
feature = re.search(".{4}(.+?(?=_rel))", f).group(1)
# x = pd.read_csv(f"daten/5_iterative_filter/csv_after_filter/{f}", sep=',', encoding="utf-8", nrows=2500)
x = pd.read_csv(f"daten/5_iterative_filter/5_iterative_filter/csv_after_filter/{f}", sep=',', encoding="utf-8",
nrows=2500)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42)
# Select the classifier by the start of the filename
if f.split("_")[0] == "gnb":
# GNB fit
start_time = datetime.now()
gnb.fit(x_train, y_train)
# score on test data
score = accuracy_score(gnb.predict(x_test), y_test)
time_taken = datetime.now() - start_time
print(f"GNB test score for {f}: {score}")
print(f"GNB time for {f}: {time_taken}")
df_gnb_result = df_gnb_result.append(pd.DataFrame(data={'feature': [feature], 'score_gnb': [score],
'time_gnb': [time_taken]}), ignore_index=True)
elif f.split("_")[0] == "svc":
# SVC parameter optimization
grid_search = GridSearchCV(svm.SVC(), param_grid_svc, cv=10, n_jobs=-1, scoring='accuracy')
grid_results = grid_search.fit(x_train, y_train)
svc = svm.SVC(C=grid_results.best_params_['C'], gamma=grid_results.best_params_['gamma'],
kernel=grid_results.best_params_['kernel'])
start_time = datetime.now()
# fit on train data
svc.fit(x_train, y_train)
# predict test data
score = accuracy_score(svc.predict(x_test), y_test)
time_taken = datetime.now() - start_time
print(f"SVC test score for {f}: {score}")
print(f"SVC training time for {f}: {time_taken}")
df_svc_result = df_svc_result.append(pd.DataFrame(data={'feature': [feature], 'score_svc': [score],
'time_svc': [time_taken]}), ignore_index=True)
elif f.split("_")[0] == "knn":
# Parameter for KNN
# Some values from 3 up to the square root of the number of features, in steps of 13
neighbors = [i for i in range(3, int(math.sqrt(len(x.columns))), 13)]
neighbors += [5, 11, 19, 36]
if int(math.sqrt(len(x.columns))) not in neighbors:
neighbors.append(int(math.sqrt(len(x.columns))))
param_grid_knn = {'n_neighbors': neighbors,
'weights': ['uniform', 'distance'],
'metric': ['euclidean', 'manhattan']}
# KNN parameter optimization
grid_search = GridSearchCV(KNeighborsClassifier(), param_grid_knn, cv=10, n_jobs=-1, scoring='accuracy')
grid_results = grid_search.fit(x_train, y_train)
knn = KNeighborsClassifier(n_neighbors=grid_results.best_params_['n_neighbors'],
metric=grid_results.best_params_['metric'],
weights=grid_results.best_params_['weights'])
start_time = datetime.now()
# fit on train data
knn.fit(x_train, y_train)
# KNN predict test data
start_time = datetime.now()
# predict test data
score = accuracy_score(knn.predict(x_test), y_test)
time_taken = datetime.now() - start_time
print(f"KNN test score for {f}: {score}")
print(f"KNN test time for {f}: {time_taken}")
df_knn_result = df_knn_result.append(pd.DataFrame(data={'feature': [feature], 'score_knn': [score],
'time_knn': [time_taken]}), ignore_index=True)
df_merge = pd.merge(df_gnb_result, df_knn_result, on="feature", how='outer')
df_merge = pd.merge(df_merge, df_svc_result, on="feature", how='outer')
return df_merge
# Get n articles per author for a given number of authors. Required for setups with different numbers of authors and articles
def get_n_article_by_author(par_df, par_label_count, par_article_count):
df_articles = pd.DataFrame(columns=['label_encoded', 'text'])
# only keep entries of the "par_label_count" first labels
par_df = par_df.where(par_df['label_encoded'] <= par_label_count).dropna()
labels = np.unique(par_df['label_encoded'].values).tolist()
list_article_count = [par_article_count for i in labels]
for index, row in par_df.iterrows():
if list_article_count[labels.index(row['label_encoded'])] != 0:
d = {'label_encoded': [row['label_encoded']], 'text': [row['text']]}
df_articles = df_articles.append(pd.DataFrame.from_dict(d), ignore_index=True)
list_article_count[labels.index(row['label_encoded'])] -= 1
if sum(list_article_count) == 0:
break
return df_articles
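# Illustrative alternative (not used above): essentially the same "first n articles per author" selection can
# be written with a pandas groupby; a sketch assuming the same column names as get_n_article_by_author.
def _example_first_n_articles(par_df, par_label_count, par_article_count):
    subset = par_df[par_df['label_encoded'] <= par_label_count]
    # keep the first par_article_count rows for each author label, preserving the original order
    return subset.groupby('label_encoded', sort=False).head(par_article_count)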
# Return the indices of n articles per author for a given number of authors. Required for setups with different
# numbers of authors and articles
def get_n_article_index_by_author(par_df, par_label_count, par_article_count):
index_list = []
# only keep entries of the "par_label_count" first labels
par_df = par_df.where(par_df['label_encoded'] <= par_label_count).dropna()
labels = np.unique(par_df['label_encoded'].values).tolist()
list_article_count = [par_article_count for i in labels]
for index, row in par_df.iterrows():
if row['label_encoded'] in labels:
if list_article_count[labels.index(row['label_encoded'])] != 0:
index_list.append(index)
list_article_count[labels.index(row['label_encoded'])] -= 1
if sum(list_article_count) == 0:
break
return index_list
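# Unlike get_n_article_by_author, this variant returns only row indices; the comparison functions below
# use them to slice precomputed feature CSVs (e.g. df_w4g.iloc[index_list]) without copying the text column.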
# Method to estimate the f1 score of the test data for GNB
def get_f1_for_gnb(par_x_train, par_x_test, par_y_train, par_y_test):
gnb = GaussianNB()
# GNB fit
gnb.fit(par_x_train, par_y_train)
# score on test data
gnb_score = f1_score(gnb.predict(par_x_test), par_y_test, average='micro')
return gnb_score
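# Note: for single-label multi-class predictions, micro-averaged F1 is identical to plain accuracy,
# so this score can be read as the share of correctly attributed test articles.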
# Method to estimate the f1 score of the test data for SVC
def get_f1_for_svc(par_x_train, par_x_test, par_y_train, par_y_test, par_cv):
# Param Grid SVC
param_grid_svc = {'C': (0.001, 0.01, 0.1, 1, 10),
'kernel': ('linear', 'poly', 'rbf'),
'gamma': ('scale', 'auto')}
# SVC parameter optimization
grid_search = GridSearchCV(svm.SVC(), param_grid_svc, cv=par_cv, n_jobs=-1, scoring='f1_micro')
grid_results = grid_search.fit(par_x_train, par_y_train)
svc = svm.SVC(C=grid_results.best_params_['C'], gamma=grid_results.best_params_['gamma'],
kernel=grid_results.best_params_['kernel'])
# fit on train data
svc.fit(par_x_train, par_y_train)
# predict test data
svc_score = f1_score(svc.predict(par_x_test), par_y_test, average='micro')
return svc_score
# Method to estimate the f1 score of the test data for KNN
def get_f1_for_knn(par_x_train, par_x_test, par_y_train, par_y_test, par_cv):
# define the parameter grid for KNN; the number of neighbors has to be lower than the number of samples
neighbors = [1, 3, 5, 11, 19, 36, 50]
# number of neighbors must be less than number of samples
neighbors = [x for x in neighbors if x < len(par_x_test)]
param_grid_knn = {'n_neighbors': neighbors,
'weights': ['uniform', 'distance'],
'metric': ['euclidean', 'manhattan']}
# KNN parameter optimization
grid_search = GridSearchCV(KNeighborsClassifier(), param_grid_knn, cv=par_cv, n_jobs=-1, scoring='f1_micro')
grid_results = grid_search.fit(par_x_train, par_y_train)
knn = KNeighborsClassifier(n_neighbors=grid_results.best_params_['n_neighbors'],
metric=grid_results.best_params_['metric'],
weights=grid_results.best_params_['weights'])
# fit on train data
knn.fit(par_x_train, par_y_train)
# predict test data
knn_score = f1_score(knn.predict(par_x_test), par_y_test, average='micro')
return knn_score
# Method to estimate the accuracy of the test data for SVC
def get_accuracy_for_svc(par_x_train, par_x_test, par_y_train, par_y_test, par_cv):
# Param Grid SVC
param_grid_svc = {'C': (0.001, 0.01, 0.1, 1, 10),
'kernel': ('linear', 'poly', 'rbf'),
'gamma': ('scale', 'auto')}
# SVC parameter optimization
grid_search = GridSearchCV(svm.SVC(), param_grid_svc, cv=par_cv, n_jobs=-1, scoring='f1_micro')
grid_results = grid_search.fit(par_x_train, par_y_train)
svc = svm.SVC(C=grid_results.best_params_['C'], gamma=grid_results.best_params_['gamma'],
kernel=grid_results.best_params_['kernel'])
# fit on train data
svc.fit(par_x_train, par_y_train)
# predict test data
svc_score = accuracy_score(svc.predict(par_x_test), par_y_test)
return svc_score
# Chapter 7.3.1. comparison of the word length feature alternatives
def compare_word_length_features():
df_all_texts = pd.read_csv("musikreviews_balanced_authors.csv", sep=',', encoding="utf-8")
# Different values for the texts by authors
list_author_texts = [10, 15, 25, 50, 75, 100]
# save the results in a dictionary
dic_f1_results = {'wl_matrix_gnb': [], 'wl_matrix_svc': [], 'wl_matrix_knn': [],
'wl_matrix_bins_20_30_gnb': [], 'wl_matrix_bins_20_30_svc': [], 'wl_matrix_bins_20_30_knn': [],
'wl_matrix_bins_10_20_gnb': [], 'wl_matrix_bins_10_20_svc': [], 'wl_matrix_bins_10_20_knn': [],
'wl_matrix_20_gnb': [], 'wl_matrix_20_svc': [], 'wl_matrix_20_knn': [],
'wl_avg_gnb': [], 'wl_avg_svc': [], 'wl_avg_knn': []}
for author_texts in list_author_texts:
# get article for n authors with number of author texts
df_article = get_n_article_by_author(df_all_texts, 25, author_texts)
# Get the word count for the individual relative frequency
word_count = get_word_count(df_article)
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if author_texts * 0.4 < 10:
cv = int(author_texts * 0.4)
else:
cv = 10
# Get the scores for every feature
for feature in ["wl_matrix", "wl_matrix_bins_20_30", "wl_matrix_bins_10_20", "wl_avg", "wl_matrix_20"]:
# select the test/train data by the feature name and calculate the individual relative frequency
if feature == "wl_matrix":
x = get_rel_frequency(get_word_length_matrix(df_article).fillna(value=0), word_count['word_count'])
elif feature == "wl_matrix_bins_20_30":
x = get_rel_frequency(get_word_length_matrix_with_interval(df_article, 20, 30).fillna(value=0),
word_count['word_count'])
elif feature == "wl_matrix_bins_10_20":
x = get_rel_frequency(get_word_length_matrix_with_interval(df_article, 10, 20).fillna(value=0),
word_count['word_count'])
elif feature == "wl_avg":
x = get_average_word_length(df_article)
elif feature == "wl_matrix_20":
x = get_word_length_matrix_with_margin(df_article, 20)
# Scale the data; otherwise high counts in wl_matrix can dominate, and the hyperparameter optimization for SVC
# takes a long time because of the small differences from the average
scaler = StandardScaler()
scaler.fit(x)
x = scaler.transform(x)
y = df_article['label_encoded']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42, stratify=y)
# calculate scores
gnb_score = get_f1_for_gnb(x_train, x_test, y_train, y_test)
svc_score = get_f1_for_svc(x_train, x_test, y_train, y_test, cv)
knn_score = get_f1_for_knn(x_train, x_test, y_train, y_test, cv)
# Append scores to dictionary
dic_f1_results[f'{feature}_gnb'].append(gnb_score)
dic_f1_results[f'{feature}_svc'].append(svc_score)
dic_f1_results[f'{feature}_knn'].append(knn_score)
# Console output
print(f"GNB-Score for {feature} with {author_texts}: {gnb_score}")
print(f"SVC-Score for {feature} with {author_texts}: {svc_score}")
print(f"KNN-Score for {feature} with {author_texts}: {knn_score}")
df_results = pd.DataFrame(dic_f1_results)
df_results['number_article'] = list_author_texts
return df_results
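# Example (sketch): run the word-length comparison and persist the table used in chapter 7.3.1.
# The output path is hypothetical.
# compare_word_length_features().to_csv("daten/word_length_comparison.csv", index=False)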
# Chapter 7.3.2. comparison of the digit feature alternatives
def compare_digit_features():
df_all_texts = pd.read_csv("musikreviews_balanced_authors.csv", sep=',', encoding="utf-8")
# Different values for the texts by authors
list_author_texts = [10, 15, 25, 50, 75, 100]
# save the results in a dictionary
dic_f1_results = {'digit_sum_gnb': [], 'digit_sum_svc': [], 'digit_sum_knn': [],
'digits_gnb': [], 'digits_svc': [], 'digits_knn': []}
for author_texts in list_author_texts:
# get article for n authors with number of author texts
df_article = get_n_article_by_author(df_all_texts, 25, author_texts)
# Get the word count for the individual relative frequency
char_count = get_char_count(df_article)
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if author_texts * 0.4 < 10:
cv = int(author_texts * 0.4)
else:
cv = 10
# Get the scores for every feature
for feature in ["digit_sum", "digits"]:
# select the test/train data by the feature name and calculate the individual relative frequency
if feature == "digit_sum":
x = get_rel_frequency(get_sum_digits(df_article).fillna(value=0), char_count['char_count'])
elif feature == "digits":
x = get_rel_frequency(get_digits(df_article).fillna(value=0), char_count['char_count'])
y = df_article['label_encoded']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42, stratify=y)
# calculate scores
gnb_score = get_f1_for_gnb(x_train, x_test, y_train, y_test)
svc_score = get_f1_for_svc(x_train, x_test, y_train, y_test, cv)
knn_score = get_f1_for_knn(x_train, x_test, y_train, y_test, cv)
# Append scores to dictionary
dic_f1_results[f'{feature}_gnb'].append(gnb_score)
dic_f1_results[f'{feature}_svc'].append(svc_score)
dic_f1_results[f'{feature}_knn'].append(knn_score)
# Console output
print(f"GNB-Score for {feature} with {author_texts}: {gnb_score}")
print(f"SVC-Score for {feature} with {author_texts}: {svc_score}")
print(f"KNN-Score for {feature} with {author_texts}: {knn_score}")
df_results = pd.DataFrame(dic_f1_results)
df_results['number_article'] = list_author_texts
return df_results
# Chapter 7.3.3. comparison of the word ngrams with n 4-6
def compare_word_4_6_grams():
df_all_texts = pd.read_csv("musikreviews_balanced_authors.csv", sep=',', encoding="utf-8")
# Different values for the texts by authors
list_author_texts = [10, 15, 25, 50, 75, 100]
# save the results in a dictionary
dic_f1_results = {'w4g_gnb': [], 'w4g_svc': [], 'w4g_knn': [],
'w5g_gnb': [], 'w5g_svc': [], 'w5g_knn': [],
'w6g_gnb': [], 'w6g_svc': [], 'w6g_knn': []}
# load the data
df_w4g = pd.read_csv("daten/6_feature_analysis/input_data/word_4_gram_rel.csv", sep=',', encoding="utf-8")
df_w5g = pd.read_csv("daten/6_feature_analysis/input_data/word_5_gram_rel.csv", sep=',', encoding="utf-8")
df_w6g = pd.read_csv("daten/6_feature_analysis/input_data/word_6_gram_rel.csv", sep=',', encoding="utf-8")
for author_texts in list_author_texts:
# indices for article for n authors with m texts
index_list = get_n_article_index_by_author(df_all_texts, 25, author_texts)
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if author_texts * 0.4 < 10:
cv = int(author_texts * 0.4)
# CV between 5 and 10 is unusual
cv = 5 if cv > 5 else cv
else:
cv = 10
# Get the scores for every feature
for feature in ["w4g", "w5g", "w6g"]:
# select the rows of the precomputed n-gram matrices by the given indices
if feature == "w4g":
x = df_w4g.iloc[index_list]
elif feature == "w5g":
x = df_w5g.iloc[index_list]
elif feature == "w6g":
x = df_w6g.iloc[index_list]
# Delete features which only occur once
x = trim_df_by_occurrence(x, 1)
# reset the indices to have an order from 0 to authors * texts per author - 1
x = x.reset_index(drop=True)
y = df_all_texts.iloc[index_list]['label_encoded']
y = y.reset_index(drop=True)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42, stratify=y)
# calculate scores
gnb_score = get_f1_for_gnb(x_train, x_test, y_train, y_test)
svc_score = get_f1_for_svc(x_train, x_test, y_train, y_test, cv)
knn_score = get_f1_for_knn(x_train, x_test, y_train, y_test, cv)
# Append scores to dictionary
dic_f1_results[f'{feature}_gnb'].append(gnb_score)
dic_f1_results[f'{feature}_svc'].append(svc_score)
dic_f1_results[f'{feature}_knn'].append(knn_score)
# Console output
print(f"GNB-Score for {feature} with {author_texts}: {gnb_score}")
print(f"SVC-Score for {feature} with {author_texts}: {svc_score}")
print(f"KNN-Score for {feature} with {author_texts}: {knn_score}")
df_results = pd.DataFrame(dic_f1_results)
df_results['number_article'] = list_author_texts
return df_results
# Chapter 7.3.3. comparison of the word ngrams with n 2-3
def compare_word_2_3_grams():
df_all_texts = pd.read_csv("musikreviews_balanced_authors.csv", sep=',', encoding="utf-8")
# Different values for the texts by authors
list_author_texts = [10, 15, 25, 50, 75, 100]
# save the results in a dictionary
dic_f1_results = {'w2g_gnb': [], 'w2g_svc': [], 'w2g_knn': [],
'w3g_gnb': [], 'w3g_svc': [], 'w3g_knn': []}
for author_texts in list_author_texts:
print(f"Texte pro Autor: {author_texts}")
# indices for article for n authors with m texts
index_list = get_n_article_index_by_author(df_balanced, 25, author_texts)
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if author_texts * 0.4 < 10:
cv = int(author_texts * 0.4)
# CV between 5 and 10 is unusual
cv = 5 if cv > 5 else cv
else:
cv = 10
# select the article rows by the given indices
df_balanced = df_all_texts.iloc[index_list].reset_index(drop=True)
print(f"Artikel: {len(df_balanced.index)}")
# extract the features
df_w2g = get_word_n_grams(df_balanced, 2)
df_w3g = get_word_n_grams(df_balanced, 3)
# Preprocessing steps
word_count = get_word_count(df_balanced)
df_w2g = preprocessing_steps_pos_tag_n_grams(df_w2g, word_count['word_count'])
df_w3g = preprocessing_steps_pos_tag_n_grams(df_w3g, word_count['word_count'])
# Scale the data; otherwise the SVM needs a long time with very small values.
scaler = StandardScaler()
df_w2g[df_w2g.columns] = scaler.fit_transform(df_w2g[df_w2g.columns])
df_w3g[df_w3g.columns] = scaler.fit_transform(df_w3g[df_w3g.columns])
label = df_balanced['label_encoded']
# Train/Test 60/40 split
df_w2g_train, df_w2g_test, df_w3g_train, df_w3g_test, label_train, label_test = \
train_test_split(df_w2g, df_w3g, label, test_size=0.4, random_state=42, stratify=label)
# Get the scores for every feature
for feature in ["w2g", "w3g"]:
# apply the iterative filter to the feature matrix of the current n-gram size
# returns df_x_train_gnb, df_x_test_gnb, df_x_train_svc, df_x_test_svc, df_x_train_knn, df_x_test_knn
if feature == "w2g":
x_gnb_train, x_gnb_test, x_svc_train, x_svc_test, x_knn_train, x_knn_test = \
feature_selection_iterative_filter(df_w2g_train, df_w2g_test, label_train, 1.0, mutual_info_classif)
elif feature == "w3g":
x_gnb_train, x_gnb_test, x_svc_train, x_svc_test, x_knn_train, x_knn_test = \
feature_selection_iterative_filter(df_w3g_train, df_w3g_test, label_train, 1.0, mutual_info_classif)
# Do not use the iterative filter for the GNB data because it produced poor results;
# split the unfiltered matrix of the current n-gram size instead
x_gnb_train, x_gnb_test, label_train, label_test = \
train_test_split(df_w2g if feature == "w2g" else df_w3g, label, test_size=0.4, random_state=42, stratify=label)
print(f"cv: {cv}")
print(f"Train Labels: {label_train.value_counts()}")
print(f"Test Labels: {label_test.value_counts()}")
# calculate scores
gnb_score = get_f1_for_gnb(x_gnb_train, x_gnb_test, label_train, label_test)
svc_score = get_f1_for_svc(x_svc_train, x_svc_test, label_train, label_test, cv)
knn_score = get_f1_for_knn(x_knn_train, x_knn_test, label_train, label_test, cv)
# Append scores to dictionary
dic_f1_results[f'{feature}_gnb'].append(gnb_score)
dic_f1_results[f'{feature}_svc'].append(svc_score)
dic_f1_results[f'{feature}_knn'].append(knn_score)
# Console output
print(f"GNB-Score for {feature} with {author_texts}: {gnb_score}")
print(f"SVC-Score for {feature} with {author_texts}: {svc_score}")
print(f"KNN-Score for {feature} with {author_texts}: {knn_score}")
df_results = pd.DataFrame(dic_f1_results)
df_results['number_article'] = list_author_texts
return df_results
# Chapter 7.3.4. comparison of the different lengths of char ngrams
# Chapter 7.3.4. whole process of the comparison of the char-n-gram features
def compare_char_n_grams_process(par_base_path):
df_all_texts = pd.read_csv(f"musikreviews_balanced_authors.csv", sep=',', encoding="utf-8")
author_counts = [25]
text_counts = [10, 15, 25, 50, 75, 100]
for number_authors in author_counts:
for number_texts in text_counts:
index_list = get_n_article_index_by_author(df_all_texts, number_authors, number_texts)
df_balanced = df_all_texts.iloc[index_list].reset_index(drop=True)
extract_n_gram_features_to_csv(df_balanced, par_base_path, number_authors, number_texts)
iterative_filter_process(par_base_path, df_balanced, number_texts, number_authors)
compare_char_affix_ngrams(text_counts, author_counts, par_base_path, df_all_texts) \
.to_csv(f"{par_base_path}results/char_affix_n_grams.csv", index=False)
compare_char_word_ngrams(text_counts, author_counts, par_base_path, df_all_texts) \
.to_csv(f"{par_base_path}results/char_word_n_grams.csv", index=False)
compare_char_punct_ngrams(text_counts, author_counts, par_base_path, df_all_texts) \
.to_csv(f"{par_base_path}results/char_punct_n_grams.csv", index=False)
# Chapter 7.3.4. char-affix-ngrams
def compare_char_affix_ngrams(par_author_texts, par_authors, par_base_path, par_df):
# save the results in a dictionary
dic_f1_results = {'c_affix_2_gnb': [], 'c_affix_2_svc': [], 'c_affix_2_knn': [],
'c_affix_3_gnb': [], 'c_affix_3_svc': [], 'c_affix_3_knn': [],
'c_affix_4_gnb': [], 'c_affix_4_svc': [], 'c_affix_4_knn': [],
'c_affix_5_gnb': [], 'c_affix_5_svc': [], 'c_affix_5_knn': [],
'number_authors': [], 'number_texts': []}
for number_authors in par_authors:
for number_texts in par_author_texts:
index_list = get_n_article_index_by_author(par_df, number_authors, number_texts)
df_balanced = par_df.iloc[index_list].reset_index(drop=True)
# define the splits for the hyperparameter tuning, cannot be greater than number of members in each class
if number_texts * 0.4 < 10:
cv = int(number_texts * 0.4)
# CV between 5 and 10 is unusual
cv = 5 if cv > 5 else cv
else:
cv = 10
label = df_balanced['label_encoded']
# Get the scores for every feature
# Append authors and texts
dic_f1_results['number_authors'].append(number_authors)
dic_f1_results['number_texts'].append(number_texts)
for feature in ["c_affix_2", "c_affix_3", "c_affix_4", "c_affix_5"]:
# read the data based on n, texts and authors
if feature == "c_affix_2":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_affix_2_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_affix_2_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_affix_2_gram_filtered.csv")
elif feature == "c_affix_3":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_affix_3_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_affix_3_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_affix_3_gram_filtered.csv")
elif feature == "c_affix_4":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_affix_4_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_affix_4_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_affix_4_gram_filtered.csv")
elif feature == "c_affix_5":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_affix_5_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_affix_5_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_affix_5_gram_filtered.csv")
# Scale the data; otherwise the SVM needs a long time with very small values.
scaler = StandardScaler()
df_gnb[df_gnb.columns] = scaler.fit_transform(df_gnb[df_gnb.columns])
df_svc[df_svc.columns] = scaler.fit_transform(df_svc[df_svc.columns])
df_knn[df_knn.columns] = scaler.fit_transform(df_knn[df_knn.columns])
# Train/Test 60/40 split
x_gnb_train, x_gnb_test, x_svc_train, x_svc_test, x_knn_train, x_knn_test, label_train, label_test = \
train_test_split(df_gnb, df_svc, df_knn, label, test_size=0.4, random_state=42, stratify=label)
# calculate scores
gnb_score = get_f1_for_gnb(x_gnb_train, x_gnb_test, label_train, label_test)
svc_score = get_f1_for_svc(x_svc_train, x_svc_test, label_train, label_test, cv)
knn_score = get_f1_for_knn(x_knn_train, x_knn_test, label_train, label_test, cv)
# Append scores to dictionary
dic_f1_results[f'{feature}_gnb'].append(gnb_score)
dic_f1_results[f'{feature}_svc'].append(svc_score)
dic_f1_results[f'{feature}_knn'].append(knn_score)
# Console output
print(f"GNB-Score for {feature} with {number_authors} authors and {number_texts} texts: {gnb_score}")
print(f"SVC-Score for {feature} with {number_authors} authors and {number_texts} texts: {svc_score}")
print(f"KNN-Score for {feature} with {number_authors} authors and {number_texts} texts: {knn_score}")
return pd.DataFrame(dic_f1_results)
# Chapter 7.3.4. char-word-ngrams
def compare_char_word_ngrams(par_author_texts, par_authors, par_base_path, par_df):
# save the results in a dictionary
dic_f1_results = {'c_word_2_gnb': [], 'c_word_2_svc': [], 'c_word_2_knn': [],
'c_word_3_gnb': [], 'c_word_3_svc': [], 'c_word_3_knn': [],
'c_word_4_gnb': [], 'c_word_4_svc': [], 'c_word_4_knn': [],
'c_word_5_gnb': [], 'c_word_5_svc': [], 'c_word_5_knn': [],
'number_authors': [], 'number_texts': []}
for number_authors in par_authors:
for number_texts in par_author_texts:
index_list = get_n_article_index_by_author(par_df, number_authors, number_texts)
df_balanced = par_df.iloc[index_list].reset_index(drop=True)
# define the splits for the hyperparameter tuning, cannot be greater than number of members in each class
if number_texts * 0.4 < 10:
cv = int(number_texts * 0.4)
# CV between 5 and 10 is unusual
cv = 5 if cv > 5 else cv
else:
cv = 10
label = df_balanced['label_encoded']
# Get the scores for every feature
# Append authors and texts
dic_f1_results['number_authors'].append(number_authors)
dic_f1_results['number_texts'].append(number_texts)
for feature in ["c_word_2", "c_word_3", "c_word_4", "c_word_5"]:
# read the data based on n, texts and authors
if feature == "c_word_2":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_word_2_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_word_2_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_word_2_gram_filtered.csv")
elif feature == "c_word_3":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_word_3_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_word_3_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_word_3_gram_filtered.csv")
elif feature == "c_word_4":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_word_4_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_word_4_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_word_4_gram_filtered.csv")
elif feature == "c_word_5":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_word_5_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_word_5_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_word_5_gram_filtered.csv")
# Scale the data; otherwise the SVM needs a long time with very small values.
scaler = StandardScaler()
df_gnb[df_gnb.columns] = scaler.fit_transform(df_gnb[df_gnb.columns])
df_svc[df_svc.columns] = scaler.fit_transform(df_svc[df_svc.columns])
df_knn[df_knn.columns] = scaler.fit_transform(df_knn[df_knn.columns])
# Train/Test 60/40 split
x_gnb_train, x_gnb_test, x_svc_train, x_svc_test, x_knn_train, x_knn_test, label_train, label_test = \
train_test_split(df_gnb, df_svc, df_knn, label, test_size=0.4, random_state=42, stratify=label)
# calculate scores
gnb_score = get_f1_for_gnb(x_gnb_train, x_gnb_test, label_train, label_test)
svc_score = get_f1_for_svc(x_svc_train, x_svc_test, label_train, label_test, cv)
knn_score = get_f1_for_knn(x_knn_train, x_knn_test, label_train, label_test, cv)
# Append scores to dictionary
dic_f1_results[f'{feature}_gnb'].append(gnb_score)
dic_f1_results[f'{feature}_svc'].append(svc_score)
dic_f1_results[f'{feature}_knn'].append(knn_score)
# Console output
print(f"GNB-Score for {feature} with {number_authors} authors and {number_texts} texts: {gnb_score}")
print(f"SVC-Score for {feature} with {number_authors} authors and {number_texts} texts: {svc_score}")
print(f"KNN-Score for {feature} with {number_authors} authors and {number_texts} texts: {knn_score}")
return pd.DataFrame(dic_f1_results)
# Chapter 7.3.4. char-punct-ngrams
def compare_char_punct_ngrams(par_author_texts, par_authors, par_base_path, par_df):
# save the results in a dictionary
dic_f1_results = {'c_punct_2_gnb': [], 'c_punct_2_svc': [], 'c_punct_2_knn': [],
'c_punct_3_gnb': [], 'c_punct_3_svc': [], 'c_punct_3_knn': [],
'c_punct_4_gnb': [], 'c_punct_4_svc': [], 'c_punct_4_knn': [],
'c_punct_5_gnb': [], 'c_punct_5_svc': [], 'c_punct_5_knn': [],
'number_authors': [], 'number_texts': []}
for number_authors in par_authors:
for number_texts in par_author_texts:
index_list = get_n_article_index_by_author(par_df, number_authors, number_texts)
df_balanced = par_df.iloc[index_list].reset_index(drop=True)
# define the splits for the hyperparameter tuning, cannot be greater than number of members in each class
if number_texts * 0.4 < 10:
cv = int(number_texts * 0.4)
# CV between 5 and 10 is unusual
cv = 5 if cv > 5 else cv
else:
cv = 10
label = df_balanced['label_encoded']
# Get the scores for every feature
# Append authors and texts
dic_f1_results['number_authors'].append(number_authors)
dic_f1_results['number_texts'].append(number_texts)
for feature in ["c_punct_2", "c_punct_3", "c_punct_4", "c_punct_5"]:
# read the data based on n, texts and authors
if feature == "c_punct_2":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_punct_2_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_punct_2_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_punct_2_gram_filtered.csv")
elif feature == "c_punct_3":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_punct_3_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_punct_3_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_punct_3_gram_filtered.csv")
elif feature == "c_punct_4":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_punct_4_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_punct_4_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_punct_4_gram_filtered.csv")
elif feature == "c_punct_5":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_punct_5_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_punct_5_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_punct_5_gram_filtered.csv")
# Scale the data; otherwise the SVM needs a long time with very small values.
scaler = StandardScaler()
df_gnb[df_gnb.columns] = scaler.fit_transform(df_gnb[df_gnb.columns])
df_svc[df_svc.columns] = scaler.fit_transform(df_svc[df_svc.columns])
df_knn[df_knn.columns] = scaler.fit_transform(df_knn[df_knn.columns])
# Train/Test 60/40 split
x_gnb_train, x_gnb_test, x_svc_train, x_svc_test, x_knn_train, x_knn_test, label_train, label_test = \
train_test_split(df_gnb, df_svc, df_knn, label, test_size=0.4, random_state=42, stratify=label)
# calculate scores
gnb_score = get_f1_for_gnb(x_gnb_train, x_gnb_test, label_train, label_test)
svc_score = get_f1_for_svc(x_svc_train, x_svc_test, label_train, label_test, cv)
knn_score = get_f1_for_knn(x_knn_train, x_knn_test, label_train, label_test, cv)
# Append scores to dictionary
dic_f1_results[f'{feature}_gnb'].append(gnb_score)
dic_f1_results[f'{feature}_svc'].append(svc_score)
dic_f1_results[f'{feature}_knn'].append(knn_score)
# Console output
print(f"GNB-Score for {feature} with {number_authors} authors and {number_texts} texts: {gnb_score}")
print(f"SVC-Score for {feature} with {number_authors} authors and {number_texts} texts: {svc_score}")
print(f"KNN-Score for {feature} with {number_authors} authors and {number_texts} texts: {knn_score}")
return pd.DataFrame(dic_f1_results)
# Chapter 7.3.4. Write the char-n-gram features to separate files
def extract_n_gram_features_to_csv(par_df, par_base_path, par_number_authors, par_number_texts):
char_count = get_char_count(par_df)
# n from 2-5
for n in range(2, 6):
ca_ng = get_char_affix_n_grams(par_df, n)
preprocessing_steps_char_n_grams(ca_ng, char_count['char_count'])\
.to_csv(f"{par_base_path}csv_before_filter/a{par_number_authors}_t{par_number_texts}"
f"_char_affix_{n}_gram.csv", index=False)
cw_ng = get_char_word_n_grams(par_df, n)
preprocessing_steps_char_n_grams(cw_ng, char_count['char_count'])\
.to_csv(f"{par_base_path}csv_before_filter/a{par_number_authors}_t{par_number_texts}"
f"_char_word_{n}_gram.csv", index=False)
cp_ng = get_char_punct_n_grams(par_df, n)
preprocessing_steps_char_n_grams(cp_ng, char_count['char_count'])\
.to_csv(f"{par_base_path}csv_before_filter/a{par_number_authors}_t{par_number_texts}"
f"_char_punct_{n}_gram.csv", index=False)
print(f"Extraction Round {n - 1} done")
return True
# combined preprocessing steps of the pos-tag-n-grams
def preprocessing_steps_pos_tag_n_grams(par_feature, length_metric):
# Filter features which only occur once
par_feature = trim_df_by_occurrence(par_feature, 1)
# Individual relative frequency
par_feature = get_rel_frequency(par_feature.fillna(value=0), length_metric)
return par_feature
# combined preprocessing steps of the char-n-grams
def preprocessing_steps_char_n_grams(par_feature, length_metric):
# Filter features which only occur once
par_feature = trim_df_sum_feature(par_feature, 5)
# Individual relative frequency
par_feature = get_rel_frequency(par_feature.fillna(value=0), length_metric)
return par_feature
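# Minimal sketch of the relative-frequency step used above (assumption: get_rel_frequency, defined earlier
# in this script, divides each feature column row-wise by the length metric).
def rel_frequency_sketch(par_counts, par_length_metric):
    # e.g. pd.DataFrame({'ab': [2, 4]}) with char counts pd.Series([100, 200]) -> 0.02 and 0.02
    return par_counts.div(par_length_metric, axis=0)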
# Feature selection with the iterative filter without writing the results to a file
def feature_selection_iterative_filter(par_x_train, par_x_test, par_y_train, par_step, par_classif):
df_sorted_features = sort_features_by_score(par_x_train, par_y_train, par_classif)
# Calculate the best percentiles of the data for the different classifier
best_perc_gnb = get_best_percentile_gnb(par_x_train, par_y_train, 50, df_sorted_features, par_step)[0]
best_perc_svc = get_best_percentile_svc(par_x_train, par_y_train, 50, df_sorted_features, par_step)[0]
best_perc_knn = get_best_percentile_knn(par_x_train, par_y_train, 50, df_sorted_features, par_step)[0]
# keep the best percentile of the features: (number of features / 100) multiplied by the best percent value
# select the best features from the original dataset
df_x_train_gnb = par_x_train[
df_sorted_features['feature_names'][: round(best_perc_gnb * (len(par_x_train.columns) / 100))].tolist()]
df_x_test_gnb = par_x_test[
df_sorted_features['feature_names'][: round(best_perc_gnb * (len(par_x_train.columns) / 100))].tolist()]
df_x_train_svc = par_x_train[
df_sorted_features['feature_names'][: round(best_perc_svc * (len(par_x_train.columns) / 100))].tolist()]
df_x_test_svc = par_x_test[
df_sorted_features['feature_names'][: round(best_perc_svc * (len(par_x_train.columns) / 100))].tolist()]
df_x_train_knn = par_x_train[
df_sorted_features['feature_names'][: round(best_perc_knn * (len(par_x_train.columns) / 100))].tolist()]
df_x_test_knn = par_x_test[
df_sorted_features['feature_names'][: round(best_perc_knn * (len(par_x_train.columns) / 100))].tolist()]
return df_x_train_gnb, df_x_test_gnb, df_x_train_svc, df_x_test_svc, df_x_train_knn, df_x_test_knn
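# Worked example: with 1,000 ranked features and best_perc_gnb = 7, the slice keeps
# round(7 * (1000 / 100)) = 70 feature names, i.e. the 70 best-scored columns are selected
# from both the train and the test matrix.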
# Chapter 7.3.5. function to compare the pos-tag-n-grams
def compare_pos_tag_ngrams(par_author_texts, par_authors, par_base_path, par_df):
# save the results in a dictionary
dic_f1_results = {'pos_2_gnb': [], 'pos_2_svc': [], 'pos_2_knn': [],
'pos_3_gnb': [], 'pos_3_svc': [], 'pos_3_knn': [],
'pos_4_gnb': [], 'pos_4_svc': [], 'pos_4_knn': [],
'pos_5_gnb': [], 'pos_5_svc': [], 'pos_5_knn': [],
'number_authors': [], 'number_texts': []}
for number_authors in par_authors:
for number_texts in par_author_texts:
index_list = get_n_article_index_by_author(par_df, number_authors, number_texts)
df_balanced = par_df.iloc[index_list].reset_index(drop=True)
# define the splits for the hyperparameter tuning, cannot be greater than number of members in each class
if number_texts * 0.4 < 10:
cv = int(number_texts * 0.4)
# CV between 5 and 10 is unusual
cv = 5 if cv > 5 else cv
else:
cv = 10
label = df_balanced['label_encoded']
# Get the scores for every feature
# Append authors and texts
dic_f1_results['number_authors'].append(number_authors)
dic_f1_results['number_texts'].append(number_texts)
for feature in ["pos_2", "pos_3", "pos_4", "pos_5"]:
# read the data based on n, texts and authors
if feature == "pos_2":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_pos_tag_2_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_pos_tag_2_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_pos_tag_2_gram_filtered.csv")
elif feature == "pos_3":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_pos_tag_3_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_pos_tag_3_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_pos_tag_3_gram_filtered.csv")
elif feature == "pos_4":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_pos_tag_4_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_pos_tag_4_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_pos_tag_4_gram_filtered.csv")
elif feature == "pos_5":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_pos_tag_5_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_pos_tag_5_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_pos_tag_5_gram_filtered.csv")
# Scale the data; otherwise the SVM needs a long time with very small values.
scaler = StandardScaler()
df_gnb[df_gnb.columns] = scaler.fit_transform(df_gnb[df_gnb.columns])
df_svc[df_svc.columns] = scaler.fit_transform(df_svc[df_svc.columns])
df_knn[df_knn.columns] = scaler.fit_transform(df_knn[df_knn.columns])
# Train/Test 60/40 split
x_gnb_train, x_gnb_test, x_svc_train, x_svc_test, x_knn_train, x_knn_test, label_train, label_test = \
train_test_split(df_gnb, df_svc, df_knn, label, test_size=0.4, random_state=42, stratify=label)
# calculate scores
gnb_score = get_f1_for_gnb(x_gnb_train, x_gnb_test, label_train, label_test)
svc_score = get_f1_for_svc(x_svc_train, x_svc_test, label_train, label_test, cv)
knn_score = get_f1_for_knn(x_knn_train, x_knn_test, label_train, label_test, cv)
# Append scores to dictionary
dic_f1_results[f'{feature}_gnb'].append(gnb_score)
dic_f1_results[f'{feature}_svc'].append(svc_score)
dic_f1_results[f'{feature}_knn'].append(knn_score)
# Console output
print(f"GNB-Score for {feature} with {number_authors} authors and {number_texts} texts: {gnb_score}")
print(f"SVC-Score for {feature} with {number_authors} authors and {number_texts} texts: {svc_score}")
print(f"KNN-Score for {feature} with {number_authors} authors and {number_texts} texts: {knn_score}")
return pd.DataFrame(dic_f1_results)
# Chapter 7.3.5. complete process of the pos-tag-n-grams comparison
def compare_pos_n_grams_process(par_base_path):
df_all_texts = pd.read_csv(f"musikreviews_balanced_authors.csv", sep=',', encoding="utf-8")
author_counts = [25]
text_counts = [10, 15, 25, 50, 75, 100]
for number_authors in author_counts:
for number_texts in text_counts:
index_list = get_n_article_index_by_author(df_all_texts, number_authors, number_texts)
df_balanced = df_all_texts.iloc[index_list].reset_index(drop=True)
word_count = get_word_count(df_balanced)
# extract features and preprocessing
for n in range(2, 6):
pt_ng = get_pos_tags_n_grams(df_balanced, n)
preprocessing_steps_pos_tag_n_grams(pt_ng, word_count['word_count']) \
.to_csv(f"{par_base_path}csv_before_filter/"
f"a{number_authors}_t{number_texts}_pos_tag_{n}_gram.csv", index=False)
iterative_filter_process(par_base_path, df_balanced, number_texts, number_authors)
# The 2-grams for SVC are not filtered; overwrite the SVC file in the after-filter folder with the unfiltered version
pt_ng = get_pos_tags_n_grams(df_balanced, 2)
preprocessing_steps_pos_tag_n_grams(pt_ng, word_count['word_count']) \
.to_csv(f"{par_base_path}csv_after_filter/"
f"svc_a{number_authors}_t{number_texts}_pos_tag_2_gram_filtered.csv", index=False)
compare_pos_tag_ngrams(text_counts, author_counts, par_base_path, df_all_texts) \
.to_csv(f"{par_base_path}results/pos_tag_n_grams.csv", index=False)
# Method to write all features to CSV for different counts of authors and texts,
# including all preprocessing steps and filtering
def print_all_features_svc(par_base_path, par_article_path):
df_all_texts = pd.read_csv(f"{par_article_path}", sep=',', encoding="utf-8")
author_counts = [2, 3, 4, 5, 10, 15, 25]
text_counts = [5, 10, 15, 25, 50, 75, 100]
for number_authors in author_counts:
for number_texts in text_counts:
index_list = get_n_article_index_by_author(df_all_texts, number_authors, number_texts)
df_balanced = df_all_texts.iloc[index_list].reset_index(drop=True)
# get all the features
df_bow = get_bow_matrix(df_balanced)
df_word_2g = get_word_n_grams(df_balanced, 2)
df_word_count = get_word_count(df_balanced)
df_word_length = get_word_length_matrix_with_margin(df_balanced, 20)
df_yules_k = get_yules_k(df_balanced)
sc_label_vector = ["!", "„", "“", "§", "$", "%", "&", "/", "(", ")", "=", "?", "{", "}", "[", "]", "\\",
"@", "#",
"‚", "‘", "-", "_", "+", "*", ".", ",", ";"]
special_char_matrix = get_special_char_matrix(df_balanced, sc_label_vector)
sc_label_vector = ["s_char:" + sc for sc in sc_label_vector]
df_special_char = pd.DataFrame(data=special_char_matrix, columns=sc_label_vector)
df_char_affix_4g = get_char_affix_n_grams(df_balanced, 4)
df_char_word_3g = get_char_word_n_grams(df_balanced, 3)
df_char_punct_3g = get_char_punct_n_grams(df_balanced, 3)
df_digits = get_sum_digits(df_balanced)
df_fwords = get_function_words(df_balanced)
df_pos_tags = get_pos_tags(df_balanced)
df_pos_tag_2g = get_pos_tags_n_grams(df_balanced, 2)
df_start_pos, df_end_pos = get_sentence_end_start(df_balanced)
df_start_end_pos = pd.concat([df_start_pos, df_end_pos], axis=1)
df_fre = get_flesch_reading_ease_vector(df_balanced)
# 7.1.1 Remove low occurrence
df_bow = trim_df_by_occurrence(df_bow, 1)
df_word_2g = trim_df_by_occurrence(df_word_2g, 1)
df_fwords = trim_df_by_occurrence(df_fwords, 1)
df_pos_tag_2g = trim_df_by_occurrence(df_pos_tag_2g, 1)
df_char_affix_4g = trim_df_sum_feature(df_char_affix_4g, 5)
df_char_word_3g = trim_df_sum_feature(df_char_word_3g, 5)
df_char_punct_3g = trim_df_sum_feature(df_char_punct_3g, 5)
# 7.1.2 Remove high frequency
df_bow = trim_df_by_doc_freq(df_bow, 0.5)
df_word_2g = trim_df_by_doc_freq(df_word_2g, 0.5)
df_fwords = trim_df_by_doc_freq(df_fwords, 0.5)
# 7.1.4 individual relative frequency
df_len_metrics = pd.concat([get_char_count(df_balanced), get_sentence_count(df_balanced),
df_word_count], axis=1)
df_bow = get_rel_frequency(df_bow.fillna(value=0), df_len_metrics['word_count'])
df_word_2g = get_rel_frequency(df_word_2g.fillna(value=0), df_len_metrics['word_count'])
df_word_length = get_rel_frequency(df_word_length.fillna(value=0), df_len_metrics['word_count'])
df_special_char = get_rel_frequency(df_special_char.fillna(value=0), df_len_metrics['char_count'])
df_char_affix_4g = get_rel_frequency(df_char_affix_4g.fillna(value=0), df_len_metrics['char_count'])
df_char_word_3g = get_rel_frequency(df_char_word_3g.fillna(value=0), df_len_metrics['char_count'])
df_char_punct_3g = get_rel_frequency(df_char_punct_3g.fillna(value=0), df_len_metrics['char_count'])
df_digits = get_rel_frequency(df_digits.fillna(value=0), df_len_metrics['char_count'])
df_fwords = get_rel_frequency(df_fwords.fillna(value=0), df_len_metrics['word_count'])
df_pos_tags = get_rel_frequency(df_pos_tags.fillna(value=0), df_len_metrics['word_count'])
df_pos_tag_2g = get_rel_frequency(df_pos_tag_2g.fillna(value=0), df_len_metrics['word_count'])
df_start_end_pos = get_rel_frequency(df_start_end_pos.fillna(value=0), df_len_metrics['sentence_count'])
# Print to CSV
# Files for iterative filter
df_bow.to_csv(f"{par_base_path}csv_before_filter/a{number_authors}_t{number_texts}_bow.csv", index=False)
df_word_2g.to_csv(f"{par_base_path}csv_before_filter/a{number_authors}_t{number_texts}"
f"_word_2_gram.csv", index=False)
df_char_affix_4g.to_csv(f"{par_base_path}csv_before_filter/a{number_authors}_t{number_texts}"
f"_char_affix_4_gram.csv", index=False)
df_char_word_3g.to_csv(f"{par_base_path}csv_before_filter/a{number_authors}_t{number_texts}"
f"_char_word_3_gram.csv", index=False)
df_char_punct_3g.to_csv(f"{par_base_path}csv_before_filter/a{number_authors}_t{number_texts}"
f"_char_punct_3_gram.csv", index=False)
df_fwords.to_csv(f"{par_base_path}csv_before_filter/a{number_authors}_t{number_texts}"
f"_function_words.csv", index=False)
# Files that skip the iterative filter go directly into the after-filter folder
df_word_count.to_csv(f"{par_base_path}csv_after_filter/a{number_authors}_t{number_texts}"
f"_word_count.csv", index=False)
df_word_length.to_csv(f"{par_base_path}csv_after_filter/a{number_authors}_t{number_texts}"
f"_word_length.csv", index=False)
df_yules_k.to_csv(f"{par_base_path}csv_after_filter/a{number_authors}_t{number_texts}"
f"_yules_k.csv", index=False)
df_special_char.to_csv(f"{par_base_path}csv_after_filter/a{number_authors}_t{number_texts}"
f"_special_char.csv", index=False)
df_digits.to_csv(f"{par_base_path}csv_after_filter/a{number_authors}_t{number_texts}"
f"_digits.csv", index=False)
df_pos_tags.to_csv(f"{par_base_path}csv_after_filter/a{number_authors}_t{number_texts}"
f"_pos_tag.csv", index=False)
df_pos_tag_2g.to_csv(f"{par_base_path}csv_after_filter/a{number_authors}_t{number_texts}"
f"_pos_tag_2_gram.csv", index=False)
df_start_end_pos.to_csv(f"{par_base_path}csv_after_filter/a{number_authors}_t{number_texts}"
f"_pos_tag_start_end.csv", index=False)
df_fre.to_csv(f"{par_base_path}csv_after_filter/a{number_authors}_t{number_texts}_fre.csv", index=False)
print(f"Extraction for {number_authors} authors with {number_texts} texts done. Starting iterative filter")
# Run the iterative filter
iterative_filter_process_svm(par_base_path, df_balanced, number_texts, number_authors)
# create a dataframe with the combined features for a specific number of authors and texts
# features can be excluded by name
def create_df_combined_features(par_path, par_num_texts, par_num_authors, par_exclude):
path = f'{par_path}csv_after_filter'
files = [f for f in listdir(path) if isfile(join(path, f))]
# Filter the files for author and text numbers
r = re.compile(f"a{par_num_authors}_")
files = list(filter(r.match, files))
r = re.compile(f".*t{par_num_texts}_")
files = list(filter(r.match, files))
# exclude a feature by regex
regex = re.compile(f'.*{par_exclude}')
files = [i for i in files if not regex.match(i)]
df_all = pd.DataFrame()
# combine all features
for feature in files:
df_feature = pd.read_csv(f"{par_path}csv_after_filter/{feature}", sep=',', encoding="utf-8")
df_all = pd.concat([df_all, df_feature], axis=1)
return df_all
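# Example (sketch): combine all filtered features for 25 authors with 100 texts each, excluding the
# bag-of-words files via the regex fragment "bow" (the base path is hypothetical).
# df_combined = create_df_combined_features("daten/8_results/", 100, 25, "bow")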
# Chapter 8.4. comparison of normalization and standardization
def compare_normalization_standardization(par_article_path, par_feature_path, par_author_texts, par_authors):
df_all_texts = pd.read_csv(f"{par_article_path}", sep=',', encoding="utf-8")
dic_f1_results = {'without': [], 'standard': [], 'normal': [],
'number_authors': [], 'number_texts': []}
for number_authors in par_authors:
for number_texts in par_author_texts:
index_list = get_n_article_index_by_author(df_all_texts, number_authors, number_texts)
df_balanced = df_all_texts.iloc[index_list].reset_index(drop=True)
# define the splits for the hyperparameter tuning, cannot be greater than number of members in each class
if number_texts * 0.4 < 10:
cv = int(number_texts * 0.4)
# CV between 5 and 10 is unusual
cv = 5 if cv > 5 else cv
else:
cv = 10
label = df_balanced['label_encoded']
# Append authors and texts
dic_f1_results['number_authors'].append(number_authors)
dic_f1_results['number_texts'].append(number_texts)
df_features = create_df_combined_features(par_feature_path, number_texts, number_authors, "nothing")
# standardization of features
df_features_stand = copy.deepcopy(df_features)
scaler = StandardScaler()
df_features_stand[df_features_stand.columns] = \
scaler.fit_transform(df_features_stand[df_features_stand.columns])
# normalization of features
df_features_norm = copy.deepcopy(df_features)
normalizer = Normalizer()
df_features_norm[df_features_norm.columns] = \
normalizer.fit_transform(df_features_norm[df_features_norm.columns])
x_train, x_test, x_train_stand, x_test_stand, x_train_norm, x_test_norm, label_train, label_test = \
train_test_split(df_features, df_features_stand, df_features_norm, label,
test_size=0.4, random_state=42, stratify=label)
# append the results
dic_f1_results['without'].append(get_f1_for_svc(x_train, x_test, label_train, label_test, cv))
dic_f1_results['standard'].append(get_f1_for_svc(x_train_stand, x_test_stand, label_train,
label_test, cv))
dic_f1_results['normal'].append(get_f1_for_svc(x_train_norm, x_test_norm, label_train,
label_test, cv))
print(f"Scores for {number_authors} authors with {number_texts} texts created.")
return pd.DataFrame(dic_f1_results)
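# Example (sketch, hypothetical paths): the resulting table is what get_feature_function_difference
# later reads as results/compared_stand_normal.csv.
# compare_normalization_standardization("musikreviews_balanced_authors.csv", "daten/8_results/",
#                                        [10, 15, 25, 50, 75, 100], [25]) \
#     .to_csv("daten/8_results/results/compared_stand_normal.csv", index=False)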
# Chapter 8.5.1. Comparison of the individual features, data for table 21
def compare_single_features(par_article_path, par_feature_path, par_author_texts, par_authors):
df_all_texts = pd.read_csv(f"{par_article_path}", sep=',', encoding="utf-8")
dic_results = {'number_authors': [], 'number_texts': []}
path = f'{par_feature_path}csv_after_filter'
files = [f for f in listdir(path) if isfile(join(path, f))]
# get unique values for the list of the features
feature_list = list(set([re.search(r"a\d+_t\d+_(.+?(?=$))", f).group(1) for f in files]))
for feature in feature_list:
dic_results[feature] = []
for number_authors in par_authors:
for number_texts in par_author_texts:
index_list = get_n_article_index_by_author(df_all_texts, number_authors, number_texts)
df_balanced = df_all_texts.iloc[index_list].reset_index(drop=True)
# define the splits for the hyperparameter tuning, cannot be greater than number of members in each class
if number_texts * 0.4 < 10:
cv = int(number_texts * 0.4)
# CV between 5 and 10 is unusual
cv = 5 if cv > 5 else cv
else:
cv = 10
label = df_balanced['label_encoded']
# Append authors and texts
dic_results['number_authors'].append(number_authors)
dic_results['number_texts'].append(number_texts)
for feature in feature_list:
df_feature = pd.read_csv(
f"{par_feature_path}csv_after_filter/a{number_authors}_t{number_texts}_{feature}")
# standardization of features
scaler = StandardScaler()
df_feature[df_feature.columns] = \
scaler.fit_transform(df_feature[df_feature.columns])
x_train, x_test, label_train, label_test = \
train_test_split(df_feature, label, test_size=0.4, random_state=42, stratify=label)
dic_results[feature].append(
get_f1_for_svc(x_train, x_test, label_train, label_test, cv))
print(f"Scores for {number_authors} authors with {number_texts} texts created.")
return pd.DataFrame(dic_results)
# Chapter 8.5.2. Get the values of the difference functions, data for table 22
def get_feature_function_difference(par_article_path, par_feature_path, par_author_texts, par_authors):
df_all_texts = pd.read_csv(f"{par_article_path}", sep=',', encoding="utf-8")
dic_f1_wo_feature = {'wo_bow': [], 'wo_word_2_gram': [], 'wo_word_count': [], 'wo_word_length': [],
'wo_yules_k': [], 'wo_special_char': [], 'wo_char_affix': [], 'wo_char_word': [],
'wo_char_punct': [], 'wo_digits': [], 'wo_function_words': [], 'wo_pos_tag.csv': [],
'wo_pos_tag_2_gram': [], 'wo_pos_tag_start_end': [], 'wo_fre': [], 'number_authors': [],
'number_texts': []}
dic_f1_diff_feature = {'diff_bow': [], 'diff_word_2_gram': [], 'diff_word_count': [], 'diff_word_length': [],
'diff_yules_k': [], 'diff_special_char': [], 'diff_char_affix': [], 'diff_char_word': [],
'diff_char_punct': [], 'diff_digits': [], 'diff_function_words': [], 'diff_pos_tag.csv': [],
'diff_pos_tag_2_gram': [], 'diff_pos_tag_start_end': [], 'diff_fre': [],
'number_authors': [],
'number_texts': []}
for number_authors in par_authors:
for number_texts in par_author_texts:
index_list = get_n_article_index_by_author(df_all_texts, number_authors, number_texts)
df_balanced = df_all_texts.iloc[index_list].reset_index(drop=True)
# define the splits for the hyperparameter tuning, cannot be greater than number of members in each class
if number_texts * 0.4 < 10:
cv = int(number_texts * 0.4)
# CV between 5 and 10 is unusual
cv = 5 if cv > 5 else cv
else:
cv = 10
label = df_balanced['label_encoded']
# Append authors and texts
dic_f1_wo_feature['number_authors'].append(number_authors)
dic_f1_wo_feature['number_texts'].append(number_texts)
dic_f1_diff_feature['number_authors'].append(number_authors)
dic_f1_diff_feature['number_texts'].append(number_texts)
# Read the f1 Score from the previous calculations
df_score_all = pd.read_csv(f"{par_feature_path}/results/compared_stand_normal.csv")
f1_score_all = df_score_all.loc[(df_score_all['number_authors'] == number_authors) &
(df_score_all['number_texts'] == number_texts)]['standard'].iloc[0]
for key in dic_f1_diff_feature:
if key != "number_authors" and key != "number_texts":
key = re.search(r'.+?(?=_)_(.*)', key).group(1)
# exclude the specific feature
df_features = create_df_combined_features(par_feature_path, number_texts, number_authors, key)
# standardization of features
scaler = StandardScaler()
df_features[df_features.columns] = \
scaler.fit_transform(df_features[df_features.columns])
x_train, x_test, label_train, label_test = \
train_test_split(df_features, label, test_size=0.4, random_state=42, stratify=label)
# append the results
score_wo = get_f1_for_svc(x_train, x_test, label_train, label_test, cv)
dic_f1_wo_feature[f'wo_{key}'].append(score_wo)
dic_f1_diff_feature[f'diff_{key}'].append(f1_score_all - score_wo)
print(f"{key} done for {number_authors} authors and {number_texts} texts.")
return pd.DataFrame(dic_f1_wo_feature), pd.DataFrame(dic_f1_diff_feature)
from os.path import abspath, dirname, join, isfile, normpath, relpath
from pandas.testing import assert_frame_equal
from numpy.testing import assert_allclose
from scipy.interpolate import interp1d
import matplotlib.pylab as plt
from datetime import datetime
import mhkit.wave as wave
from io import StringIO
import pandas as pd
import numpy as np
import contextlib
import unittest
import netCDF4
import inspect
import pickle
import json
import sys
import os
import time
from random import seed, randint
testdir = dirname(abspath(__file__))
datadir = normpath(join(testdir,relpath('../../examples/data/wave')))
class TestResourceSpectrum(unittest.TestCase):
@classmethod
def setUpClass(self):
omega = np.arange(0.1,3.5,0.01)
self.f = omega/(2*np.pi)
self.Hs = 2.5
self.Tp = 8
df = self.f[1] - self.f[0]
Trep = 1/df
self.t = np.arange(0, Trep, 0.05)
@classmethod
def tearDownClass(self):
pass
def test_pierson_moskowitz_spectrum(self):
S = wave.resource.pierson_moskowitz_spectrum(self.f,self.Tp)
Tp0 = wave.resource.peak_period(S).iloc[0,0]
error = np.abs(self.Tp - Tp0)/self.Tp
self.assertLess(error, 0.01)
def test_bretschneider_spectrum(self):
S = wave.resource.bretschneider_spectrum(self.f,self.Tp,self.Hs)
Hm0 = wave.resource.significant_wave_height(S).iloc[0,0]
Tp0 = wave.resource.peak_period(S).iloc[0,0]
errorHm0 = np.abs(self.Tp - Tp0)/self.Tp
errorTp0 = np.abs(self.Hs - Hm0)/self.Hs
self.assertLess(errorHm0, 0.01)
self.assertLess(errorTp0, 0.01)
def test_surface_elevation_seed(self):
S = wave.resource.bretschneider_spectrum(self.f,self.Tp,self.Hs)
sig = inspect.signature(wave.resource.surface_elevation)
seednum = sig.parameters['seed'].default
eta0 = wave.resource.surface_elevation(S, self.t)
eta1 = wave.resource.surface_elevation(S, self.t, seed=seednum)
assert_frame_equal(eta0, eta1)
def test_surface_elevation_phasing(self):
S = wave.resource.bretschneider_spectrum(self.f,self.Tp,self.Hs)
eta0 = wave.resource.surface_elevation(S, self.t)
sig = inspect.signature(wave.resource.surface_elevation)
seednum = sig.parameters['seed'].default
np.random.seed(seednum)
phases = np.random.rand(len(S)) * 2 * np.pi
eta1 = wave.resource.surface_elevation(S, self.t, phases=phases)
assert_frame_equal(eta0, eta1)
def test_surface_elevation_phases_np_and_pd(self):
S0 = wave.resource.bretschneider_spectrum(self.f,self.Tp,self.Hs)
S1 = wave.resource.bretschneider_spectrum(self.f,self.Tp,self.Hs*1.1)
S = pd.concat([S0, S1], axis=1)
phases_np = np.random.rand(S.shape[0], S.shape[1]) * 2 * np.pi
phases_pd = pd.DataFrame(phases_np, index=S.index, columns=S.columns)
eta_np = wave.resource.surface_elevation(S, self.t, phases=phases_np)
eta_pd = wave.resource.surface_elevation(S, self.t, phases=phases_pd)
assert_frame_equal(eta_np, eta_pd)
def test_surface_elevation_frequency_bins_np_and_pd(self):
S0 = wave.resource.bretschneider_spectrum(self.f,self.Tp,self.Hs)
S1 = wave.resource.bretschneider_spectrum(self.f,self.Tp,self.Hs*1.1)
S = pd.concat([S0, S1], axis=1)
eta0 = wave.resource.surface_elevation(S, self.t)
f_bins_np = np.array([np.diff(S.index)[0]]*len(S))
f_bins_pd = pd.DataFrame(f_bins_np, index=S.index, columns=['df'])
eta_np = wave.resource.surface_elevation(S, self.t, frequency_bins=f_bins_np)
eta_pd = wave.resource.surface_elevation(S, self.t, frequency_bins=f_bins_pd)
assert_frame_equal(eta0, eta_np)
assert_frame_equal(eta_np, eta_pd)
def test_surface_elevation_moments(self):
S = wave.resource.jonswap_spectrum(self.f, self.Tp, self.Hs)
eta = wave.resource.surface_elevation(S, self.t)
dt = self.t[1] - self.t[0]
Sn = wave.resource.elevation_spectrum(eta, 1/dt, len(eta.values),
detrend=False, window='boxcar',
noverlap=0)
m0 = wave.resource.frequency_moment(S,0).m0.values[0]
m0n = wave.resource.frequency_moment(Sn,0).m0.values[0]
errorm0 = np.abs((m0 - m0n)/m0)
self.assertLess(errorm0, 0.01)
m1 = wave.resource.frequency_moment(S,1).m1.values[0]
m1n = wave.resource.frequency_moment(Sn,1).m1.values[0]
errorm1 = np.abs((m1 - m1n)/m1)
self.assertLess(errorm1, 0.01)
def test_surface_elevation_rmse(self):
S = wave.resource.jonswap_spectrum(self.f, self.Tp, self.Hs)
eta = wave.resource.surface_elevation(S, self.t)
dt = self.t[1] - self.t[0]
Sn = wave.resource.elevation_spectrum(eta, 1/dt, len(eta),
detrend=False, window='boxcar',
noverlap=0)
fSn = interp1d(Sn.index.values, Sn.values, axis=0)
rmse = (S.values - fSn(S.index.values))**2
rmse_sum = (np.sum(rmse)/len(rmse))**0.5
self.assertLess(rmse_sum, 0.02)
def test_jonswap_spectrum(self):
S = wave.resource.jonswap_spectrum(self.f, self.Tp, self.Hs)
Hm0 = wave.resource.significant_wave_height(S).iloc[0,0]
Tp0 = wave.resource.peak_period(S).iloc[0,0]
errorHm0 = np.abs(self.Tp - Tp0)/self.Tp
errorTp0 = np.abs(self.Hs - Hm0)/self.Hs
self.assertLess(errorHm0, 0.01)
self.assertLess(errorTp0, 0.01)
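# Note: the spectral checks above rely on the standard definitions Hm0 = 4 * sqrt(m0)
# (significant wave height from the zeroth spectral moment) and Tp = 1 / f_peak
# (peak period from the frequency of the spectral maximum), which is presumably what
# the mhkit resource functions compute.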
def test_plot_spectrum(self):
filename = abspath(join(testdir, 'wave_plot_spectrum.png'))
if isfile(filename):
os.remove(filename)
S = wave.resource.pierson_moskowitz_spectrum(self.f,self.Tp)
plt.figure()
wave.graphics.plot_spectrum(S)
plt.savefig(filename, format='png')
plt.close()
self.assertTrue(isfile(filename))
def test_plot_chakrabarti(self):
filename = abspath(join(testdir, 'wave_plot_chakrabarti.png'))
if isfile(filename):
os.remove(filename)
D = 5
H = 10
lambda_w = 200
wave.graphics.plot_chakrabarti(H, lambda_w, D)
plt.savefig(filename)
def test_plot_chakrabarti_np(self):
filename = abspath(join(testdir, 'wave_plot_chakrabarti_np.png'))
if isfile(filename):
os.remove(filename)
D = np.linspace(5, 15, 5)
H = 10 * np.ones_like(D)
lambda_w = 200 * np.ones_like(D)
wave.graphics.plot_chakrabarti(H, lambda_w, D)
plt.savefig(filename)
self.assertTrue(isfile(filename))
def test_plot_chakrabarti_pd(self):
filename = abspath(join(testdir, 'wave_plot_chakrabarti_pd.png'))
if isfile(filename):
os.remove(filename)
D = np.linspace(5, 15, 5)
H = 10 * np.ones_like(D)
lambda_w = 200 * np.ones_like(D)
df = pd.DataFrame([H.flatten(),lambda_w.flatten(),D.flatten()],
index=['H','lambda_w','D']).transpose()
wave.graphics.plot_chakrabarti(df.H, df.lambda_w, df.D)
plt.savefig(filename)
self.assertTrue(isfile(filename))
class TestResourceMetrics(unittest.TestCase):
@classmethod
def setUpClass(self):
omega = np.arange(0.1,3.5,0.01)
self.f = omega/(2*np.pi)
self.Hs = 2.5
self.Tp = 8
file_name = join(datadir, 'ValData1.json')
with open(file_name, "r") as read_file:
self.valdata1 = pd.DataFrame(json.load(read_file))
self.valdata2 = {}
file_name = join(datadir, 'ValData2_MC.json')
with open(file_name, "r") as read_file:
data = json.load(read_file)
self.valdata2['MC'] = data
for i in data.keys():
# Calculate elevation spectra
elevation = pd.DataFrame(data[i]['elevation'])
elevation.index = elevation.index.astype(float)
elevation.sort_index(inplace=True)
sample_rate = data[i]['sample_rate']
NFFT = data[i]['NFFT']
self.valdata2['MC'][i]['S'] = wave.resource.elevation_spectrum(elevation,
sample_rate, NFFT)
file_name = join(datadir, 'ValData2_AH.json')
with open(file_name, "r") as read_file:
data = json.load(read_file)
self.valdata2['AH'] = data
for i in data.keys():
# Calculate elevation spectra
elevation = pd.DataFrame(data[i]['elevation'])
elevation.index = elevation.index.astype(float)
elevation.sort_index(inplace=True)
sample_rate = data[i]['sample_rate']
NFFT = data[i]['NFFT']
self.valdata2['AH'][i]['S'] = wave.resource.elevation_spectrum(elevation,
sample_rate, NFFT)
file_name = join(datadir, 'ValData2_CDiP.json')
with open(file_name, "r") as read_file:
data = json.load(read_file)
self.valdata2['CDiP'] = data
for i in data.keys():
temp = pd.Series(data[i]['S']).to_frame('S')
temp.index = temp.index.astype(float)
self.valdata2['CDiP'][i]['S'] = temp
@classmethod
def tearDownClass(self):
pass
def test_kfromw(self):
for i in self.valdata1.columns:
f = np.array(self.valdata1[i]['w'])/(2*np.pi)
h = self.valdata1[i]['h']
rho = self.valdata1[i]['rho']
expected = self.valdata1[i]['k']
k = wave.resource.wave_number(f, h, rho)
calculated = k.loc[:,'k'].values
error = ((expected-calculated)**2).sum() # SSE
self.assertLess(error, 1e-6)
def test_kfromw_one_freq(self):
g = 9.81
f = 0.1
h = 1e9
w = np.pi*2*f # deep water dispersion
expected = w**2 / g
calculated = wave.resource.wave_number(f=f, h=h, g=g).values[0][0]
error = np.abs(expected-calculated)
self.assertLess(error, 1e-6)
def test_wave_length(self):
k_list=[1,2,10,3]
l_expected = (2.*np.pi/np.array(k_list)).tolist()
k_df = pd.DataFrame(k_list,index = [1,2,3,4])
k_series= k_df[0]
k_array=np.array(k_list)
for l in [k_list, k_df, k_series, k_array]:
l_calculated = wave.resource.wave_length(l)
self.assertListEqual(l_expected,l_calculated.tolist())
idx=0
k_int = k_list[idx]
l_calculated = wave.resource.wave_length(k_int)
self.assertEqual(l_expected[idx],l_calculated)
def test_depth_regime(self):
expected = [True,True,False,True]
l_list=[1,2,10,3]
l_df = pd.DataFrame(l_list,index = [1,2,3,4])
l_series= l_df[0]
l_array=np.array(l_list)
h = 10
for l in [l_list, l_df, l_series, l_array]:
calculated = wave.resource.depth_regime(l,h)
self.assertListEqual(expected,calculated.tolist())
idx=0
l_int = l_list[idx]
calculated = wave.resource.depth_regime(l_int,h)
self.assertEqual(expected[idx],calculated)
def test_wave_celerity(self):
# Depth regime ratio
dr_ratio=2
# small change in f will give similar value cg
f=np.linspace(20.0001,20.0005,5)
# Choose index to spike at. cg spike is inversely proportional to k
k_idx=2
k_tmp=[1, 1, 0.5, 1, 1]
k = pd.DataFrame(k_tmp, index=f)
# all shallow
cg_shallow1 = wave.resource.wave_celerity(k, h=0.0001,depth_check=True)
cg_shallow2 = wave.resource.wave_celerity(k, h=0.0001,depth_check=False)
self.assertTrue(all(cg_shallow1.squeeze().values ==
cg_shallow2.squeeze().values))
# all deep
cg = wave.resource.wave_celerity(k, h=1000,depth_check=True)
self.assertTrue(all(np.pi*f/k.squeeze().values == cg.squeeze().values))
def test_energy_flux_deep(self):
# Dependent on mhkit.resource.BS spectrum
S = wave.resource.bretschneider_spectrum(self.f,self.Tp,self.Hs)
Te = wave.resource.energy_period(S)
Hm0 = wave.resource.significant_wave_height(S)
rho=1025
g=9.80665
coeff = rho*(g**2)/(64*np.pi)
J = coeff*(Hm0.squeeze()**2)*Te.squeeze()
h=-1 # not used when deep=True
J_calc = wave.resource.energy_flux(S, h, deep=True)
self.assertTrue(J_calc.squeeze() == J)
def test_moments(self):
for file_i in self.valdata2.keys(): # for each file MC, AH, CDiP
datasets = self.valdata2[file_i]
for s in datasets.keys(): # for each set
data = datasets[s]
for m in data['m'].keys():
expected = data['m'][m]
S = data['S']
if s == 'CDiP1' or s == 'CDiP6':
f_bins=pd.Series(data['freqBinWidth'])
else:
f_bins = None
calculated = wave.resource.frequency_moment(S, int(m)
,frequency_bins=f_bins).iloc[0,0]
error = np.abs(expected-calculated)/expected
self.assertLess(error, 0.01)
def test_metrics(self):
for file_i in self.valdata2.keys(): # for each file MC, AH, CDiP
datasets = self.valdata2[file_i]
for s in datasets.keys(): # for each set
data = datasets[s]
S = data['S']
if file_i == 'CDiP':
f_bins=pd.Series(data['freqBinWidth'])
else:
f_bins = None
# Hm0
expected = data['metrics']['Hm0']
calculated = wave.resource.significant_wave_height(S,
frequency_bins=f_bins).iloc[0,0]
error = np.abs(expected-calculated)/expected
#print('Hm0', expected, calculated, error)
self.assertLess(error, 0.01)
# Te
expected = data['metrics']['Te']
calculated = wave.resource.energy_period(S,
frequency_bins=f_bins).iloc[0,0]
error = np.abs(expected-calculated)/expected
#print('Te', expected, calculated, error)
self.assertLess(error, 0.01)
# T0
expected = data['metrics']['T0']
calculated = wave.resource.average_zero_crossing_period(S,
frequency_bins=f_bins).iloc[0,0]
error = np.abs(expected-calculated)/expected
#print('T0', expected, calculated, error)
self.assertLess(error, 0.01)
# Tc
expected = data['metrics']['Tc']
calculated = wave.resource.average_crest_period(S,
# Tc = Tavg**2
frequency_bins=f_bins).iloc[0,0]**2
error = np.abs(expected-calculated)/expected
#print('Tc', expected, calculated, error)
self.assertLess(error, 0.01)
# Tm
expected = np.sqrt(data['metrics']['Tm'])
calculated = wave.resource.average_wave_period(S,
frequency_bins=f_bins).iloc[0,0]
error = np.abs(expected-calculated)/expected
#print('Tm', expected, calculated, error)
self.assertLess(error, 0.01)
# Tp
expected = data['metrics']['Tp']
calculated = wave.resource.peak_period(S).iloc[0,0]
error = np.abs(expected-calculated)/expected
#print('Tp', expected, calculated, error)
self.assertLess(error, 0.001)
# e
expected = data['metrics']['e']
calculated = wave.resource.spectral_bandwidth(S,
frequency_bins=f_bins).iloc[0,0]
error = np.abs(expected-calculated)/expected
#print('e', expected, calculated, error)
self.assertLess(error, 0.001)
# J
if file_i != 'CDiP':
for i,j in zip(data['h'],data['J']):
expected = data['J'][j]
calculated = wave.resource.energy_flux(S,i)
error = np.abs(expected-calculated.values)/expected
self.assertLess(error, 0.1)
# v
if file_i == 'CDiP':
# this should be updated to run on other datasets
expected = data['metrics']['v']
calculated = wave.resource.spectral_width(S,
frequency_bins=f_bins).iloc[0,0]
error = np.abs(expected-calculated)/expected
self.assertLess(error, 0.01)
if file_i == 'MC':
expected = data['metrics']['v']
# testing that default uniform frequency bin widths works
calculated = wave.resource.spectral_width(S).iloc[0,0]
error = np.abs(expected-calculated)/expected
self.assertLess(error, 0.01)
def test_plot_elevation_timeseries(self):
filename = abspath(join(testdir, 'wave_plot_elevation_timeseries.png'))
if isfile(filename):
os.remove(filename)
data = self.valdata2['MC']
temp = pd.DataFrame(data[list(data.keys())[0]]['elevation'])
temp.index = temp.index.astype(float)
temp.sort_index(inplace=True)
eta = temp.iloc[0:100,:]
plt.figure()
wave.graphics.plot_elevation_timeseries(eta)
plt.savefig(filename, format='png')
plt.close()
self.assertTrue(isfile(filename))
class TestResourceContours(unittest.TestCase):
@classmethod
def setUpClass(self):
f_name= 'Hm0_Te_46022.json'
self.Hm0Te = pd.read_json(join(datadir,f_name))
with open(join(datadir, 'principal_component_analysis.pkl'), 'rb') as f:
self.pca = pickle.load(f)
@classmethod
def tearDownClass(self):
pass
def test_environmental_contour(self):
Hm0Te = self.Hm0Te
df = Hm0Te[Hm0Te['Hm0'] < 20]
Hm0 = df.Hm0.values
Te = df.Te.values
dt_ss = (Hm0Te.index[2]-Hm0Te.index[1]).seconds
time_R = 100
Hm0_contour, Te_contour = wave.resource.environmental_contour(Hm0, Te,
dt_ss, time_R)
expected_contours = pd.read_csv(join(datadir,'Hm0_Te_contours_46022.csv'))
assert_allclose(expected_contours.Hm0_contour.values, Hm0_contour, rtol=1e-3)
def test__principal_component_analysis(self):
Hm0Te = self.Hm0Te
df = Hm0Te[Hm0Te['Hm0'] < 20]
Hm0 = df.Hm0.values
Te = df.Te.values
PCA = wave.resource._principal_component_analysis(Hm0,Te, bin_size=250)
assert_allclose(PCA['principal_axes'], self.pca['principal_axes'])
self.assertAlmostEqual(PCA['shift'], self.pca['shift'])
self.assertAlmostEqual(PCA['x1_fit']['mu'], self.pca['x1_fit']['mu'])
self.assertAlmostEqual(PCA['mu_fit'].slope, self.pca['mu_fit'].slope)
self.assertAlmostEqual(PCA['mu_fit'].intercept, self.pca['mu_fit'].intercept)
assert_allclose(PCA['sigma_fit']['x'], self.pca['sigma_fit']['x'])
def test_plot_environmental_contour(self):
filename = abspath(join(testdir, 'wave_plot_environmental_contour.png'))
if isfile(filename):
os.remove(filename)
Hm0Te = self.Hm0Te
df = Hm0Te[Hm0Te['Hm0'] < 20]
Hm0 = df.Hm0.values
Te = df.Te.values
dt_ss = (Hm0Te.index[2]-Hm0Te.index[1]).seconds
time_R = 100
Hm0_contour, Te_contour = wave.resource.environmental_contour(Hm0, Te,
dt_ss, time_R)
plt.figure()
wave.graphics.plot_environmental_contour(Te, Hm0,
Te_contour, Hm0_contour,
data_label='NDBC 46022',
contour_label='100-year Contour',
x_label = 'Te [s]',
y_label = 'Hm0 [m]')
plt.savefig(filename, format='png')
plt.close()
self.assertTrue(isfile(filename))
def test_plot_environmental_contour_multiyear(self):
filename = abspath(join(testdir,
'wave_plot_environmental_contour_multiyear.png'))
if isfile(filename):
os.remove(filename)
Hm0Te = self.Hm0Te
df = Hm0Te[Hm0Te['Hm0'] < 20]
Hm0 = df.Hm0.values
Te = df.Te.values
dt_ss = (Hm0Te.index[2]-Hm0Te.index[1]).seconds
time_R = np.array([100, 105, 110, 120, 150])
Hm0_contour, Te_contour = wave.resource.environmental_contour(Hm0, Te,
dt_ss, time_R)
contour_label = [f'{year}-year Contour' for year in time_R]
plt.figure()
wave.graphics.plot_environmental_contour(Te, Hm0,
Te_contour, Hm0_contour,
data_label='NDBC 46022',
contour_label=contour_label,
x_label = 'Te [s]',
y_label = 'Hm0 [m]')
plt.savefig(filename, format='png')
plt.close()
self.assertTrue(isfile(filename))
class TestPerformance(unittest.TestCase):
@classmethod
def setUpClass(self):
np.random.seed(123)
Hm0 = np.random.rayleigh(4, 100000)
Te = np.random.normal(4.5, .8, 100000)
P = np.random.normal(200, 40, 100000)
J = np.random.normal(300, 10, 100000)
self.data = pd.DataFrame({'Hm0': Hm0, 'Te': Te, 'P': P,'J': J})
self.Hm0_bins = np.arange(0,19,0.5)
self.Te_bins = np.arange(0,9,1)
@classmethod
def tearDownClass(self):
pass
def test_capture_length(self):
L = wave.performance.capture_length(self.data['P'], self.data['J'])
L_stats = wave.performance.statistics(L)
self.assertAlmostEqual(L_stats['mean'], 0.6676, 3)
def test_capture_length_matrix(self):
L = wave.performance.capture_length(self.data['P'], self.data['J'])
LM = wave.performance.capture_length_matrix(self.data['Hm0'], self.data['Te'],
L, 'std', self.Hm0_bins, self.Te_bins)
self.assertEqual(LM.shape, (38,9))
self.assertEqual(LM.isna().sum().sum(), 131)
def test_wave_energy_flux_matrix(self):
JM = wave.performance.wave_energy_flux_matrix(self.data['Hm0'], self.data['Te'],
self.data['J'], 'mean', self.Hm0_bins, self.Te_bins)
self.assertEqual(JM.shape, (38,9))
self.assertEqual(JM.isna().sum().sum(), 131)
def test_power_matrix(self):
L = wave.performance.capture_length(self.data['P'], self.data['J'])
LM = wave.performance.capture_length_matrix(self.data['Hm0'], self.data['Te'],
L, 'mean', self.Hm0_bins, self.Te_bins)
JM = wave.performance.wave_energy_flux_matrix(self.data['Hm0'], self.data['Te'],
self.data['J'], 'mean', self.Hm0_bins, self.Te_bins)
PM = wave.performance.power_matrix(LM, JM)
self.assertEqual(PM.shape, (38,9))
self.assertEqual(PM.isna().sum().sum(), 131)
def test_mean_annual_energy_production(self):
L = wave.performance.capture_length(self.data['P'], self.data['J'])
maep = wave.performance.mean_annual_energy_production_timeseries(L, self.data['J'])
self.assertAlmostEqual(maep, 1754020.077, 2)
def test_plot_matrix(self):
filename = abspath(join(testdir, 'wave_plot_matrix.png'))
if isfile(filename):
os.remove(filename)
M = wave.performance.wave_energy_flux_matrix(self.data['Hm0'], self.data['Te'],
self.data['J'], 'mean', self.Hm0_bins, self.Te_bins)
plt.figure()
wave.graphics.plot_matrix(M)
plt.savefig(filename, format='png')
plt.close()
self.assertTrue(isfile(filename))
class TestIOndbc(unittest.TestCase):
@classmethod
def setUpClass(self):
self.expected_columns_metRT = ['WDIR', 'WSPD', 'GST', 'WVHT', 'DPD',
'APD', 'MWD', 'PRES', 'ATMP', 'WTMP', 'DEWP', 'VIS', 'PTDY', 'TIDE']
self.expected_units_metRT = {'WDIR': 'degT', 'WSPD': 'm/s', 'GST': 'm/s',
'WVHT': 'm', 'DPD': 'sec', 'APD': 'sec', 'MWD': 'degT', 'PRES': 'hPa',
'ATMP': 'degC', 'WTMP': 'degC', 'DEWP': 'degC', 'VIS': 'nmi',
'PTDY': 'hPa', 'TIDE': 'ft'}
self.expected_columns_metH = ['WDIR', 'WSPD', 'GST', 'WVHT', 'DPD',
'APD', 'MWD', 'PRES', 'ATMP', 'WTMP', 'DEWP', 'VIS', 'TIDE']
self.expected_units_metH = {'WDIR': 'degT', 'WSPD': 'm/s', 'GST': 'm/s',
'WVHT': 'm', 'DPD': 'sec', 'APD': 'sec', 'MWD': 'deg', 'PRES': 'hPa',
'ATMP': 'degC', 'WTMP': 'degC', 'DEWP': 'degC', 'VIS': 'nmi',
'TIDE': 'ft'}
self.filenames=['46042w1996.txt.gz',
'46029w1997.txt.gz',
'46029w1998.txt.gz']
self.swden = pd.read_csv(join(datadir,self.filenames[0]), sep=r'\s+',
compression='gzip')
@classmethod
def tearDownClass(self):
pass
### Realtime data
def test_ndbc_read_realtime_met(self):
data, units = wave.io.ndbc.read_file(join(datadir, '46097.txt'))
expected_index0 = datetime(2019,4,2,13,50)
self.assertSetEqual(set(data.columns), set(self.expected_columns_metRT))
self.assertEqual(data.index[0], expected_index0)
self.assertEqual(data.shape, (6490, 14))
self.assertEqual(units,self.expected_units_metRT)
### Historical data
def test_ndbc_read_historical_met(self):
# QC'd monthly data, Aug 2019
data, units = wave.io.ndbc.read_file(join(datadir, '46097h201908qc.txt'))
expected_index0 = datetime(2019,8,1,0,0)
self.assertSetEqual(set(data.columns), set(self.expected_columns_metH))
self.assertEqual(data.index[0], expected_index0)
self.assertEqual(data.shape, (4464, 13))
self.assertEqual(units,self.expected_units_metH)
### Spectral data
def test_ndbc_read_spectral(self):
data, units = wave.io.ndbc.read_file(join(datadir, 'data.txt'))
self.assertEqual(data.shape, (743, 47))
self.assertEqual(units, None)
def test_ndbc_available_data(self):
data=wave.io.ndbc.available_data('swden', buoy_number='46029')
cols = data.columns.tolist()
exp_cols = ['id', 'year', 'filename']
self.assertEqual(cols, exp_cols)
years = [int(year) for year in data.year.tolist()]
exp_years=[*range(1996,1996+len(years))]
self.assertEqual(years, exp_years)
self.assertEqual(data.shape, (len(data), 3))
def test__ndbc_parse_filenames(self):
filenames= pd.Series(self.filenames)
buoys = wave.io.ndbc._parse_filenames('swden', filenames)
years = buoys.year.tolist()
numbers = buoys.id.tolist()
fnames = buoys.filename.tolist()
self.assertEqual(buoys.shape, (len(filenames),3))
self.assertListEqual(years, ['1996','1997','1998'])
self.assertListEqual(numbers, ['46042','46029','46029'])
self.assertListEqual(fnames, self.filenames)
def test_ndbc_request_data(self):
filenames= pd.Series(self.filenames[0])
ndbc_data = wave.io.ndbc.request_data('swden', filenames)
self.assertTrue(self.swden.equals(ndbc_data['1996']))
def test_ndbc_request_data_from_dataframe(self):
filenames = pd.DataFrame(pd.Series(data=self.filenames[0]))
ndbc_data = wave.io.ndbc.request_data('swden', filenames)
self.assertTrue(self.swden.equals(ndbc_data['1996']))
from chatto_transform.transforms.transform_base import Transform
from chatto_transform.schema.schema_base import *
from chatto_transform.lib.mimic.session import load_table
from chatto_transform.schema.mimic.mimic_schema import \
chartevents_schema, labevents_schema, ioevents_schema, icustayevents_schema
from chatto_transform.schema.mimic.patient_history_schema import \
patient_history_schema, patient_history_relative_time_schema
from chatto_transform.lib.big_dt_tools import big_dt_to_num, num_to_big_dt
from chatto_transform.lib.chunks import from_chunks
import pandas as pd
import itertools
class ChartTransform(Transform):
chart_mappings = {
'HR': [211],
'TEMPERATURE': [676, 677, 678, 679],
'SYS ABP': [51],
'NI SYS ABP': [455],
'MAP': [52, 6702],
'NI MAP': [456],
'Arterial PH': [780],
'PaO2': [779],
'PaCO2': [778],
'SaO2': [834, 3495],
'GITUBE': [203, 3428],
'WEIGHT': [581],
'GCS': [198],
'FIO2': [3420, 190],
'VO2 SAT': [823],
'MIXED VO2': [822],
'PVO2': [859],
'MECH_VENT_FLAG': [543, 544, 545, 619, 39, 535, 683, 720, 721, 722, 732],
'SPONTANEOUS_RESP': [615, 618]
}
valid_chart_types = list(itertools.chain.from_iterable(chart_mappings.values()))
chart_types_ser = pd.Series(index=valid_chart_types, dtype=object)
for category, chart_types in chart_mappings.items():
chart_types_ser.loc[chart_types] = category
def input_schema(self):
return PartialSchema.from_schema(chartevents_schema)
def output_schema(self):
return PartialSchema.from_schema(patient_history_schema)
def _transform(self, chartevents):
df = chartevents
df = df[(df['itemid'].isin(self.valid_chart_types))
& (~df['value1num'].isnull())]
df['category'] = self.chart_types_ser.loc[df['itemid']].values
df['valuenum'] = df['value1num']
temp_mask = df['itemid'].isin([678, 679])
df.loc[temp_mask, 'valuenum'] = ((df.loc[temp_mask]['value1num'] - 32) * (5 / 9)).round(decimals=1)
round_mask = df['itemid'].isin([676, 677, 581])
df.loc[round_mask, 'valuenum'] = df.loc[round_mask]['value1num'].round(decimals=1)
percent_mask = df['itemid'] == 3420
df.loc[percent_mask, 'valuenum'] = df.loc[percent_mask]['value1num'] / 100
ventilated_resp_mask = df['itemid'].isin(self.chart_mappings['MECH_VENT_FLAG'])
df.loc[ventilated_resp_mask, 'valuenum'] = 1
spontaneous_resp_mask = (df['itemid'].isin(self.chart_mappings['SPONTANEOUS_RESP'])) \
& (~df['icustay_id'].isin(df[ventilated_resp_mask]['icustay_id'].unique()))
df.loc[spontaneous_resp_mask, 'valuenum'] = df.loc[spontaneous_resp_mask]['value1num']
dias_df = df[(df['itemid'].isin([51, 455])) & (~df['value2num'].isnull())]
dias_abp_mask = dias_df['itemid'] == 51
dias_df.loc[dias_abp_mask, 'category'] = 'DIAS ABP'
dias_df.loc[~dias_abp_mask, 'category'] = 'NI DIAS ABP'
dias_df['valuenum'] = dias_df['value2num']
df = pd.concat([df, dias_df], ignore_index=True)
df = df[['subject_id', 'charttime', 'category', 'valuenum']]
return df
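# Illustrative usage sketch (not part of the original module): assuming the
# Transform base class exposes a public transform() that delegates to _transform,
# the harmonized chart history could be built roughly as below. `load_table` and
# chartevents_schema come from the imports above; the variable names are hypothetical.
# chartevents = load_table(chartevents_schema)
# chart_history = ChartTransform().transform(chartevents)
# # chart_history columns: ['subject_id', 'charttime', 'category', 'valuenum']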
class LabTransform(Transform):
lab_mappings = {
'HCT': [50383],
'WBC': [50316, 50468],
'GLUCOSE': [50112],
'BUN': [50177],
'HCO3': [50172],
'POTASSIUM': [50149],
'SODIUM': [50159],
'BILIRUBIN': [50170],
'LACTACTE': [50010],
'ALKALINE PHOSPHOTASE': [50061],
'AST': [50073],
'ALT': [50062],
'CHOLESTEROL': [50085],
'TROPONIN_T': [50189],
'TROPONIN_I': [50188],
'ALBUMIN': [50060],
'MAGNESIUM': [50140],
'PLATELETS': [50428],
'CREATININE': [50090],
'CHOLESTEROL': [50085]
}
valid_lab_types = list(itertools.chain.from_iterable(lab_mappings.values()))
lab_types_ser = pd.Series(index=valid_lab_types, dtype=object)
for category, lab_types in lab_mappings.items():
lab_types_ser.loc[lab_types] = category
def input_schema(self):
return PartialSchema.from_schema(labevents_schema)
def output_schema(self):
return PartialSchema.from_schema(patient_history_schema)
def _transform(self, labevents):
df = labevents
df = df[(df['itemid'].isin(LabTransform.valid_lab_types))
& (~df['valuenum'].isnull())]
df['category'] = self.lab_types_ser.loc[df['itemid']].values
df = df[['subject_id', 'charttime', 'category', 'valuenum']]
return df
class DemographicTransform(Transform):
def input_schema(self):
return PartialSchema.from_schema(icustay_detail_schema)
def output_schema(self):
return PartialSchema.from_schema(patient_history_schema)
def _transform(self, icustay_detail):
df = icustay_detail
computed_df = pd.DataFrame(index=df.index)
import numpy as np
import pandas as pd
import pyaf.Bench.web_traffic.Bench as be
logger = be.get_bench_logger()
def run_bench_process(arg):
(lBackend, df, cols, last_date, H) = arg
res = {}
for col in cols:
fcst_dict = lBackend.real_forecast_one_signal(df, col, last_date, H)
res[col] = fcst_dict
return (arg, res)
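# Illustrative sketch: each pool worker receives a (backend, frame, columns,
# last_date, H) tuple and returns it together with the per-column forecast dicts.
# The argument values below are hypothetical.
# arg = (cZero_Backend(), df, ['signal_1', 'signal_2'], last_date, 7)
# arg_back, forecasts = run_bench_process(arg)
# # forecasts['signal_1'] maps forecast dates to forecast values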
class cAbstractBackend:
def __init__(self):
pass
def forecast_all_signals(self, project_data, last_date, H):
df = project_data.mVisitsDF
forecasts = {}
for col in df.columns:
if(col != 'Date'):
fcst_dict = self.real_forecast_one_signal(df, col , last_date, H)
forecasts[col] = fcst_dict
# logger.info("FORECAST_SIGNAL " + str([self.__class__.__name__ , col]))
return forecasts
def forecast_all_signals_multiprocess(self, df , last_date, H):
import multiprocessing as mp
nbprocesses = 18
pool = mp.Pool(processes=nbprocesses, maxtasksperchild=None)
args = []
cols = []
# print(df.columns)
for col in df.columns:
if(col != 'Date'):
cols = cols + [col]
if(len(cols) > 50):
# print(cols , ['Date'] + cols)
df1 = df[['Date'] + cols]
args = args + [(self , df1, cols, last_date, H)];
cols = []
if(len(cols) > 0):
# print(cols , ['Date'] + cols)
df1 = df[['Date'] + cols]
args = args + [(self , df1, cols, last_date, H)];
cols = []
i = 1;
forecasts = {}
for res in pool.imap(run_bench_process, args):
signals = res[0][2]
for sig in signals:
logger.info("FINISHED_BENCH_FOR_SIGNAL " + str(sig) + " " + str(i) + "/" + str(len(df.columns)));
forecasts[sig] = res[1][sig]
i = i + 1
pool.close()
pool.join()
return forecasts
def forecast_zero_for_column(self, df, signal, last_date, H):
fcst_dict = {}
for h in range(H):
new_date = np.datetime64(last_date) + np.timedelta64(h + 1, 'D')
fcst_dict[new_date] = 0
return fcst_dict
class cZero_Backend (cAbstractBackend):
def __init__(self):
cAbstractBackend.__init__(self);
pass
def real_forecast_one_signal(self, df, signal, last_date, H):
fcst_dict = self.forecast_zero_for_column(df, signal, last_date, H)
return fcst_dict
class cPyAF_Backend (cAbstractBackend):
def __init__(self):
cAbstractBackend.__init__(self);
pass
def forecast_all_signals(self, project_data , last_date, H):
return self.forecast_all_signals_multiprocess(project_data.mVisitsDF, last_date, H)
def real_forecast_one_signal(self, df, signal, last_date, H):
import pyaf.ForecastEngine as autof
lEngine = autof.cForecastEngine()
lEngine.mOptions.mAddPredictionIntervals = False
lEngine.mOptions.mParallelMode = False
lEngine.mOptions.set_active_transformations(['None', 'Difference' , 'Anscombe'])
lEngine.mOptions.mMaxAROrder = 16
# lEngine
df1 = df[['Date' , signal]].fillna(0.0)
lEngine.train(df1, 'Date' , signal, 1);
lEngine.getModelInfo();
# lEngine.standrdPlots()
df_forecast = lEngine.forecast(iInputDS = df1, iHorizon = H)
dates = df_forecast['Date'].tail(H).values
predictions = df_forecast[str(signal) + '_Forecast'].tail(H).values
# logger.info(dates)
# logger.info(predictions)
fcst_dict = {}
for i in range(H):
ts = pd.to_datetime(str(dates[i]))
date_str = ts.strftime('%Y-%m-%d')
fcst_dict[date_str] = int(predictions[i])
logger.info("SIGNAL_FORECAST " + str(signal) + " " + str(fcst_dict))
return fcst_dict
class cPyAF_Backend_2 (cPyAF_Backend):
def __init__(self):
cPyAF_Backend.__init__(self);
pass
def forecast_all_signals(self, project_data , last_date, H):
df = project_data.mVisitsDF
df_clean = self.clean_all_signals(df)
forecasts = self.forecast_all_signals_multiprocess(df_clean, last_date, H)
for col in df.columns:
if(col not in df_clean.columns):
fcst_dict = self.forecast_zero_for_column(df, col, last_date, H)
forecasts[col] = fcst_dict
return forecasts
def is_significant_signal(self, sig):
lMinVisits = 10
lMinNonZero = 5
last_100_values = sig[-100:]
lNbNonZero = last_100_values[last_100_values > 0].count()
logger.info("SIGNAL_FILTER_INFO " + str([sig.name , sig.min() , sig.max() , sig.mean(), sig.std(), lNbNonZero]))
if(sig.max() < lMinVisits):
return False;
if(lNbNonZero < lMinNonZero):
return False
return True
def clean_all_signals(self , df):
df_out = pd.DataFrame()
import os
import pandas as pd
from sklearn.model_selection import KFold
def prepare_img_data(seed=2021, gt=None):
r'''
This function splits the dataset into k folds, i.e. k pairs of training and validation sets
Args:
seed: random seed passed to KFold; it affects the ordering of the
indices, which controls the randomness of each fold.
gt: DataFrame holding the full set of ground-truth labels
Returns: the K-fold training and validation splits
'''
data_train = []
data_eval = []
kf = KFold(n_splits=5, random_state=seed, shuffle=True)
img_non = gt[gt['non'].isin([1])]
img_early = gt[gt['early'].isin([1])]
img_mid_advanced = gt[gt['mid_advanced'].isin([1])]
for (n_train, n_test), (e_train, e_test), (ma_train, ma_test) in zip(kf.split(img_non),
kf.split(img_early),
kf.split(img_mid_advanced)
):
import pdb
# pdb.set_trace()
n_train, n_test = img_non.iloc[n_train, :].reset_index(drop=True), \
img_non.iloc[n_test, :].reset_index(drop=True)
e_train, e_test = img_early.iloc[e_train, :].reset_index(drop=True), \
img_early.iloc[e_test, :].reset_index(drop=True)
ma_train, ma_test = img_mid_advanced.iloc[ma_train, :].reset_index(drop=True), \
img_mid_advanced.iloc[ma_test, :].reset_index(drop=True)
_train = pd.concat([n_train, e_train, ma_train], axis=0, ignore_index=True)
_test = pd.concat([n_test, e_test, ma_test], axis=0, ignore_index=True)
# reconstructed continuation: collect this fold's split, then return all folds after the loop
data_train.append(_train)
data_eval.append(_test)
return data_train, data_eval
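# Illustrative usage (hypothetical file name): the ground-truth table is expected
# to carry one-hot stage columns named 'non', 'early' and 'mid_advanced'.
# gt = pd.read_csv('ground_truth.csv')
# folds_train, folds_eval = prepare_img_data(seed=2021, gt=gt)
# # len(folds_train) == len(folds_eval) == 5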
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 23 21:53:32 2017
@author: gason
"""
import pandas as pd
import numpy as np
import re
import time
import os
from collections.abc import Iterable
from pandas.api.types import is_string_dtype
from pandas.api.types import is_numeric_dtype
from pandas.api.types import is_number
from pandas.api.types import is_datetime64_any_dtype
from pandas.api.types import is_categorical_dtype
from scipy import stats
from sklearn import metrics
from . import report as _rpt
from . import config
from .report import genwordcloud
from .utils.metrics import entropyc
from .utils import iqr
#from sklearn.neighbors import KernelDensity
import matplotlib.pyplot as plt
import seaborn as sns
_thisdir = os.path.split(__file__)[0]
# default chinese font
from matplotlib.font_manager import FontProperties
font_path=config.font_path
if font_path:
myfont=FontProperties(fname=font_path)
sns.set(font=myfont.get_name())
__all__=['type_of_var',
'describe',
'plot',
'features_analysis',
'distributions',
'AnalysisReport',
'ClassifierReport']
def _freedman_diaconis_bins(a):
"""Calculate number of hist bins using Freedman-Diaconis rule."""
# From http://stats.stackexchange.com/questions/798/
a = np.asarray(a)
assert len(a.shape)>0
assert len(a)>0
h = 2 * iqr(a) / (len(a) ** (1 / 3))
# fall back to sqrt(a) bins if iqr is 0
if h == 0:
return int(np.sqrt(a.size))
else:
return int(np.ceil((a.max() - a.min()) / h))
def distributions(a,hist=True,bins=None,norm_hist=True,kde=False,grid=None,gridsize=100,clip=None):
'''Distribution information of an array
If hist=True, return the histogram as (counts, bins)
If kde=True, return the kernel density estimate as (grid, y)
example
-------
a=np.random.randint(1,50,size=(1000,1))
'''
a = np.asarray(a).squeeze()
if hist:
if bins is None:
bins = min(_freedman_diaconis_bins(a), 50)
counts,bins=np.histogram(a,bins=bins)
if norm_hist:
counts=counts/counts.sum()
if kde:
bw='scott'
cut=3
if clip is None:
clip = (-np.inf, np.inf)
try:
kdemodel = stats.gaussian_kde(a, bw_method=bw)
except TypeError:
kdemodel = stats.gaussian_kde(a)
bw = "scotts" if bw == "scott" else bw
bw = getattr(kdemodel, "%s_factor" % bw)() * np.std(a)
if grid is None:
support_min = max(a.min() - bw * cut, clip[0])
support_max = min(a.max() + bw * cut, clip[1])
grid=np.linspace(support_min, support_max, gridsize)
y = kdemodel(grid)
if hist and not(kde):
return counts,bins
elif not(hist) and kde:
return grid,y
elif hist and kde:
return ((counts,bins),(grid,y))
else:
return None
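# Illustrative usage: histogram only, or histogram plus KDE, on a random sample.
# a = np.random.randn(500)
# counts, bins = distributions(a, hist=True, kde=False)
# (counts, bins), (grid, y) = distributions(a, hist=True, kde=True)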
def dtype_detection(data,category_detection=True,StructureText_detection=True,\
datetime_to_category=True,criterion='sqrt',min_mean_counts=5,fix=False):
'''Detect the data type of a single variable
The data types are classified as follows:
1. number, numeric
2. category, categorical factor
3. datetime, datetime
4. text, free text
5. text_st, structured text, e.g. IDs
6. group_number, a group of related numeric columns
parameter
---------
data: pd.Series, one-dimensional only
# if data is given, the function may change its dtype in place
category_detection: bool, use nunique to decide whether the variable is categorical
StructureText_detection: bool, detect structured text, e.g. every value in the column shares a separator such as "-"
datetime_to_category: whether a datetime series with too few unique values is converted to a categorical variable
criterion: string or int, optional (default="sqrt", i.e. the square root of the sample size)
supports 'sqrt': square root of the sample size, int: an absolute count, float in (0,1): a fraction of the sample size
when detecting categorical variables, a feature whose nunique is below criterion is treated as categorical
min_mean_counts: default 5; a numeric variable is treated as categorical only if the average frequency per category is at least min_mean_counts
fix: bool, whether to also return the data with corrected types
return:
result:dict{
'name': column name,
'vtype': variable type,
'ordered': whether the factor is ordered,
'categories': all factor levels}
'''
assert len(data.shape)==1
data=data.copy()
data=pd.Series(data)
dtype,name,n_sample=data.dtype,data.name,data.count()
min_mean_counts=5
if criterion=='sqrt':
max_nuniques=np.sqrt(n_sample)
elif isinstance(criterion,int):
max_nuniques=criterion
elif isinstance(criterion,float) and (0<criterion<1):
max_nuniques=criterion
else:
max_nuniques=np.sqrt(n_sample)
ordered=False
categories=[]
if is_numeric_dtype(dtype):
vtype='number'
ordered=False
categories=[]
# Correct mis-inferred numeric types, e.g. coerce 1.0, 2.0, 3.0 to 1, 2, 3
if data.dropna().astype(np.int64).sum()==data.dropna().sum():
data[data.notnull()]=data[data.notnull()].astype(np.int64)
if category_detection:
nunique=len(data.dropna().unique())
mean_counts=data.value_counts().median()
if nunique<max_nuniques and mean_counts>=min_mean_counts:
data=data.astype('category')
ordered=data.cat.ordered
vtype='category'
categories=list(data.dropna().cat.categories)
result={'name':name,'vtype':vtype,'ordered':ordered,'categories':categories}
elif is_string_dtype(dtype):
# Handle datetime-like strings
tmp=data.map(lambda x: np.nan if '%s'%x == 'nan' else len('%s'%x))
tmp=tmp.dropna().astype(np.int64)
if not(any(data.dropna().map(is_number))) and 7<tmp.max()<20 and tmp.std()<0.1:
try:
data=pd.to_datetime(data)
except :
pass
# Handle values that may be categorical
# only applies when datetime_to_category is True
if datetime_to_category:
if len(data.dropna().unique())<np.sqrt(n_sample):
data=data.astype('category')
else:
nunique=len(data.dropna().unique())
#print(data.dtype)
if not(is_categorical_dtype(data.dtype)) and not(np.issubdtype(data.dtype,np.datetime64)) and nunique<max_nuniques:
data=data.astype('category')
# For non-categorical strings, convert percentages to floats, e.g. 21.12% --> 0.2112
if is_string_dtype(data.dtype) and not(is_categorical_dtype(data.dtype)) and all(data.str.contains('%')):
data=data.str.strip('%').astype(np.float64)/100
if is_categorical_dtype(data.dtype):
vtype='category'
categories=list(data.cat.categories)
ordered=data.cat.ordered
# datetime type
elif np.issubdtype(data.dtype,np.datetime64):
vtype='datetime'
# check whether the values are structured text
elif StructureText_detection and tmp.dropna().std()==0:
# not iterable and not a string
if not(isinstance(data.dropna().iloc[0],Iterable)):
vtype='text'
else:
k=set(list(data.dropna().iloc[0]))
for x in data:
if isinstance(x,str) and len(x)>0:
k&=set(list(x))
if len(k)>0:
vtype='text_st'
else:
vtype='text'
elif is_numeric_dtype(data.dtype):
vtype='number'
ordered=False
categories=[]
else:
vtype='text'
result={'name':name,'vtype':vtype,'ordered':ordered,'categories':categories}
elif is_datetime64_any_dtype(dtype):
vtype='datetime'
result={'name':name,'vtype':vtype,'ordered':ordered,'categories':categories}
else:
print('unknown dtype!')
result=None
if fix:
return result,data
else:
return result
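# Illustrative usage on a hypothetical DataFrame `df`:
# info = dtype_detection(df['age'])                     # e.g. {'name': 'age', 'vtype': 'number', ...}
# info, fixed = dtype_detection(df['grade'], fix=True)  # also returns the type-corrected Series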
def type_of_var(data,category_detection=True,criterion='sqrt',min_mean_counts=5,copy=True):
'''Return the type of each variable
The data types are classified as follows:
1. number, numeric
2. category, categorical factor
3. datetime, datetime
4. text, free text
5. text_st, structured text, e.g. IDs
parameters
----------
data: pd.DataFrame
category_detection: bool, use nunique to decide whether a variable is categorical
criterion: string or int, optional (default="sqrt", i.e. the square root of the sample size)
supports 'sqrt': square root of the sample size, int: an absolute count, float in (0,1): a fraction of the sample size
when detecting categorical variables, a feature whose nunique is below criterion is treated as categorical
min_mean_counts: default 5; a numeric variable is treated as categorical only if the average frequency per category is at least min_mean_counts
copy: bool, whether to convert data types in place, e.g. datetimes and categorical variables
return:
--------
var_type:dict{
ColumnName:type,}
'''
assert isinstance(data,pd.core.frame.DataFrame)
var_type={}
for c in data.columns:
#print('type_of_var : ',c)
if copy:
data=data.copy()
result=dtype_detection(data[c],category_detection=category_detection,\
criterion=criterion,min_mean_counts=min_mean_counts,datetime_to_category=False,fix=False)
if result is not None:
var_type[c]=result['vtype']
else:
var_type[c]='unknown'
else:
result,tmp=dtype_detection(data[c],category_detection=category_detection,\
criterion=criterion,min_mean_counts=min_mean_counts,datetime_to_category=False,fix=True)
data[c]=tmp
if result is not None:
var_type[c]=result['vtype']
else:
var_type[c]='unknown'
return var_type
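# Illustrative usage on a hypothetical DataFrame `df`:
# var_type = type_of_var(df)   # e.g. {'age': 'number', 'gender': 'category', 'signup': 'datetime'}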
def var_detection(data,combine=True):
'''Detect the variable types of the whole dataset. Internal use; call type_of_var externally
parameter
---------
data: the data, in DataFrame format
combine: detect similar variables and merge them into groups if found.
return
------
var_list:[{'name':,'vtype':,'vlist':,'ordered':,'categories':,},]
'''
var_list=[]
for c in data.columns:
result,tmp=dtype_detection(data[c],fix=True)
data[c]=tmp
if result is not None:
result['vlist']=[c]
var_list.append(result)
if not(combine):
return var_list,data
var_group=[]
i=0
pattern=re.compile(r'(.*?)(\d+)')
while i < len(var_list)-1:
v=var_list[i]
vnext=var_list[i+1]
if v['vtype']!='number' or vnext['vtype']!='number':
i+=1
continue
tmp1=[]
for vv in var_list[i:]:
if vv['vtype']!='number':
break
w=re.findall(pattern,'%s'%vv['name'])
if len(w)==0 or (len(w)>0 and len(w[0])<2):
break
tmp1.append((w[0][0],w[0][1]))
if len(tmp1)<2:
i+=1
continue
flag1=len(set([t[0] for t in tmp1]))==1
flag2=np.diff([int(t[1]) for t in tmp1]).sum()==len(tmp1)-1
if flag1 and flag2:
var_group.append(list(range(i,i+len(tmp1))))
i+=len(tmp1)
var_group_new={}
var_group_total=[]  # accumulate the indices of all grouped variables
for vi in var_group:
var_group_total+=vi
name='{}-->{}'.format(var_list[vi[0]]['name'],var_list[vi[-1]]['name'])
vlist=[var_list[v]['name'] for v in vi]
vtype='group_number'
tmp={'name':name,'vtype':vtype,'vlist':vlist,'ordered':True,'categories':vlist}
var_group_new[vi[0]]=tmp
var_list_new=[]
var_list_have=[]
for i,v in enumerate(var_list):
if i not in var_group_total:
v['vlist']=[v['name']]
var_list_new.append(v)
var_list_have+=v['vlist']
elif i in var_group_total and v['name'] not in var_list_have:
var_list_new.append(var_group_new[i])
var_list_have+=var_group_new[i]['vlist']
return var_list_new,data
def describe(data):
'''
Generate summary statistics for each variable
For every variable, the following fields are produced:
dtype:
max / most frequent value:
min / least frequent value:
mean / median-frequency value:
missing rate:
std / number of unique values:
'''
data=pd.DataFrame(data)
n_sample=len(data)
var_type=type_of_var(data,copy=True)
summary=pd.DataFrame(columns=data.columns,index=['dtype','max','min','mean','missing_pct','std/nuniue'])
for c in data.columns:
missing_pct=1-data[c].count()/n_sample
if var_type[c] == 'number':
max_value,min_value,mean_value=data[c].max(),data[c].min(),data[c].mean()
std_value=data[c].std()
summary.loc[:,c]=[var_type[c],max_value,min_value,mean_value,missing_pct,std_value]
elif var_type[c] == 'category' or is_categorical_dtype(data[c].dtype):
tmp=data[c].value_counts()
max_value,min_value=tmp.idxmax(),tmp.idxmin()
mean_value_index=tmp[tmp==tmp.median()].index
mean_value=mean_value_index[0] if len(mean_value_index)>0 else np.nan
summary.loc[:,c]=[var_type[c],max_value,min_value,mean_value,missing_pct,len(tmp)]
elif var_type[c] == 'datetime':
max_value,min_value=data[c].max(),data[c].min()
summary.loc[:,c]=[var_type[c],max_value,min_value,np.nan,missing_pct,np.nan]
else:
summary.loc[:,c]=[var_type[c],np.nan,np.nan,np.nan,missing_pct,np.nan]
return summary
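# Illustrative usage on a hypothetical DataFrame `df`:
# summary = describe(df)
# summary.loc['missing_pct']   # missing rate per column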
def plot(data,figure_type='auto',chart_type='auto',vertical=False,ax=None):
'''auto choose the best chart type to draw the data (not fully implemented yet)
paremeter
-----------
figure_type: 'mpl' or 'pptx' or 'html'
chart_type: 'hist' or 'dist' or 'kde' or 'bar' ......
return
-------
chart:dict format.
.type: equal to figure_type
.fig: only return if type == 'mpl'
.ax:
.chart_data:
'''
# chart-type detection part
# plotting part
data=pd.DataFrame(data)
assert len(data.dropna())>0
chart={}
if figure_type in ['mpl','matplotlib']:
chart['type']='mpl'
if ax is None:
fig,ax=plt.subplots()
if chart_type in ['hist','kde']:
for c in data.columns:
if len(data[c].dropna())>10:
sns.kdeplot(data[c].dropna(),shade=True,ax=ax)
else:
print('reportgen.plot:: ',c,'has no valid data!')
legend_label=ax.get_legend_handles_labels()
if len(legend_label)>0 and len(legend_label[0])>1:
ax.legend()
else:
try:
ax.legend_.remove()
except:
pass
ax.axis('auto')
elif chart_type in ['dist']:
for c in data.columns:
if len(data[c].dropna())>10:
sns.distplot(data[c].dropna(),ax=ax)
else:
print('reportgen.plot:: ',c,'has no valid data!')
legend_label=ax.get_legend_handles_labels()
if len(legend_label)>0 and len(legend_label[0])>1:
ax.legend()
else:
try:
ax.legend_.remove()
except:
pass
ax.axis('auto')
elif chart_type in ['scatter']:
ax.xaxis.set_ticks_position('none')
ax.yaxis.set_ticks_position('none')
ax.axhline(y=0, linestyle='-', linewidth=1.2, alpha=0.6)
ax.axvline(x=0, linestyle='-', linewidth=1.2, alpha=0.6)
color=['blue','red','green','dark']
if not isinstance(data,list):
data=[data]
for i,dd in enumerate(data):
if '%s'%dd.iloc[:,0] != 'nan' or '%s'%dd.iloc[:,1] != 'nan':
ax.scatter(dd.iloc[:,0], dd.iloc[:,1], c=color[i], s=50,
label=dd.columns[1])
for _, row in dd.iterrows():
ax.annotate(row.name, (row.iloc[0], row.iloc[1]), color=color[i],fontproperties=myfont,fontsize=10)
ax.axis('equal')
legend_label=ax.get_legend_handles_labels()
if len(legend_label)>0 and len(legend_label[0])>0:
ax.legend()
try:
chart['fig']=fig
except:
pass
chart['ax']=ax
return chart
if figure_type in ['pptx']:
chart['type']='pptx'
count,bins=distributions(data.iloc[:,0].dropna(),kde=False)
if all(pd.Series(bins).astype(int)==bins):
decimals_format='{:.0f}~'
else:
decimals_format='{:.2f}~'
bins_index=[decimals_format.format(b) for b in bins[:-1]]
decimals_format=decimals_format[:-1]
bins_index[-1]=bins_index[-1]+decimals_format.format(bins[-1])
chart_data=pd.DataFrame({'frequency':count*100},index=bins_index)
chart['chart_data']=chart_data
if isinstance(ax,_rpt.Report):
slide_data={'data':chart_data,'slide_type':'chart'}
ax.add_slide(data=slide_data,title='',summary='',footnote='')
# placeholder for now; ax will later be switched to the Report interface
chart['ax']=ax
return chart
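# Illustrative usage: a matplotlib KDE plot and a pptx-style histogram payload for
# a hypothetical numeric column.
# chart = plot(df[['income']], figure_type='mpl', chart_type='kde')
# chart = plot(df[['income']], figure_type='pptx')
# chart['chart_data']   # binned frequencies ready for a pptx chart slide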
# for testing only
def features_analysis(X,y=None,out_file=None,categorical_features=[],number_features=[],\
max_leafs=5):
'''
categorical_features=None
number_features=None
categorical_features=[] if categorical_features is None else categorical_features
number_features=[] if number_features is None else number_features
X=data
'''
from graphviz import Digraph
import pydotplus
N=len(X)
X=X.copy()
if len(categorical_features)==0:
var_type=type_of_var(X)
categorical_features=[k for k in var_type if var_type[k]=='category']
#categorical_features=['grade','target','term']
#number_features=['tot_cur_bal','annual_inc']
X['_count_']=range(len(X))
# order the features by their number of unique values, ascending
nunique=X[categorical_features].apply(pd.Series.nunique).sort_values()
categorical_features=list(nunique.index)
for k in nunique[nunique>5].index:
topitems=X[k].value_counts().sort_values(ascending=False)
X[k]=X[k].replace(dict(zip(topitems.index[(max_leafs-1):],['others']*(len(topitems)-max_leafs+1))))
tmp=X.groupby(categorical_features)
# count rows for categorical variables; compute group means for numeric variables
aggfun={'_count_':'count'}
for k in number_features:
aggfun.update({k:'mean'})
count_data=tmp.agg(aggfun)
# each node carries a few attributes: parent node, feature name, value
# build the index table of nodes
names=count_data.index.names
levels=count_data.index.levels
labels=pd.DataFrame(count_data.index.codes).T
labels.columns=names
for i in range(len(names)):
labels[names[i]]=labels[names[i]].replace(dict(zip(range(len(levels[i])),levels[i])))
labels_node=pd.DataFrame(index=labels.index,columns=labels.columns)
#labels_prenode=pd.DataFrame(index=labels.index,columns=labels.columns)
dot=Digraph()
nodes=[{'id':0,'column':'start','value':None}]
dot.node(str(nodes[-1]['id']),'Total\n{} , 100%'.format(N),shape="diamond")
for c in range(len(labels.columns)):
if c==len(labels.columns)-1:
count_data_tmp=count_data.copy()
else:
count_data_tmp=X.groupby(names[:c+1]).agg(aggfun)
for i in range(len(labels.index)):
value=labels.iloc[i,c]
if value!=nodes[-1]['value'] and c!=nodes[-1]['column']:
# add a new node
addnode={'id':nodes[-1]['id']+1,'column':names[c],'value':value}
nodes.append(addnode)
node_id=str(nodes[-1]['id'])
#cond=labels.iloc[i,:c+1]
#n=_cal_count(X,labels.iloc[i,:c+1])
if len(count_data_tmp.index.names)==1:
n=count_data_tmp.loc[labels.iloc[i,c],'_count_']
else:
n=count_data_tmp.xs(list(labels.iloc[i,:c+1]))['_count_']
label='{} = {}\ncount:{:.0f} , {:.2f}%'.format(names[c],value,n,n*100/N)
for k in number_features:
if len(count_data_tmp.index.names)==1:
vmean=count_data_tmp.loc[labels.iloc[i,c],k]
else:
vmean=count_data_tmp.xs(list(labels.iloc[i,:c+1]))[k]
label=label+'\n{}: {:.1f}'.format(k,vmean)
dot.node(node_id,label)
if c==0:
pre_node_id='0'
else:
pre_node_id=labels_node.iloc[i,c-1]
dot.edge(pre_node_id,node_id)
#print('--- created node {}, node info:'.format(node_id))
#print(label)
#print('node {} connected to node {}'.format(node_id,pre_node_id))
#labels_prenode.iloc[i,c]=pre_node_id
labels_node.iloc[i,c]=str(nodes[-1]['id'])
if out_file is not None:
graph=pydotplus.graphviz.graph_from_dot_data(dot.source)
graph.write(out_file,format=os.path.splitext(out_file)[1][1:])
#graph.write_png(out_file)
else:
dot.view()
return dot
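# Illustrative usage (hypothetical column names): render a segmentation tree for two
# categorical features, annotating each node with the mean of a numeric field.
# features_analysis(df, categorical_features=['grade', 'term'],
#                   number_features=['annual_inc'], out_file='segments.png')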
def AnalysisReport(data,filename=None,var_list=None,save_pptx=True,return_report=False,combine=False):
'''
Generate the analysis report directly
'''
if var_list is None:
var_list,data=var_detection(data,combine=combine)
#print(var_list)
#print('============')
slides_data=[]
if filename is None:
filename='AnalysisReport'+time.strftime('_%Y%m%d%H%M', time.localtime())
p=_rpt.Report()
p.add_cover(title=os.path.splitext(filename)[0])
elif isinstance(filename,str):
p=_rpt.Report()
p.add_cover(title=os.path.splitext(filename)[0])
elif isinstance(filename,_rpt.Report):
p=filename
filename='AnalysisReport'+time.strftime('_%Y%m%d%H%M', time.localtime())
else:
print('reportgen.AnalysisReport::cannot understand the filename')
return None
summary=describe(data)
f_cut=10  # maximum number of fields shown per slide
n_cut=round(summary.shape[1]/f_cut)
n_cut=1 if n_cut==0 else n_cut
for i in range(n_cut):
if i!=n_cut-1:
summary_tmp=summary.iloc[:,f_cut*i:f_cut*i+f_cut]
else:
summary_tmp=summary.iloc[:,f_cut*i:]
slide_data={'data':summary_tmp,'slide_type':'table'}
title='Field description {}-{}'.format(i*f_cut+1,min(summary.shape[1],i*f_cut+f_cut))
p.add_slide(data=slide_data,title=title)
for v in var_list:
vtype=v['vtype']
name=v['name']
vlist=v['vlist']
#print(name,':',vtype)
if len(data.loc[:,vlist].dropna())==0:
print('the field: ',name,'has no valid data!')
continue
# previous approach; kept temporarily for testing, remove once the new one is stable
if vtype == 'number_test':
chart=plot(data[name],figure_type='mpl',chart_type='kde')
chart['fig'].savefig('kdeplot1.png',dpi=200)
chart['fig'].clf()
del chart
chart=plot(data[name],figure_type='mpl',chart_type='dist')
chart['fig'].savefig('kdeplot2.png',dpi=200)
chart['fig'].clf()
del chart
summary='''mean: {:.2f}, std: {:.2f}, max: {}'''\
.format(data[name].mean(),data[name].std(),data[name].max())
footnote='Note: sample N={}'.format(data[name].count())
slide_data=[{'data':'kdeplot1.png','slide_type':'picture'},{'data':'kdeplot2.png','slide_type':'picture'}]
p.add_slide(data=slide_data,title=name+' analysis',summary=summary,footnote=footnote)
slides_data.append(slide_data)
os.remove('kdeplot1.png')
os.remove('kdeplot2.png')
if vtype == 'number':
if len(data[name].dropna())==1:
print('the field ',name,' of number type must have more than two items.')
continue
chart=plot(data[name],figure_type='mpl',chart_type='kde')
chart['fig'].savefig('kdeplot.png',dpi=200)
chart['fig'].clf()
del chart
chart=plot(data[name],figure_type='pptx',chart_type='bar')
summary='''MIN: {}, MAX: {}, MEAN: {:.2f}, STD: {:.2f}'''\
.format(data[name].min(),data[name].max(),data[name].mean(),data[name].std())
footnote='Note: sample N={}'.format(data[name].count())
slide_data=[{'data':chart['chart_data'],'slide_type':'chart'},{'data':'kdeplot.png','slide_type':'picture'}]
p.add_slide(data=slide_data,title=name+' analysis',summary=summary,footnote=footnote)
slides_data.append(slide_data)
os.remove('kdeplot.png')
elif vtype == 'category':
tmp=pd.DataFrame(data[name].value_counts())
tmp=tmp*100/tmp.sum()  # convert to percentages
if ('ordered' in v) and v['ordered']:
tmp=pd.DataFrame(tmp,index=v['categories'])
"""
Operator classes for eval.
"""
from __future__ import annotations
from datetime import datetime
from functools import partial
import operator
from typing import (
Callable,
Iterable,
)
import numpy as np
from pandas._libs.tslibs import Timestamp
from pandas.core.dtypes.common import (
is_list_like,
is_scalar,
)
import pandas.core.common as com
from pandas.core.computation.common import (
ensure_decoded,
result_type_many,
)
from pandas.core.computation.scope import DEFAULT_GLOBALS
from pandas.io.formats.printing import (
pprint_thing,
pprint_thing_encoded,
)
REDUCTIONS = ("sum", "prod")
_unary_math_ops = (
"sin",
"cos",
"exp",
"log",
"expm1",
"log1p",
"sqrt",
"sinh",
"cosh",
"tanh",
"arcsin",
"arccos",
"arctan",
"arccosh",
"arcsinh",
"arctanh",
"abs",
"log10",
"floor",
"ceil",
)
_binary_math_ops = ("arctan2",)
MATHOPS = _unary_math_ops + _binary_math_ops
LOCAL_TAG = "__pd_eval_local_"
class UndefinedVariableError(NameError):
"""
NameError subclass for local variables.
"""
def __init__(self, name: str, is_local: bool | None = None) -> None:
base_msg = f"{repr(name)} is not defined"
if is_local:
msg = f"local variable {base_msg}"
else:
msg = f"name {base_msg}"
super().__init__(msg)
class Term:
def __new__(cls, name, env, side=None, encoding=None):
klass = Constant if not isinstance(name, str) else cls
# error: Argument 2 for "super" not an instance of argument 1
supr_new = super(Term, klass).__new__ # type: ignore[misc]
return supr_new(klass)
is_local: bool
def __init__(self, name, env, side=None, encoding=None) -> None:
# name is a str for Term, but may be something else for subclasses
self._name = name
self.env = env
self.side = side
tname = str(name)
self.is_local = tname.startswith(LOCAL_TAG) or tname in DEFAULT_GLOBALS
self._value = self._resolve_name()
self.encoding = encoding
@property
def local_name(self) -> str:
return self.name.replace(LOCAL_TAG, "")
def __repr__(self) -> str:
return pprint_thing(self.name)
def __call__(self, *args, **kwargs):
return self.value
def evaluate(self, *args, **kwargs):
return self
def _resolve_name(self):
res = self.env.resolve(self.local_name, is_local=self.is_local)
self.update(res)
if hasattr(res, "ndim") and res.ndim > 2:
raise NotImplementedError(
"N-dimensional objects, where N > 2, are not supported with eval"
)
return res
def update(self, value):
"""
search order for local (i.e., @variable) variables:
scope, key_variable
[('locals', 'local_name'),
('globals', 'local_name'),
('locals', 'key'),
('globals', 'key')]
"""
key = self.name
# if it's a variable name (otherwise a constant)
if isinstance(key, str):
self.env.swapkey(self.local_name, key, new_value=value)
self.value = value
@property
def is_scalar(self) -> bool:
return is_scalar(self._value)
@property
def type(self):
try:
# potentially very slow for large, mixed dtype frames
return self._value.values.dtype
except AttributeError:
try:
# ndarray
return self._value.dtype
except AttributeError:
# scalar
return type(self._value)
return_type = type
@property
def raw(self) -> str:
return f"{type(self).__name__}(name={repr(self.name)}, type={self.type})"
@property
def is_datetime(self) -> bool:
try:
t = self.type.type
except AttributeError:
t = self.type
return issubclass(t, (datetime, np.datetime64))
@property
def value(self):
return self._value
@value.setter
def value(self, new_value):
self._value = new_value
@property
def name(self):
return self._name
@property
def ndim(self) -> int:
return self._value.ndim
class Constant(Term):
def __init__(self, value, env, side=None, encoding=None) -> None:
super().__init__(value, env, side=side, encoding=encoding)
def _resolve_name(self):
return self._name
@property
def name(self):
return self.value
def __repr__(self) -> str:
# in python 2 str() of float
# can truncate shorter than repr()
return repr(self.name)
_bool_op_map = {"not": "~", "and": "&", "or": "|"}
class Op:
"""
Hold an operator of arbitrary arity.
"""
op: str
def __init__(self, op: str, operands: Iterable[Term | Op], encoding=None) -> None:
self.op = _bool_op_map.get(op, op)
self.operands = operands
self.encoding = encoding
def __iter__(self):
return iter(self.operands)
def __repr__(self) -> str:
"""
Print a generic n-ary operator and its operands using infix notation.
"""
# recurse over the operands
parened = (f"({pprint_thing(opr)})" for opr in self.operands)
return pprint_thing(f" {self.op} ".join(parened))
@property
def return_type(self):
# clobber types to bool if the op is a boolean operator
if self.op in (CMP_OPS_SYMS + BOOL_OPS_SYMS):
return np.bool_
return result_type_many(*(term.type for term in com.flatten(self)))
@property
def has_invalid_return_type(self) -> bool:
types = self.operand_types
obj_dtype_set = frozenset([np.dtype("object")])
return self.return_type == object and types - obj_dtype_set
@property
def operand_types(self):
return frozenset(term.type for term in com.flatten(self))
import json
import joblib
import nltk
import numpy as np
import pandas as pd
import plotly
from flask import Flask, render_template, request
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from plotly.graph_objs import Bar, Pie
from scipy.stats.mstats import gmean
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.metrics import fbeta_score
from sqlalchemy import create_engine
# nltk.download(['punkt', 'wordnet', 'averaged_perceptron_tagger'])
app = Flask(__name__)
def tokenize(text):
"""
Tokenize and clean text data: lemmatize each token, normalize case,
and strip leading/trailing whitespace
Args:
text (str): a text message (English)
Returns:
clean_tokens: list of cleaned tokens, ready to feed ML modeling
"""
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for token in tokens:
clean_token = lemmatizer.lemmatize(token).lower().strip()
clean_tokens.append(clean_token)
return clean_tokens
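# Illustrative usage (exact output depends on the installed NLTK models):
# tokenize("Water is needed in the northern districts")
# -> ['water', 'is', 'needed', 'in', 'the', 'northern', 'district']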
class StartingVerbExtractor(BaseEstimator, TransformerMixin):
"""
This class extracts the starting verb of a sentence,
creating a new feature for the ML classifier
"""
@staticmethod
def isverb(text):
"""
Check if the starting word is a verb
Args:
text (str): text messages to be checked
Returns:
boolean (bool)
"""
sentence_list = nltk.sent_tokenize(text)
for sentence in sentence_list:
pos_tags = nltk.pos_tag(tokenize(sentence))
# skip empty sentences to avoid an IndexError on pos_tags[0]
if not pos_tags:
continue
first_word, first_tag = pos_tags[0]
if first_tag in ['VB', 'VBP'] or first_word == 'RT':
return True
return False
def fit(self, x, y=None):
"""
No fitting is required for this transformer; return self so it can be
used inside sklearn pipelines.
"""
return self
def transform(self, x):
"""
Transform incoming dataframe to check for starting verb
Args:
x:
Returns:
dataframe with tagged verbs (pd.Dataframe)
"""
# reconstructed from the docstring: apply the starting-verb check to each document
x_tagged = pd.Series(x).apply(self.isverb)
return pd.DataFrame(x_tagged)
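# Illustrative sketch (not the app's actual model): the extractor can be combined
# with a bag-of-words pipeline through a FeatureUnion.
# from sklearn.pipeline import FeatureUnion, Pipeline
# from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
# features = FeatureUnion([
#     ('text_pipeline', Pipeline([
#         ('vect', CountVectorizer(tokenizer=tokenize)),
#         ('tfidf', TfidfTransformer()),
#     ])),
#     ('starting_verb', StartingVerbExtractor()),
# ])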
"""
This module contains all US-specific data loading and data cleaning routines.
"""
import requests
import pandas as pd
import numpy as np
idx = pd.IndexSlice
def get_raw_covidtracking_data():
""" Gets the current daily CSV from COVIDTracking """
url = "https://covidtracking.com/api/v1/states/daily.csv"
data = pd.read_csv(url)
return data
def process_covidtracking_data(data: pd.DataFrame, run_date: pd.Timestamp):
""" Processes raw COVIDTracking data to be in a form for the GenerativeModel.
In many cases, we need to correct data errors or obvious outliers."""
data = data.rename(columns={"state": "region"})
data["date"] = pd.to_datetime(data["date"], format="%Y%m%d")
data = data.set_index(["region", "date"]).sort_index()
data = data[["positive", "total"]]
# Too little data or unreliable reporting in the data source.
data = data.drop(["MP", "GU", "AS", "PR", "VI"])
# On Jun 5 Covidtracking started counting probable cases too
# which increases the amount by 5014.
# https://covidtracking.com/screenshots/MI/MI-20200605-184320.png
data.loc[idx["MI", pd.Timestamp("2020-06-05")], "positive"] -= 5014
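# Illustrative usage (hypothetical run date):
# raw = get_raw_covidtracking_data()
# processed = process_covidtracking_data(raw, run_date=pd.Timestamp("2020-06-10"))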
import copy
from builtins import range
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from ..testing_utils import make_ecommerce_entityset
from featuretools import variable_types
from featuretools.entityset import EntitySet
@pytest.fixture()
def entityset():
return make_ecommerce_entityset()
@pytest.fixture
def entity(entityset):
return entityset['log']
class TestQueryFuncs(object):
def test_query_by_id(self, entityset):
df = entityset['log'].query_by_values(instance_vals=[0])
assert df['id'].values[0] == 0
def test_query_by_id_with_sort(self, entityset):
df = entityset['log'].query_by_values(
instance_vals=[2, 1, 3],
return_sorted=True)
assert df['id'].values.tolist() == [2, 1, 3]
def test_query_by_id_with_time(self, entityset):
df = entityset['log'].query_by_values(
instance_vals=[0, 1, 2, 3, 4],
time_last=datetime(2011, 4, 9, 10, 30, 2 * 6))
assert df['id'].get_values().tolist() == [0, 1, 2]
def test_query_by_variable_with_time(self, entityset):
df = entityset['log'].query_by_values(
instance_vals=[0, 1, 2], variable_id='session_id',
time_last=datetime(2011, 4, 9, 10, 50, 0))
true_values = [
i * 5 for i in range(5)] + [i * 1 for i in range(4)] + [0]
assert df['id'].get_values().tolist() == list(range(10))
assert df['value'].get_values().tolist() == true_values
def test_query_by_variable_with_training_window(self, entityset):
df = entityset['log'].query_by_values(
instance_vals=[0, 1, 2], variable_id='session_id',
time_last=datetime(2011, 4, 9, 10, 50, 0),
training_window='15m')
assert df['id'].get_values().tolist() == [9]
assert df['value'].get_values().tolist() == [0]
def test_query_by_indexed_variable(self, entityset):
df = entityset['log'].query_by_values(
instance_vals=['taco clock'],
variable_id='product_id')
assert df['id'].get_values().tolist() == [15, 16]
def test_query_by_non_unique_sort_raises(self, entityset):
with pytest.raises(ValueError):
entityset['log'].query_by_values(
instance_vals=[0, 2, 1],
variable_id='session_id', return_sorted=True)
class TestVariableHandling(object):
# TODO: rewrite now that ds and entityset are separate
def test_check_variables_and_dataframe(self):
# matches
df = pd.DataFrame({'id': [0, 1, 2], 'category': ['a', 'b', 'a']})
vtypes = {'id': variable_types.Categorical,
'category': variable_types.Categorical}
entityset = EntitySet(id='test')
entityset.entity_from_dataframe('test_entity', df, index='id',
variable_types=vtypes)
assert entityset.entity_stores['test_entity'].variable_types['category'] == variable_types.Categorical
def test_make_index_variable_ordering(self):
df = pd.DataFrame({'id': [0, 1, 2], 'category': ['a', 'b', 'a']})
vtypes = {'id': variable_types.Categorical,
'category': variable_types.Categorical}
entityset = EntitySet(id='test')
entityset.entity_from_dataframe(entity_id='test_entity',
index='id1',
make_index=True,
variable_types=vtypes,
dataframe=df)
assert entityset.entity_stores['test_entity'].df.columns[0] == 'id1'
def test_extra_variable_type(self):
# more variables
df = pd.DataFrame({'id': [0, 1, 2], 'category': ['a', 'b', 'a']})
vtypes = {'id': variable_types.Categorical,
'category': variable_types.Categorical,
'category2': variable_types.Categorical}
with pytest.raises(LookupError):
entityset = EntitySet(id='test')
entityset.entity_from_dataframe(entity_id='test_entity',
index='id',
variable_types=vtypes, dataframe=df)
def test_unknown_index(self):
# more variables
df = pd.DataFrame({'category': ['a', 'b', 'a']})
vtypes = {'category': variable_types.Categorical}
entityset = EntitySet(id='test')
entityset.entity_from_dataframe(entity_id='test_entity',
index='id',
variable_types=vtypes, dataframe=df)
assert entityset['test_entity'].index == 'id'
assert entityset['test_entity'].df['id'].tolist() == list(range(3))
def test_bad_index_variables(self):
# more variables
df = pd.DataFrame({'id': [0, 1, 2], 'category': ['a', 'b', 'a']})
vtypes = {'id': variable_types.Categorical,
'category': variable_types.Categorical}
with pytest.raises(LookupError):
entityset = EntitySet(id='test')
entityset.entity_from_dataframe(entity_id='test_entity',
index='id',
variable_types=vtypes,
dataframe=df,
time_index='time')
def test_converts_variable_types_on_init(self):
df = pd.DataFrame({'id': [0, 1, 2],
'category': ['a', 'b', 'a'],
'category_int': [1, 2, 3],
'ints': ['1', '2', '3'],
'floats': ['1', '2', '3.0']})
df["category_int"] = df["category_int"].astype("category")
vtypes = {'id': variable_types.Categorical,
'ints': variable_types.Numeric,
'floats': variable_types.Numeric}
entityset = EntitySet(id='test')
entityset.entity_from_dataframe(entity_id='test_entity', index='id',
variable_types=vtypes, dataframe=df)
entity_df = entityset.get_dataframe('test_entity')
assert entity_df['ints'].dtype.name in variable_types.PandasTypes._pandas_numerics
assert entity_df['floats'].dtype.name in variable_types.PandasTypes._pandas_numerics
        # this is inferred from the pandas dtype
e = entityset["test_entity"]
assert isinstance(e['category_int'], variable_types.Categorical)
def test_converts_variable_type_after_init(self):
df = pd.DataFrame({'id': [0, 1, 2],
'category': ['a', 'b', 'a'],
'ints': ['1', '2', '1']})
df["category"] = df["category"].astype("category")
entityset = EntitySet(id='test')
entityset.entity_from_dataframe(entity_id='test_entity', index='id',
dataframe=df)
e = entityset['test_entity']
df = entityset.get_dataframe('test_entity')
e.convert_variable_type('ints', variable_types.Numeric)
assert isinstance(e['ints'], variable_types.Numeric)
assert df['ints'].dtype.name in variable_types.PandasTypes._pandas_numerics
e.convert_variable_type('ints', variable_types.Categorical)
assert isinstance(e['ints'], variable_types.Categorical)
e.convert_variable_type('ints', variable_types.Ordinal)
assert isinstance(e['ints'], variable_types.Ordinal)
e.convert_variable_type('ints', variable_types.Boolean,
true_val=1, false_val=2)
assert isinstance(e['ints'], variable_types.Boolean)
assert df['ints'].dtype.name == 'bool'
def test_converts_datetime(self):
# string converts to datetime correctly
        # This test fails without defining vtypes: the EntitySet
        # infers that the time column should be numeric
times = pd.date_range('1/1/2011', periods=3, freq='H')
time_strs = times.strftime('%Y-%m-%d')
df = pd.DataFrame({'id': [0, 1, 2], 'time': time_strs})
vtypes = {'id': variable_types.Categorical,
'time': variable_types.Datetime}
entityset = EntitySet(id='test')
entityset._import_from_dataframe(entity_id='test_entity', index='id',
time_index="time", variable_types=vtypes,
dataframe=df)
pd_col = entityset.get_column_data('test_entity', 'time')
# assert type(es['test_entity']['time']) == variable_types.Datetime
assert type(pd_col[0]) == pd.Timestamp
def test_handles_datetime_format(self):
# check if we load according to the format string
        # pass in an ambiguous date
datetime_format = "%d-%m-%Y"
actual = pd.Timestamp('Jan 2, 2011')
time_strs = [actual.strftime(datetime_format)] * 3
df = pd.DataFrame(
{'id': [0, 1, 2], 'time_format': time_strs, 'time_no_format': time_strs})
vtypes = {'id': variable_types.Categorical,
'time_format': (variable_types.Datetime, {"format": datetime_format}),
'time_no_format': variable_types.Datetime}
entityset = EntitySet(id='test')
entityset._import_from_dataframe(entity_id='test_entity', index='id',
variable_types=vtypes, dataframe=df)
col_format = entityset.get_column_data('test_entity', 'time_format')
col_no_format = entityset.get_column_data(
'test_entity', 'time_no_format')
# without formatting pandas gets it wrong
assert (col_no_format != actual).all()
        # with formatting we correctly get Jan 2
assert (col_format == actual).all()
def test_handles_datetime_mismatch(self):
# can't convert arbitrary strings
df = pd.DataFrame({'id': [0, 1, 2], 'time': ['a', 'b', 'tomorrow']})
vtypes = {'id': variable_types.Categorical,
'time': variable_types.Datetime}
with pytest.raises(ValueError):
entityset = EntitySet(id='test')
entityset.entity_from_dataframe('test_entity', df, 'id',
time_index='time', variable_types=vtypes)
def test_calculates_statistics_on_init(self):
df = pd.DataFrame({'id': [0, 1, 2],
'time': [datetime(2011, 4, 9, 10, 31, 3 * i)
for i in range(3)],
'category': ['a', 'b', 'a'],
'number': [4, 5, 6],
'boolean': [True, False, True],
'boolean_with_nan': [True, False, np.nan]})
vtypes = {'id': variable_types.Categorical,
'time': variable_types.Datetime,
'category': variable_types.Categorical,
'number': variable_types.Numeric,
'boolean': variable_types.Boolean,
'boolean_with_nan': variable_types.Boolean}
entityset = EntitySet(id='test')
entityset.entity_from_dataframe('stats_test_entity', df, 'id',
variable_types=vtypes)
e = entityset["stats_test_entity"]
# numerics don't have nunique or percent_unique defined
for v in ['time', 'category', 'number']:
assert e[v].count == 3
for v in ['time', 'number']:
with pytest.raises(AttributeError):
e[v].nunique
with pytest.raises(AttributeError):
e[v].percent_unique
# 'id' column automatically parsed as id
assert e['id'].count == 3
# categoricals have nunique and percent_unique defined
assert e['category'].nunique == 2
assert e['category'].percent_unique == 2. / 3
# booleans have count and number of true/false labels defined
assert e['boolean'].count == 3
# assert e['boolean'].num_true == 3
assert e['boolean'].num_true == 2
assert e['boolean'].num_false == 1
# TODO: the below fails, but shouldn't
# boolean with nan have count and number of true/false labels defined
# assert e['boolean_with_nan'].count == 2
# assert e['boolean_with_nan'].num_true == 1
# assert e['boolean_with_nan'].num_false == 1
def test_column_funcs(self, entityset):
# Note: to convert the time column directly either the variable type
        # or convert_date_columns must be specified
df = pd.DataFrame({'id': [0, 1, 2],
'time': [datetime(2011, 4, 9, 10, 31, 3 * i)
for i in range(3)],
'category': ['a', 'b', 'a'],
'number': [4, 5, 6]})
vtypes = {'time': variable_types.Datetime}
entityset.entity_from_dataframe('test_entity', df, index='id',
time_index='time', variable_types=vtypes)
assert entityset.get_dataframe('test_entity').shape == df.shape
assert entityset.get_index('test_entity') == 'id'
assert entityset.get_time_index('test_entity') == 'time'
assert set(entityset.get_column_names(
'test_entity')) == set(df.columns)
assert entityset.get_column_max('test_entity', 'number') == 6
assert entityset.get_column_min('test_entity', 'number') == 4
assert entityset.get_column_std('test_entity', 'number') == 1
assert entityset.get_column_count('test_entity', 'number') == 3
assert entityset.get_column_mean('test_entity', 'number') == 5
assert entityset.get_column_nunique('test_entity', 'number') == 3
assert entityset.get_column_type(
'test_entity', 'time') == df['time'].dtype
assert set(entityset.get_column_data(
'test_entity', 'id')) == set(df['id'])
def test_combine_variables(self, entityset):
# basic case
entityset.combine_variables('log', 'comment+product_id',
['comments', 'product_id'])
assert entityset['log']['comment+product_id'].dtype == 'categorical'
assert 'comment+product_id' in entityset['log'].df
# one variable to combine
entityset.combine_variables('log', 'comment+',
['comments'])
assert entityset['log']['comment+'].dtype == 'categorical'
assert 'comment+' in entityset['log'].df
# drop columns
entityset.combine_variables('log', 'new_priority_level',
['priority_level'],
drop=True)
assert entityset['log']['new_priority_level'].dtype == 'categorical'
assert 'new_priority_level' in entityset['log'].df
assert 'priority_level' not in entityset['log'].df
assert 'priority_level' not in entityset['log'].variables
# hashed
entityset.combine_variables('log', 'hashed_comment_product',
['comments', 'product_id'],
hashed=True)
assert entityset['log']['comment+product_id'].dtype == 'categorical'
assert entityset['log'].df['hashed_comment_product'].dtype == 'int64'
assert 'comment+product_id' in entityset['log'].df
def test_add_parent_time_index(self, entityset):
entityset = copy.deepcopy(entityset)
entityset.add_parent_time_index(entity_id='sessions',
parent_entity_id='customers',
parent_time_index_variable=None,
child_time_index_variable='session_date',
include_secondary_time_index=True,
secondary_time_index_variables=['cancel_reason'])
sessions = entityset['sessions']
assert sessions.time_index == 'session_date'
assert sessions.secondary_time_index == {
'cancel_date': ['cancel_reason']}
true_session_dates = ([datetime(2011, 4, 6)] +
[datetime(2011, 4, 8)] * 3 +
[datetime(2011, 4, 9)] * 2)
for t, x in zip(true_session_dates, sessions.df['session_date']):
assert t == x.to_pydatetime()
true_cancel_dates = ([datetime(2012, 1, 6)] +
[datetime(2011, 6, 8)] * 3 +
[datetime(2011, 10, 9)] * 2)
for t, x in zip(true_cancel_dates, sessions.df['cancel_date']):
assert t == x.to_pydatetime()
true_cancel_reasons = (['reason_1'] +
['reason_1'] * 3 +
['reason_2'] * 2)
for t, x in zip(true_cancel_reasons, sessions.df['cancel_reason']):
assert t == x
def test_sort_time_id(self):
transactions_df = pd.DataFrame({"id": [1, 2, 3, 4, 5, 6],
"transaction_time": pd.date_range(start="10:00", periods=6, freq="10s")[::-1]})
es = EntitySet("test", entities={"t": (
transactions_df, "id", "transaction_time")})
times = es["t"].df.transaction_time.tolist()
assert times == sorted(transactions_df.transaction_time.tolist())
def test_already_sorted_parameter(self):
        transactions_df = pd.DataFrame({"id": [1, 2, 3, 4, 5, 6],
                                        "transaction_time": [datetime(2014, 4, 6),
                                                             datetime(2012, 4, 8),
                                                             datetime(2012, 4, 8),
                                                             datetime(2013, 4, 8),
                                                             datetime(2015, 4, 8),
                                                             datetime(2016, 4, 9)]})
es = EntitySet(id='test')
es.entity_from_dataframe('t',
transactions_df,
index='id',
time_index="transaction_time",
already_sorted=True)
times = es["t"].df.transaction_time.tolist()
assert times == transactions_df.transaction_time.tolist()
def test_concat_entitysets(self, entityset):
df = pd.DataFrame({'id': [0, 1, 2], 'category': ['a', 'b', 'a']})
vtypes = {'id': variable_types.Categorical,
'category': variable_types.Categorical}
entityset.entity_from_dataframe(entity_id='test_entity',
index='id1',
make_index=True,
variable_types=vtypes,
dataframe=df)
import copy
assert entityset.__eq__(entityset)
entityset_1 = copy.deepcopy(entityset)
entityset_2 = copy.deepcopy(entityset)
emap = {
'log': [list(range(10)) + [14, 15, 16], list(range(10, 14)) + [15, 16]],
'sessions': [[0, 1, 2, 5], [1, 3, 4, 5]],
'customers': [[0, 2], [1, 2]],
'test_entity': [[0, 1], [0, 2]],
}
for i, es in enumerate([entityset_1, entityset_2]):
for entity, rows in emap.items():
df = es[entity].df
es[entity].update_data(df.loc[rows[i]])
for r in entityset.relationships:
es.index_data(r)
# make sure internal indexes work before concat
regions = entityset_1['customers'].query_by_values(['United States'], variable_id='region_id')
assert regions.index.isin(entityset_1['customers'].df.index).all()
assert entityset_1.__eq__(entityset_2)
assert not entityset_1.__eq__(entityset_2, deep=True)
old_entityset_1 = copy.deepcopy(entityset_1)
old_entityset_2 = copy.deepcopy(entityset_2)
entityset_3 = entityset_1.concat(entityset_2)
assert old_entityset_1.__eq__(entityset_1, deep=True)
assert old_entityset_2.__eq__(entityset_2, deep=True)
assert entityset_3.__eq__(entityset, deep=True)
for entity in entityset.entities:
df = entityset[entity.id].df.sort_index()
df_3 = entityset_3[entity.id].df.sort_index()
for column in df:
for x, y in zip(df[column], df_3[column]):
                    assert ((pd.isnull(x) and pd.isnull(y)) or (x == y))
# extract.py: extracts data from runs and computes summary statistics, collated into an output folder inside the collated_outputs folder; a summary is written to summary.csv
# requirements: Prescient iterations have already been run, with their files saved to the downloads folder under directories starting with compilation_prefix
# intended system: Tiger or local
# dependencies: analyze_prescient_output
# created by: <NAME>
# email: <EMAIL>
# date: June 21, 2021
import os
import pandas as pd
from prescient_helpers.analyze_prescient_output import CVaR
import numpy as np
def output_summary(compilation_prefix = "scen"):
os.chdir("..")
os.chdir("./downloads")
all_files = os.listdir()
dictionary = {}
for dir in all_files:
if (dir.startswith(compilation_prefix) and os.path.exists("./"+dir+"/output/overall_simulation_output.csv")):
dictionary.setdefault(dir[0:-3], [])
            output_data = pd.read_csv("./"+dir+"/output/overall_simulation_output.csv")
from __future__ import division
from datetime import timedelta
from functools import partial
import itertools
from nose.tools import assert_true
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
import platform
if platform.system() != 'Windows':
from zipline.pipeline.loaders.blaze.estimates import (
BlazeNextEstimatesLoader,
BlazeNextSplitAdjustedEstimatesLoader,
BlazePreviousEstimatesLoader,
BlazePreviousSplitAdjustedEstimatesLoader,
)
from zipline.pipeline.loaders.earnings_estimates import (
INVALID_NUM_QTRS_MESSAGE,
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal, assert_raises_regex
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import unittest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date,
sids,
tuples,
end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples,
columns=[SID_FIELD_NAME,
'estimate',
'knowledge_date'])
df = df.pivot_table(columns=SID_FIELD_NAME,
values='estimate',
index='knowledge_date')
df = df.reindex(
pd.date_range(start_date, end_date)
)
# Index name is lost during reindex.
df.index = df.index.rename('knowledge_date')
df['at_date'] = end_date.tz_localize('utc')
df = df.set_index(['at_date', df.index.tz_localize('utc')]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
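# A minimal illustrative sketch of the helper above (not part of the test
# fixtures; the sids, estimates, and dates here are made up purely for
# illustration):
#
#     create_expected_df_for_factor_compute(
#         start_date=pd.Timestamp('2015-01-05'),
#         sids=[0, 10, 20],
#         tuples=[(0, 100., pd.Timestamp('2015-01-05')),
#                 (10, 110., pd.Timestamp('2015-01-07'))],
#         end_date=pd.Timestamp('2015-01-09'),
#     )
#
# The result has one column per sid (sid 20 is all-NaN since it has no
# tuples), a (at_date, knowledge_date) MultiIndex covering 2015-01-05 through
# 2015-01-09 with at_date fixed at the UTC-localized end_date, and each sid's
# latest estimate forward-filled from its knowledge_date onward.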
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp('2014-12-28')
END_DATE = pd.Timestamp('2015-02-04')
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError('make_loader')
@classmethod
def make_events(cls):
raise NotImplementedError('make_events')
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days, self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
's' + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(cls.events, {column.name: val for
column, val in
cls.columns.items()})
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: 'event_date',
MultipleColumnsEstimates.fiscal_quarter: 'fiscal_quarter',
MultipleColumnsEstimates.fiscal_year: 'fiscal_year',
MultipleColumnsEstimates.estimate1: 'estimate1',
MultipleColumnsEstimates.estimate2: 'estimate2'
}
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate1': [1., 2.],
'estimate2': [3., 4.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def make_expected_out(cls):
raise NotImplementedError('make_expected_out')
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp('2015-01-15', tz='utc'),
end_date=pd.Timestamp('2015-01-15', tz='utc'),
)
assert_frame_equal(results, self.expected_out)
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-10'),
'estimate1': 1.,
'estimate2': 3.,
FISCAL_QUARTER_FIELD_NAME: 1.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-20'),
'estimate1': 2.,
'estimate2': 4.,
FISCAL_QUARTER_FIELD_NAME: 2.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
dummy_df = pd.DataFrame({SID_FIELD_NAME: 0},
columns=[SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
'estimate'],
index=[0])
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1,
bad_dataset2,
good_dataset)
for c in dataset.columns}
p = Pipeline(columns)
with self.assertRaises(ValueError) as e:
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
assert_raises_regex(e, INVALID_NUM_QTRS_MESSAGE % "-1,-2")
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with self.assertRaises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = ["split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof"]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(itertools.product(
(NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader),
))
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
with self.assertRaises(ValueError):
loader(dummy_df,
{column.name: val for column, val in
columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"))
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp('2015-01-28')
q1_knowledge_dates = [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-04'),
pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-11')]
q2_knowledge_dates = [pd.Timestamp('2015-01-14'),
pd.Timestamp('2015-01-17'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-23')]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-14')] # One day late
q2_release_dates = [pd.Timestamp('2015-01-25'), # One day early
pd.Timestamp('2015-01-26')]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates +
cls.q2_knowledge_dates,
4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (q1e1 < q1e2 and
q2e1 < q2e2 and
# All estimates are < Q2's event, so just constrain Q1
# estimates.
q1e1 < cls.q1_release_dates[0] and
q1e2 < cls.q1_release_dates[0]):
sid_estimates.append(cls.create_estimates_df(q1e1,
q1e2,
q2e1,
q2e2,
sid))
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates +
sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
'estimate': [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid
})
@classmethod
def create_estimates_df(cls,
q1e1,
q1e2,
q2e1,
q2e2,
sid):
return pd.DataFrame({
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
'estimate': [.1, .2, .3, .4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
})
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert_true(sid_estimates.isnull().all().all())
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[self.get_expected_estimate(
q1_knowledge[q1_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
q2_knowledge[q2_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
date.tz_localize(None),
).set_index([[date]]) for date in sid_estimates.index],
axis=0)
assert_equal(all_expected[sid_estimates.columns],
sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateLoaderTestCase(NextEstimate):
"""
Run the same tests as EventsLoaderTestCase, but using a BlazeEventsLoader.
"""
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q2_knowledge.iloc[-1:]
elif (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateLoaderTestCase(PreviousEstimate):
"""
Run the same tests as EventsLoaderTestCase, but using a BlazeEventsLoader.
"""
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
Runs a Pipeline that calculate which estimates for multiple quarters
out and checks that the returned columns contain data for the correct
number of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate': [1., 2.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(columns=[cls.columns[col] + '1'
for col in cls.columns] +
[cls.columns[col] + '2'
for col in cls.columns],
index=cls.trading_days)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ('1', '2')
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(
expected[expected_name]
)
else:
expected[expected_name] = expected[
expected_name
].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge([{c.name + '1': c.latest for c in dataset1.columns},
{c.name + '2': c.latest for c in dataset2.columns}])
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + '1' for col in self.columns]
q2_columns = [col.name + '2' for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(sorted(np.array(q1_columns + q2_columns)),
sorted(results.columns.values))
assert_equal(self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1))
class NextEstimateMultipleQuarters(
WithEstimateMultipleQuarters, ZiplineTestCase
):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-11'),
raw_name + '1'
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp('2015-01-11'):pd.Timestamp('2015-01-20'),
raw_name + '1'
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ['estimate', 'event_date']:
expected.loc[
pd.Timestamp('2015-01-06'):pd.Timestamp('2015-01-10'),
col_name + '2'
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-09'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 2
expected.loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 3
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-20'),
FISCAL_YEAR_FIELD_NAME + '2'
] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateMultipleQuarters(NextEstimateMultipleQuarters):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class PreviousEstimateMultipleQuarters(
WithEstimateMultipleQuarters,
ZiplineTestCase
):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-19')
] = cls.events[raw_name].iloc[0]
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ['estimate', 'event_date']:
expected[col_name + '2'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[col_name].iloc[0]
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 4
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 2014
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 1
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateMultipleQuarters(PreviousEstimateMultipleQuarters):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13')] * 2,
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-20')],
'estimate': [11., 12., 21.] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6
})
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError('assert_compute')
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=pd.Timestamp('2015-01-13', tz='utc'),
# last event date we have
end_date=pd.Timestamp('2015-01-14', tz='utc'),
)
class PreviousVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousVaryingNumEstimates(PreviousVaryingNumEstimates):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class NextVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextVaryingNumEstimates(NextVaryingNumEstimates):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class WithEstimateWindows(WithEstimates):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
A dictionary mapping to the number of quarters out to
snapshots of how the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp('2015-02-10')
window_test_start_date = pd.Timestamp('2015-01-05')
critical_dates = [pd.Timestamp('2015-01-09', tz='utc'),
pd.Timestamp('2015-01-15', tz='utc'),
pd.Timestamp('2015-01-20', tz='utc'),
pd.Timestamp('2015-01-26', tz='utc'),
pd.Timestamp('2015-02-05', tz='utc'),
pd.Timestamp('2015-02-10', tz='utc')]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-02-10'),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp('2015-01-18')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-04-01')],
'estimate': [100., 101.] + [200., 201.] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
})
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-15')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-22'), pd.Timestamp('2015-01-22'),
pd.Timestamp('2015-02-05'), pd.Timestamp('2015-02-05')],
'estimate': [110., 111.] + [310., 311.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10
})
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-07'),
cls.window_test_start_date,
pd.Timestamp('2015-01-17')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10')],
'estimate': [120., 121.] + [220., 221.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 20
})
concatted = pd.concat([sid_0_timeline,
sid_10_timeline,
sid_20_timeline]).reset_index()
np.random.seed(0)
return concatted.reindex(np.random.permutation(concatted.index))
@classmethod
def get_sids(cls):
sids = sorted(cls.events[SID_FIELD_NAME].unique())
# Add extra sids between sids in our data. We want to test that we
# apply adjustments to the correct sids.
return [sid for i in range(len(sids) - 1)
for sid in range(sids[i], sids[i+1])] + [sids[-1]]
@classmethod
def make_expected_timelines(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithEstimateWindows, cls).init_class_fixtures()
cls.create_expected_df_for_factor_compute = partial(
create_expected_df_for_factor_compute,
cls.window_test_start_date,
cls.get_sids()
)
cls.timelines = cls.make_expected_timelines()
@parameterized.expand(window_test_cases)
def test_estimate_windows_at_quarter_boundaries(self,
start_date,
num_announcements_out):
dataset = QuartersEstimates(num_announcements_out)
trading_days = self.trading_days
timelines = self.timelines
# The window length should be from the starting index back to the first
# date on which we got data. The goal is to ensure that as we
# progress through the timeline, all data we got, starting from that
# first date, is correctly overwritten.
window_len = (
self.trading_days.get_loc(start_date) -
self.trading_days.get_loc(self.window_test_start_date) + 1
)
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = window_len
def compute(self, today, assets, out, estimate):
today_idx = trading_days.get_loc(today)
today_timeline = timelines[
num_announcements_out
].loc[today].reindex(
trading_days[:today_idx + 1]
).values
timeline_start_idx = (len(today_timeline) - window_len)
assert_almost_equal(estimate,
today_timeline[timeline_start_idx:])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=start_date,
# last event date we have
end_date=pd.Timestamp('2015-02-10', tz='utc'),
)
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat([
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)
], end_date)
for end_date in pd.date_range('2015-01-09', '2015-01-19')
]),
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-20'))],
pd.Timestamp('2015-01-20')
),
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-20'))],
pd.Timestamp('2015-01-21')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 111, pd.Timestamp('2015-01-22')),
(20, 121, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-01-22', '2015-02-04')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 311, pd.Timestamp('2015-02-05')),
(20, 121, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-02-05', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 201, pd.Timestamp('2015-02-10')),
(10, 311, pd.Timestamp('2015-02-05')),
(20, 221, pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
),
])
twoq_previous = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-02-09')] +
# We never get estimates for S1 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
[cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-02-10')),
(10, np.NaN, pd.Timestamp('2015-02-05')),
(20, 121, pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
)]
)
return {
1: oneq_previous,
2: twoq_previous
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateWindows(PreviousEstimateWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(bz.data(events), columns)
class NextEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
pd.Timestamp('2015-01-09')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
end_date
) for end_date in pd.date_range('2015-01-12', '2015-01-19')
]),
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(0, 101, pd.Timestamp('2015-01-20')),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
pd.Timestamp('2015-01-20')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-01-21', '2015-01-22')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, 310, pd.Timestamp('2015-01-09')),
(10, 311, pd.Timestamp('2015-01-15')),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-01-23', '2015-02-05')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-02-06', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(0, 201, pd.Timestamp('2015-02-10')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
pd.Timestamp('2015-02-10')
)
])
twoq_next = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-01-11')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-12', '2015-01-16')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
pd.Timestamp('2015-01-20')
)] +
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-21', '2015-02-10')]
)
return {
1: oneq_next,
2: twoq_next
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateWindows(NextEstimateWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(bz.data(events), columns)
class WithSplitAdjustedWindows(WithEstimateWindows):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows and with
split adjustments.
"""
split_adjusted_asof_date = pd.Timestamp('2015-01-14')
@classmethod
def make_events(cls):
# Add an extra sid that has a release before the split-asof-date in
# order to test that we're reversing splits correctly in the previous
# case (without an overwrite) and in the next case (with an overwrite).
sid_30 = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-09'),
# For Q2, we want it to start early enough
# that we can have several adjustments before
# the end of the first quarter so that we
# can test un-adjusting & readjusting with an
# overwrite.
cls.window_test_start_date,
# We want the Q2 event date to be enough past
# the split-asof-date that we can have
# several splits and can make sure that they
# are applied correctly.
pd.Timestamp('2015-01-20')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20')],
'estimate': [130., 131., 230., 231.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 30
})
# An extra sid to test no splits before the split-adjusted-asof-date.
# We want an event before and after the split-adjusted-asof-date &
# timestamps for data points also before and after
        # split-adjusted-asof-date (but also before the split dates, so that
# we can test that splits actually get applied at the correct times).
sid_40 = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-15')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-02-10')],
'estimate': [140., 240.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 40
})
# An extra sid to test all splits before the
# split-adjusted-asof-date. All timestamps should be before that date
# so that we have cases where we un-apply and re-apply splits.
sid_50 = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-02-10')],
'estimate': [150., 250.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 50
})
return pd.concat([
# Slightly hacky, but want to make sure we're using the same
# events as WithEstimateWindows.
cls.__base__.make_events(),
sid_30,
sid_40,
sid_50,
])
@classmethod
def make_splits_data(cls):
# For sid 0, we want to apply a series of splits before and after the
        # split-adjusted-asof-date as well as between quarters (for the
# previous case, where we won't see any values until after the event
# happens).
sid_0_splits = pd.DataFrame({
SID_FIELD_NAME: 0,
'ratio': (-1., 2., 3., 4., 5., 6., 7., 100),
'effective_date': (pd.Timestamp('2014-01-01'), # Filter out
# Split before Q1 event & after first estimate
pd.Timestamp('2015-01-07'),
# Split before Q1 event
pd.Timestamp('2015-01-09'),
# Split before Q1 event
pd.Timestamp('2015-01-13'),
# Split before Q1 event
pd.Timestamp('2015-01-15'),
# Split before Q1 event
pd.Timestamp('2015-01-18'),
# Split after Q1 event and before Q2 event
pd.Timestamp('2015-01-30'),
# Filter out - this is after our date index
pd.Timestamp('2016-01-01'))
})
sid_10_splits = pd.DataFrame({
SID_FIELD_NAME: 10,
'ratio': (.2, .3),
'effective_date': (
# We want a split before the first estimate and before the
# split-adjusted-asof-date but within our calendar index so
# that we can test that the split is NEVER applied.
pd.Timestamp('2015-01-07'),
# Apply a single split before Q1 event.
pd.Timestamp('2015-01-20')),
})
# We want a sid with split dates that collide with another sid (0) to
# make sure splits are correctly applied for both sids.
sid_20_splits = pd.DataFrame({
SID_FIELD_NAME: 20,
'ratio': (.4, .5, .6, .7, .8, .9,),
'effective_date': (
pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-15'),
pd.Timestamp('2015-01-18'),
pd.Timestamp('2015-01-30')),
})
# This sid has event dates that are shifted back so that we can test
# cases where an event occurs before the split-asof-date.
sid_30_splits = pd.DataFrame({
SID_FIELD_NAME: 30,
'ratio': (8, 9, 10, 11, 12),
'effective_date': (
# Split before the event and before the
# split-asof-date.
pd.Timestamp('2015-01-07'),
# Split on date of event but before the
# split-asof-date.
pd.Timestamp('2015-01-09'),
# Split after the event, but before the
# split-asof-date.
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-15'),
pd.Timestamp('2015-01-18')),
})
# No splits for a sid before the split-adjusted-asof-date.
sid_40_splits = pd.DataFrame({
SID_FIELD_NAME: 40,
'ratio': (13, 14),
'effective_date': (
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-22')
)
})
# No splits for a sid after the split-adjusted-asof-date.
sid_50_splits = pd.DataFrame({
SID_FIELD_NAME: 50,
'ratio': (15, 16),
'effective_date': (
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-14')
)
})
return pd.concat([
sid_0_splits,
sid_10_splits,
sid_20_splits,
sid_30_splits,
sid_40_splits,
sid_50_splits,
])
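# --- Illustrative sketch (not part of the original zipline test suite) ---
# The expected timelines below are built from simple split bookkeeping: applying
# a split with ratio ``r`` multiplies a per-share estimate by ``r``, and undoing
# it divides by ``r`` (e.g. ``131*11`` or ``150 * 1 / 15 * 1 / 16`` in the
# expected values). The helper names here are assumptions for illustration only.
def _apply_split_sketch(value, ratio):
    """Return ``value`` with a single split of the given ratio applied."""
    return value * ratio
def _undo_split_sketch(value, ratio):
    """Reverse a previously applied split adjustment."""
    return value / ratio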
class PreviousWithSplitAdjustedWindows(WithSplitAdjustedWindows,
ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat([
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
# Undo all adjustments that haven't happened yet.
(30, 131*1/10, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150 * 1 / 15 * 1 / 16, pd.Timestamp('2015-01-09')),
], end_date)
for end_date in pd.date_range('2015-01-09', '2015-01-12')
]),
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150. * 1 / 16, pd.Timestamp('2015-01-09')),
], pd.Timestamp('2015-01-13')),
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))
], pd.Timestamp('2015-01-14')),
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131*11, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09')),
], end_date)
for end_date in pd.date_range('2015-01-15', '2015-01-16')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121*.7*.8, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-01-20', '2015-01-21')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 111*.3, pd.Timestamp('2015-01-22')),
(20, 121*.7*.8, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13*14, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-01-22', '2015-01-29')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101*7, pd.Timestamp('2015-01-20')),
(10, 111*.3, pd.Timestamp('2015-01-22')),
(20, 121*.7*.8*.9, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13*14, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-01-30', '2015-02-04')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101*7, pd.Timestamp('2015-01-20')),
(10, 311*.3, pd.Timestamp('2015-02-05')),
(20, 121*.7*.8*.9, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13*14, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-02-05', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 201, pd.Timestamp('2015-02-10')),
(10, 311*.3, pd.Timestamp('2015-02-05')),
(20, 221*.8*.9, pd.Timestamp('2015-02-10')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 240.*13*14, pd.Timestamp('2015-02-10')),
(50, 250., pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
),
])
twoq_previous = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-01-19')] +
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131*11*12, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-01-20', '2015-02-09')] +
# We never get estimates for S1 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
[cls.create_expected_df_for_factor_compute(
[(0, 101*7, pd.Timestamp('2015-02-10')),
(10, np.NaN, pd.Timestamp('2015-02-05')),
(20, 121*.7*.8*.9, pd.Timestamp('2015-02-10')),
(30, 131*11*12, pd.Timestamp('2015-01-20')),
(40, 140. * 13 * 14, pd.Timestamp('2015-02-10')),
(50, 150., pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
)]
)
return {
1: oneq_previous,
2: twoq_previous
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousWithSplitAdjustedWindows(PreviousWithSplitAdjustedWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousSplitAdjustedEstimatesLoader(
bz.data(events),
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
class NextWithSplitAdjustedWindows(WithSplitAdjustedWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100*1/4, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(20, 120*5/3, cls.window_test_start_date),
(20, 121*5/3, pd.Timestamp('2015-01-07')),
(30, 130*1/10, cls.window_test_start_date),
(30, 131*1/10, pd.Timestamp('2015-01-09')),
(40, 140, pd.Timestamp('2015-01-09')),
(50, 150.*1/15*1/16, pd.Timestamp('2015-01-09'))],
pd.Timestamp('2015-01-09')
),
cls.create_expected_df_for_factor_compute(
[(0, 100*1/4, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120*5/3, cls.window_test_start_date),
(20, 121*5/3, pd.Timestamp('2015-01-07')),
(30, 230*1/10, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp('2015-01-10')),
(50, 250.*1/15*1/16, pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-12')
),
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07')),
(30, 230, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp('2015-01-10')),
(50, 250.*1/16, pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-13')
),
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07')),
(30, 230, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp('2015-01-10')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-14')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100*5, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120*.7, cls.window_test_start_date),
(20, 121*.7, pd.Timestamp('2015-01-07')),
(30, 230*11, cls.window_test_start_date),
(40, 240, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
end_date
) for end_date in pd.date_range('2015-01-15', '2015-01-16')
]),
cls.create_expected_df_for_factor_compute(
[(0, 100*5*6, cls.window_test_start_date),
(0, 101, pd.Timestamp('2015-01-20')),
(10, 110*.3, pd.Timestamp('2015-01-09')),
(10, 111*.3, pd.Timestamp('2015-01-12')),
(20, 120*.7*.8, cls.window_test_start_date),
(20, 121*.7*.8, pd.Timestamp('2015-01-07')),
(30, 230*11*12, cls.window_test_start_date),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 240*13, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-20')
),
cls.create_expected_df_for_factor_compute(
[(0, 200 * 5 * 6, pd.Timestamp('2015-01-12')),
(10, 110 * .3, pd.Timestamp('2015-01-09')),
(10, 111 * .3, pd.Timestamp('2015-01-12')),
(20, 220 * .7 * .8, cls.window_test_start_date),
(20, 221 * .8, pd.Timestamp('2015-01-17')),
(40, 240 * 13, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-21')
),
cls.create_expected_df_for_factor_compute(
[(0, 200 * 5 * 6, pd.Timestamp('2015-01-12')),
(10, 110 * .3, pd.Timestamp('2015-01-09')),
(10, 111 * .3, pd.Timestamp('2015-01-12')),
(20, 220 * .7 * .8, cls.window_test_start_date),
(20, 221 * .8, pd.Timestamp('2015-01-17')),
(40, 240 * 13 * 14, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-22')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200*5*6, pd.Timestamp('2015-01-12')),
(10, 310*.3, pd.Timestamp('2015-01-09')),
(10, 311*.3, pd.Timestamp('2015-01-15')),
(20, 220*.7*.8, cls.window_test_start_date),
(20, 221*.8, pd.Timestamp('2015-01-17')),
(40, 240 * 13 * 14, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
end_date
) for end_date in pd.date_range('2015-01-23', '2015-01-29')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200*5*6*7, pd.Timestamp('2015-01-12')),
(10, 310*.3, pd.Timestamp('2015-01-09')),
(10, 311*.3, pd.Timestamp('2015-01-15')),
(20, 220*.7*.8*.9, cls.window_test_start_date),
(20, 221*.8*.9, pd.Timestamp('2015-01-17')),
(40, 240 * 13 * 14, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
end_date
) for end_date in pd.date_range('2015-01-30', '2015-02-05')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200*5*6*7, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220*.7*.8*.9, cls.window_test_start_date),
(20, 221*.8*.9, pd.Timestamp('2015-01-17')),
(40, 240 * 13 * 14, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
end_date
) for end_date in pd.date_range('2015-02-06', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 200*5*6*7, pd.Timestamp('2015-01-12')),
(0, 201, pd.Timestamp('2015-02-10')),
(10, np.NaN, cls.window_test_start_date),
(20, 220*.7*.8*.9, cls.window_test_start_date),
(20, 221*.8*.9, pd.Timestamp('2015-01-17')),
(40, 240 * 13 * 14, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-02-10')
)
])
twoq_next = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220*5/3, cls.window_test_start_date),
(30, 230*1/10, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date),
(50, np.NaN, cls.window_test_start_date)],
pd.Timestamp('2015-01-09')
)] +
[cls.create_expected_df_for_factor_compute(
[(0, 200*1/4, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220*5/3, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date)],
pd.Timestamp('2015-01-12')
)] +
[cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-13', '2015-01-14')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200*5, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220*.7, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-15', '2015-01-16')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200*5*6, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220*.7*.8, cls.window_test_start_date),
(20, 221*.8, pd.Timestamp('2015-01-17')),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date)],
pd.Timestamp('2015-01-20')
)] +
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-21', '2015-02-10')]
)
return {
1: oneq_next,
2: twoq_next
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextWithSplitAdjustedWindows(NextWithSplitAdjustedWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextSplitAdjustedEstimatesLoader(
bz.data(events),
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
class WithSplitAdjustedMultipleEstimateColumns(WithEstimates):
"""
ZiplineTestCase mixin for having multiple estimate columns that are
split-adjusted to make sure that adjustments are applied correctly.
Attributes
----------
test_start_date : pd.Timestamp
The start date of the test.
test_end_date : pd.Timestamp
        The end date of the test.
split_adjusted_asof : pd.Timestamp
The split-adjusted-asof-date of the data used in the test, to be used
to create all loaders of test classes that subclass this mixin.
Methods
-------
make_expected_timelines_1q_out -> dict[pd.Timestamp -> dict[str ->
np.array]]
The expected array of results for each date of the date range for
each column. Only for 1 quarter out.
make_expected_timelines_2q_out -> dict[pd.Timestamp -> dict[str ->
np.array]]
The expected array of results for each date of the date range. For 2
quarters out, so only for the column that is requested to be loaded
with 2 quarters out.
Tests
-----
test_adjustments_with_multiple_adjusted_columns
Tests that if you have multiple columns, we still split-adjust
correctly.
test_multiple_datasets_different_num_announcements
Tests that if you have multiple datasets that ask for a different
number of quarters out, and each asks for a different estimates column,
we still split-adjust correctly.
"""
END_DATE = pd.Timestamp('2015-02-10')
test_start_date = pd.Timestamp('2015-01-06', tz='utc')
test_end_date = pd.Timestamp('2015-01-12', tz='utc')
split_adjusted_asof = pd.Timestamp('2015-01-08')
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: 'event_date',
MultipleColumnsEstimates.fiscal_quarter: 'fiscal_quarter',
MultipleColumnsEstimates.fiscal_year: 'fiscal_year',
MultipleColumnsEstimates.estimate1: 'estimate1',
MultipleColumnsEstimates.estimate2: 'estimate2'
}
@classmethod
def make_events(cls):
sid_0_events = pd.DataFrame({
# We only want a stale KD here so that adjustments
# will be applied.
TS_FIELD_NAME: [pd.Timestamp('2015-01-05'),
pd.Timestamp('2015-01-05')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12')],
'estimate1': [1100., 1200.],
'estimate2': [2100., 2200.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
})
# This is just an extra sid to make sure that we apply adjustments
# correctly for multiple columns when we have multiple sids.
sid_1_events = pd.DataFrame({
# We only want a stale KD here so that adjustments
# will be applied.
            TS_FIELD_NAME: [pd.Timestamp('2015-01-05')
##################################################
### import ###
##################################################
# basic lib
from ast import literal_eval
import itertools
import json
import numpy as np
import os
import pandas as pd
from pandarallel import pandarallel
pandarallel.initialize(use_memory_fs=False)
from scipy import ndimage
from scipy.stats import entropy
import sys
from googletrans import Translator
# logging lib
import logging
import src.log as log
# time lib
from time import time
# multiprocess lib
import multiprocessing as mp
PROCESS_NUM = mp.cpu_count()-2
# custom lib
import src.utils as utils
import src.aggregator as aggregator
def cal_score(aggregated_df, gold_df):
ans_df = aggregated_df.loc[aggregated_df['ans'] == True][['id', 'candidates', 'prob']] # answered candidates
fil_df = aggregated_df.loc[aggregated_df['ans'] == False][['id', 'candidates', 'prob']] # filtered candidates
n_ans = len(ans_df)
n_aggregated = len(aggregated_df)
n_gold = len(gold_df)
if fil_df.empty:
FN_df = pd.DataFrame(columns=aggregated_df.columns)
TN_df = pd.DataFrame(columns=aggregated_df.columns)
n_TN = 0
else:
FN_df = fil_df.loc[fil_df['id'].isin(gold_df['id'])] # false negative (filtered out answers)
TN_df = fil_df.loc[~fil_df['id'].isin(gold_df['id'])] # true negative (correctly filtered)
n_TN = len(TN_df)
if ans_df.empty:
FP_df = pd.DataFrame(columns=ans_df.columns)
        TP_df = pd.DataFrame(columns=ans_df.columns)
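# --- Illustrative sketch (assumption; not the original continuation of cal_score) ---
# Given the TP/FP/TN/FN partitions built above, standard retrieval metrics could
# be derived roughly as follows:
def _metrics_sketch(n_TP, n_FP, n_FN):
    precision = n_TP / (n_TP + n_FP) if (n_TP + n_FP) else 0.0
    recall = n_TP / (n_TP + n_FN) if (n_TP + n_FN) else 0.0
    f1 = 2 * precision * recall / (precision + recall) if (precision + recall) else 0.0
    return precision, recall, f1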
#!/usr/bin/env python
import logging
from pathlib import Path
import pandas as pd
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s - %(name)s - %(levelname)s - %(process)d - %(message)s",
)
LOGGER = logging.getLogger("post_pearson")
DATA_DIR = Path.cwd() / "data-20210523-emb"
def process_cell_group(cell_group: str, threshold: float = 0.6):
LOGGER.info(
"process cell group, cell_group=%s, threshold=%f", cell_group, threshold
)
    assert threshold > 0, "threshold should be > 0"
input_file = DATA_DIR / f"{cell_group}.pearson.all.csv"
pearson = pd.read_csv(input_file, index_col=0)
    # Filter 1: keep only rows where at least one |corr| value reaches the threshold.
filtered = pearson[pearson.abs().ge(threshold).any(1)]
    # Filter 2: keep only columns with at least 2 |corr| values at or above the threshold.
filtered = filtered[
filtered.columns[filtered[filtered.abs() >= threshold].count() > 1]
]
ans = pd.DataFrame(0, index=filtered.index, columns=filtered.columns, dtype=str)
for r in filtered.index:
for c in filtered.columns:
corr = filtered.at[r, c]
if abs(corr) < threshold:
ans.at[r, c] = "0"
else:
ans.at[r, c] = f"{corr:.4f}"
output_file = DATA_DIR / f"{cell_group}.pearson.filtered.{threshold}.csv"
ans.to_csv(output_file)
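# Sketch of the two filters above on a toy correlation matrix (illustration only;
# the real input comes from ``{cell_group}.pearson.all.csv``). Names here are
# assumptions, not part of the original pipeline.
def _filter_sketch(threshold: float = 0.6) -> pd.DataFrame:
    toy = pd.DataFrame(
        {"g1": [1.0, 0.7, 0.1], "g2": [0.7, 1.0, 0.2], "g3": [0.1, 0.2, 1.0]},
        index=["r1", "r2", "r3"],
    )
    # Filter 1: keep rows where at least one |corr| reaches the threshold.
    kept = toy[toy.abs().ge(threshold).any(axis=1)]
    # Filter 2: keep columns with more than one |corr| at or above the threshold.
    return kept[kept.columns[kept[kept.abs() >= threshold].count() > 1]]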
def main():
    meta = pd.read_csv(DATA_DIR / "guide.csv")
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
--inFile: Path for the configuration file where the time series data values CSV
--outFile: Path for the configuration file where the time series data values INI
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
            self.fileLocation = os.path.abspath(inputFileName)
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
        return (lambda x: datetime.datetime.strptime(x, formatString))  # 2020-06-09 19:14:00.000
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
            fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0)
import numpy as np
import pandas as pd
from tests.fixtures import DataTestCase
from tsfresh.feature_extraction.data import to_tsdata, LongTsFrameAdapter, WideTsFrameAdapter, TsDictAdapter
from tsfresh.utilities.distribution import MultiprocessingDistributor
TEST_DATA_EXPECTED_TUPLES = \
[(10, 'a', pd.Series([36, 71, 27, 62, 56, 58, 67, 11, 2, 24, 45, 30, 0,
9, 41, 28, 33, 19, 29, 43],
index=[10] * 20)),
(10, 'b', pd.Series([78, 37, 23, 44, 6, 3, 21, 61, 39, 31, 53, 16, 66,
50, 40, 47, 7, 42, 38, 55],
index=[10] * 20)),
(500, 'a', pd.Series([76, 72, 74, 75, 32, 64, 46, 35, 15, 70, 57, 65,
51, 26, 5, 25, 10, 69, 73, 77],
index=[500] * 20)),
(500, 'b', pd.Series([8, 60, 12, 68, 22, 17, 18, 63, 49, 34, 20, 52,
48, 14, 79, 4, 1, 59, 54, 13],
index=[500] * 20))]
WIDE_TEST_DATA_EXPECTED_TUPLES = \
[(10, 'a', pd.Series([11, 9, 67, 45, 30, 58, 62, 19, 56, 29, 0, 27, 36,
43, 33, 2, 24, 71, 41, 28],
index=list(range(20)))),
(10, 'b', pd.Series([50, 40, 39, 7, 53, 23, 16, 37, 66, 38, 6, 47, 3,
61, 44, 42, 78, 31, 21, 55],
index=list(range(20)))),
(500, 'a', pd.Series([15, 35, 25, 32, 69, 65, 70, 64, 51, 46, 5, 77,
26, 73, 76, 75, 72, 74, 10, 57],
index=list(range(20, 40)))),
(500, 'b', pd.Series([4, 14, 68, 22, 18, 52, 54, 60, 79, 12, 49, 63,
8, 59, 1, 13, 20, 17, 48, 34],
index=list(range(20, 40))))]
class DataAdapterTestCase(DataTestCase):
def test_long_tsframe(self):
df = self.create_test_data_sample()
data = LongTsFrameAdapter(df, "id", "kind", "val", "sort")
self.assert_tsdata(data, TEST_DATA_EXPECTED_TUPLES)
def test_wide_tsframe(self):
df = self.create_test_data_sample_wide()
data = WideTsFrameAdapter(df, "id", "sort")
self.assert_tsdata(data, WIDE_TEST_DATA_EXPECTED_TUPLES)
def test_dict_tsframe(self):
df = {key: df for key, df in self.create_test_data_sample().groupby(["kind"])}
data = TsDictAdapter(df, "id", "val", "sort")
self.assert_tsdata(data, TEST_DATA_EXPECTED_TUPLES)
def assert_tsdata(self, data, expected):
self.assertEqual(len(data), len(expected))
self.assertEqual(sum(1 for _ in data), len(data))
self.assertEqual(sum(1 for _ in data.partition(1)), len(expected))
self.assertEqual((sum(sum(1 for _ in g) for g in data.partition(1))), len(data))
self.assert_data_chunk_object_equal(data, expected)
def assert_data_chunk_object_equal(self, result, expected):
dic_result = {str(x[0]) + "_" + str(x[1]): x[2] for x in result}
dic_expected = {str(x[0]) + "_" + str(x[1]): x[2] for x in expected}
for k in dic_result.keys():
            pd.testing.assert_series_equal(dic_result[k], dic_expected[k], check_names=False)
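# --- Illustrative sketch (not part of the tsfresh test suite) ---
# Every adapter above yields (id, kind, series) chunks; a minimal hand-built
# chunk matching the fixture format would look like:
_example_chunk_sketch = (10, 'a', pd.Series([36, 71], index=[10, 10]))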
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from google.cloud import bigquery
# %reload_ext google.cloud.bigquery
client = bigquery.Client()
# %load_ext google.cloud.bigquery
# +
from notebooks import parameters
DATASET = parameters.LATEST_DATASET
LOOKUP_TABLES = parameters.LOOKUP_TABLES
print(f"Dataset to use: {DATASET}")
print(f"Lookup tables: {LOOKUP_TABLES}")
# +
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import matplotlib.pyplot as plt
import os
plt.style.use('ggplot')
pd.options.display.max_rows = 999
pd.options.display.max_columns = 999
pd.options.display.max_colwidth = 999
def cstr(s, color='black'):
return "<text style=color:{}>{}</text>".format(color, s)
# -
cwd = os.getcwd()
cwd = str(cwd)
print("Current working directory is: {cwd}".format(cwd=cwd))
# ### Get the list of HPO IDs
#
# ### NOTE: This assumes that all of the relevant HPOs have a person table.
hpo_id_query = f"""
SELECT REPLACE(table_id, '_person', '') AS src_hpo_id
FROM
`{DATASET}.__TABLES__`
WHERE table_id LIKE '%person'
AND table_id
NOT LIKE '%unioned_ehr_%'
AND table_id NOT LIKE '\\\_%'
"""
site_df = pd.io.gbq.read_gbq(hpo_id_query, dialect='standard')
get_full_names = f"""
select * from {LOOKUP_TABLES}.hpo_site_id_mappings
"""
full_names_df = pd.io.gbq.read_gbq(get_full_names, dialect='standard')
# +
full_names_df.columns = ['org_id', 'src_hpo_id', 'site_name', 'display_order']
columns_to_use = ['src_hpo_id', 'site_name']
full_names_df = full_names_df[columns_to_use]
full_names_df['src_hpo_id'] = full_names_df['src_hpo_id'].str.lower()
# +
cols_to_join = ['src_hpo_id']
site_df = pd.merge(site_df, full_names_df, on=['src_hpo_id'], how='left')
# -
# # No data point exists beyond 30 days of the death date. (Achilles rule_id #3)
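# The illustrative cell below (not part of the original notebook) shows the same
# 30-day rule in pandas that the SQL queries in this section implement.
# +
rule_sketch = pd.DataFrame({
    'visit_start_date': pd.to_datetime(['2020-01-10', '2020-03-15']),
    'death_date': pd.to_datetime(['2020-01-01', '2020-01-01']),
})
# A record is flagged when it starts more than 30 days after the death date.
rule_sketch['wrong_death_date'] = (
    rule_sketch['visit_start_date'] - rule_sketch['death_date']).dt.days > 30
# -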
# ## Visit Occurrence Table
# +
######################################
print('Getting the data from the database...')
######################################
temporal_df = pd.io.gbq.read_gbq('''
SELECT
src_hpo_id,
COUNT(*) AS total,
sum(case when (DATE_DIFF(visit_start_date, death_date, DAY)>30) then 1 else 0 end) as wrong_death_date
FROM
`{DATASET}.unioned_ehr_visit_occurrence` AS t1
INNER JOIN
`{DATASET}.unioned_ehr_death` AS t2
ON
t1.person_id=t2.person_id
INNER JOIN
(SELECT
DISTINCT *
FROM
`{DATASET}._mapping_visit_occurrence`) AS t3
ON
t1.visit_occurrence_id=t3.visit_occurrence_id
GROUP BY
1
'''.format(DATASET=DATASET), dialect='standard')
temporal_df.shape
print(temporal_df.shape[0], 'records received.')
# -
temporal_df['failure_rate'] = round(
100 * temporal_df['wrong_death_date'] / temporal_df['total'], 1)
temporal_df
# - Main reason: the death date was entered as a default value ("1890").
visit_occurrence = temporal_df.rename(
columns={"failure_rate": "visit_occurrence"})
visit_occurrence = visit_occurrence[["src_hpo_id", "visit_occurrence"]]
visit_occurrence = visit_occurrence.fillna(0)
visit_occurrence
# ## Condition Occurrence Table
# +
######################################
print('Getting the data from the database...')
######################################
temporal_df = pd.io.gbq.read_gbq('''
SELECT
src_hpo_id,
COUNT(*) AS total,
sum(case when (DATE_DIFF(condition_start_date, death_date, DAY)>30) then 1 else 0 end) as wrong_death_date
FROM
`{DATASET}.unioned_ehr_condition_occurrence` AS t1
INNER JOIN
`{DATASET}.unioned_ehr_death` AS t2
ON
t1.person_id=t2.person_id
INNER JOIN
(SELECT
DISTINCT *
FROM
`{DATASET}._mapping_condition_occurrence`) AS t3
ON
t1.condition_occurrence_id=t3.condition_occurrence_id
GROUP BY
1
'''.format(DATASET=DATASET),
dialect='standard')
temporal_df.shape
print(temporal_df.shape[0], 'records received.')
# -
temporal_df['failure_rate'] = round(
100 * temporal_df['wrong_death_date'] / temporal_df['total'], 1)
temporal_df
condition_occurrence = temporal_df.rename(
columns={"failure_rate": "condition_occurrence"})
condition_occurrence = condition_occurrence[[
"src_hpo_id", "condition_occurrence"
]]
condition_occurrence = condition_occurrence.fillna(0)
condition_occurrence
# ## Drug Exposure Table
# +
######################################
print('Getting the data from the database...')
######################################
temporal_df = pd.io.gbq.read_gbq('''
SELECT
src_hpo_id,
COUNT(*) AS total,
sum(case when (DATE_DIFF(drug_exposure_start_date, death_date, DAY)>30) then 1 else 0 end) as wrong_death_date
FROM
`{DATASET}.unioned_ehr_drug_exposure` AS t1
INNER JOIN
`{DATASET}.unioned_ehr_death` AS t2
ON
t1.person_id=t2.person_id
INNER JOIN
(SELECT
DISTINCT *
FROM
`{DATASET}._mapping_drug_exposure`) AS t3
ON
t1.drug_exposure_id=t3.drug_exposure_id
GROUP BY
1
'''.format(DATASET=DATASET), dialect='standard')
temporal_df.shape
print(temporal_df.shape[0], 'records received.')
# -
temporal_df['failure_rate'] = round(
100 * temporal_df['wrong_death_date'] / temporal_df['total'], 1)
temporal_df
drug_exposure = temporal_df.rename(columns={"failure_rate": "drug_exposure"})
drug_exposure = drug_exposure[["src_hpo_id", "drug_exposure"]]
drug_exposure = drug_exposure.fillna(0)
drug_exposure
# ## Measurement Table
# +
######################################
print('Getting the data from the database...')
######################################
temporal_df = pd.io.gbq.read_gbq('''
SELECT
src_hpo_id,
COUNT(*) AS total,
sum(case when (DATE_DIFF(measurement_date, death_date, DAY)>30) then 1 else 0 end) as wrong_death_date
FROM
`{DATASET}.unioned_ehr_measurement` AS t1
INNER JOIN
`{DATASET}.unioned_ehr_death` AS t2
ON
t1.person_id=t2.person_id
INNER JOIN
(SELECT
DISTINCT *
FROM
`{DATASET}._mapping_measurement`) AS t3
ON
t1.measurement_id=t3.measurement_id
GROUP BY
1
'''.format(DATASET=DATASET),dialect='standard')
temporal_df.shape
print(temporal_df.shape[0], 'records received.')
# -
temporal_df['failure_rate'] = round(
100 * temporal_df['wrong_death_date'] / temporal_df['total'], 1)
temporal_df
measurement = temporal_df.rename(columns={"failure_rate": "measurement"})
measurement = measurement[["src_hpo_id", "measurement"]]
measurement = measurement.fillna(0)
measurement
# ## Procedure Occurrence Table
# +
######################################
print('Getting the data from the database...')
######################################
temporal_df = pd.io.gbq.read_gbq('''
SELECT
src_hpo_id,
COUNT(*) AS total,
sum(case when (DATE_DIFF(procedure_date, death_date, DAY)>30) then 1 else 0 end) as wrong_death_date
FROM
`{DATASET}.unioned_ehr_procedure_occurrence` AS t1
INNER JOIN
`{DATASET}.unioned_ehr_death` AS t2
ON
t1.person_id=t2.person_id
INNER JOIN
(SELECT
DISTINCT *
FROM
`{DATASET}._mapping_procedure_occurrence`) AS t3
ON
t1.procedure_occurrence_id=t3.procedure_occurrence_id
GROUP BY
1
'''.format(DATASET=DATASET), dialect='standard')
temporal_df.shape
print(temporal_df.shape[0], 'records received.')
# -
temporal_df['failure_rate'] = round(
100 * temporal_df['wrong_death_date'] / temporal_df['total'], 1)
temporal_df
procedure_occurrence = temporal_df.rename(
columns={"failure_rate": "procedure_occurrence"})
procedure_occurrence = procedure_occurrence[[
"src_hpo_id", "procedure_occurrence"
]]
procedure_occurrence = procedure_occurrence.fillna(0)
procedure_occurrence
# ## Observation Table
# +
######################################
print('Getting the data from the database...')
######################################
temporal_df = pd.io.gbq.read_gbq('''
SELECT
src_hpo_id,
COUNT(*) AS total,
sum(case when (DATE_DIFF(observation_date, death_date, DAY)>30) then 1 else 0 end) as wrong_death_date
FROM
`{DATASET}.unioned_ehr_observation` AS t1
INNER JOIN
`{DATASET}.unioned_ehr_death` AS t2
ON
t1.person_id=t2.person_id
INNER JOIN
(SELECT
DISTINCT *
FROM
`{DATASET}._mapping_observation`) AS t3
ON
t1.observation_id=t3.observation_id
GROUP BY
1
'''.format(DATASET=DATASET), dialect='standard')
temporal_df.shape
print(temporal_df.shape[0], 'records received.')
# -
temporal_df['failure_rate'] = round(
100 * temporal_df['wrong_death_date'] / temporal_df['total'], 1)
temporal_df
observation = temporal_df.rename(columns={"failure_rate": "observation"})
observation = observation[["src_hpo_id", "observation"]]
observation = observation.fillna(0)
observation
# ## 4. Success Rate Temporal Data Points - Data After Death Date
datas = [
condition_occurrence, drug_exposure, measurement, procedure_occurrence,
observation]
master_df = visit_occurrence
for filename in datas:
    master_df = pd.merge(master_df, filename, on='src_hpo_id', how='outer')
from datetime import timedelta
from functools import partial
from itertools import permutations
import dask.bag as db
import numpy as np
import pandas as pd
import pandas.testing as pdt
import pytest
from hypothesis import given, settings
from hypothesis import strategies as st
from kartothek.core.cube.conditions import (
C,
Conjunction,
EqualityCondition,
GreaterEqualCondition,
GreaterThanCondition,
InequalityCondition,
InIntervalCondition,
IsInCondition,
LessEqualCondition,
LessThanCondition,
)
from kartothek.core.cube.cube import Cube
from kartothek.io.dask.bag_cube import build_cube_from_bag
from kartothek.io.eager import build_dataset_indices
from kartothek.io.eager_cube import append_to_cube, build_cube, remove_partitions
__all__ = (
"apply_condition_unsafe",
"data_no_part",
"fullrange_cube",
"fullrange_data",
"fullrange_df",
"massive_partitions_cube",
"massive_partitions_data",
"massive_partitions_df",
"multipartition_cube",
"multipartition_df",
"no_part_cube",
"no_part_df",
"other_part_cube",
"sparse_outer_cube",
"sparse_outer_data",
"sparse_outer_df",
"sparse_outer_opt_cube",
"sparse_outer_opt_df",
"test_complete",
"test_condition",
"test_condition_on_null",
"test_cube",
"test_delayed_index_build_correction_restriction",
"test_delayed_index_build_partition_by",
"test_df",
"test_fail_blocksize_negative",
"test_fail_blocksize_wrong_type",
"test_fail_blocksize_zero",
"test_fail_empty_dimension_columns",
"test_fail_missing_condition_columns",
"test_fail_missing_dimension_columns",
"test_fail_missing_partition_by",
"test_fail_missing_payload_columns",
"test_fail_no_store_factory",
"test_fail_projection",
"test_fail_unindexed_partition_by",
"test_fail_unstable_dimension_columns",
"test_fail_unstable_partition_by",
"test_filter_select",
"test_hypothesis",
"test_overlay_tricky",
"test_partition_by",
"test_projection",
"test_select",
"test_simple_roundtrip",
"test_sort",
"test_stresstest_index_select_row",
"test_wrong_condition_type",
"testset",
"updated_cube",
"updated_df",
)
@pytest.fixture(scope="module")
def fullrange_data():
return {
"seed": pd.DataFrame(
{
"x": [0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3],
"y": [0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 1, 1, 2, 2, 3, 3],
"z": 0,
"p": [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
"q": [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
"v1": np.arange(16),
"i1": np.arange(16),
}
),
"enrich_dense": pd.DataFrame(
{
"x": [0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3],
"y": [0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 1, 1, 2, 2, 3, 3],
"z": 0,
"p": [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
"q": [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
"v2": np.arange(16),
"i2": np.arange(16),
}
),
"enrich_sparse": pd.DataFrame(
{
"y": [0, 1, 2, 3, 0, 1, 2, 3],
"z": 0,
"p": [0, 0, 1, 1, 0, 0, 1, 1],
"q": [0, 0, 0, 0, 1, 1, 1, 1],
"v3": np.arange(8),
"i3": np.arange(8),
}
),
}
@pytest.fixture(scope="module")
def fullrange_cube(module_store, fullrange_data):
cube = Cube(
dimension_columns=["x", "y", "z"],
partition_columns=["p", "q"],
uuid_prefix="fullrange_cube",
index_columns=["i1", "i2", "i3"],
)
build_cube(data=fullrange_data, store=module_store, cube=cube)
return cube
@pytest.fixture(scope="module")
def multipartition_cube(module_store, fullrange_data, fullrange_cube):
def _gen(part):
result = {}
for dataset_id, df in fullrange_data.items():
df = df.copy()
df["z"] = part
result[dataset_id] = df
return result
cube = fullrange_cube.copy(uuid_prefix="multipartition_cube")
build_cube_from_bag(
data=db.from_sequence([0, 1], partition_size=1).map(_gen),
store=module_store,
cube=cube,
ktk_cube_dataset_ids=["seed", "enrich_dense", "enrich_sparse"],
).compute()
return cube
@pytest.fixture(scope="module")
def sparse_outer_data():
return {
"seed": pd.DataFrame(
{
"x": [0, 1, 0],
"y": [0, 0, 1],
"z": 0,
"p": [0, 1, 2],
"q": 0,
"v1": [0, 3, 7],
"i1": [0, 3, 7],
}
),
"enrich_dense": pd.DataFrame(
{
"x": [0, 0],
"y": [0, 1],
"z": 0,
"p": [0, 2],
"q": 0,
"v2": [0, 7],
"i2": [0, 7],
}
),
"enrich_sparse": pd.DataFrame(
{"y": [0, 0], "z": 0, "p": [0, 1], "q": 0, "v3": [0, 3], "i3": [0, 3]}
),
}
@pytest.fixture(scope="module")
def sparse_outer_cube(module_store, sparse_outer_data):
cube = Cube(
dimension_columns=["x", "y", "z"],
partition_columns=["p", "q"],
uuid_prefix="sparse_outer_cube",
index_columns=["i1", "i2", "i3"],
)
build_cube(data=sparse_outer_data, store=module_store, cube=cube)
return cube
@pytest.fixture(scope="module")
def sparse_outer_opt_cube(
module_store,
sparse_outer_data,
sparse_outer_cube,
sparse_outer_df,
sparse_outer_opt_df,
):
data = {}
for dataset_id in sparse_outer_data.keys():
df = sparse_outer_data[dataset_id].copy()
for col in sparse_outer_opt_df.columns:
if col in df.columns:
dtype = sparse_outer_opt_df[col].dtype
if dtype == np.float64:
dtype = np.int64
elif dtype == np.float32:
dtype = np.int32
elif dtype == np.float16:
dtype = np.int16
df[col] = df[col].astype(dtype)
data[dataset_id] = df
cube = sparse_outer_cube.copy(uuid_prefix="sparse_outer_opt_cube")
build_cube(data=data, store=module_store, cube=cube)
return cube
@pytest.fixture(scope="module")
def massive_partitions_data():
n = 17
return {
"seed": pd.DataFrame(
{
"x": np.arange(n),
"y": np.arange(n),
"z": np.arange(n),
"p": np.arange(n),
"q": np.arange(n),
"v1": np.arange(n),
"i1": np.arange(n),
}
),
"enrich_1": pd.DataFrame(
{
"x": np.arange(n),
"y": np.arange(n),
"z": np.arange(n),
"p": np.arange(n),
"q": np.arange(n),
"v2": np.arange(n),
"i2": np.arange(n),
}
),
"enrich_2": pd.DataFrame(
{
"y": np.arange(n),
"z": np.arange(n),
"p": np.arange(n),
"q": np.arange(n),
"v3": np.arange(n),
"i3": np.arange(n),
}
),
}
@pytest.fixture(scope="module")
def massive_partitions_cube(module_store, massive_partitions_data):
cube = Cube(
dimension_columns=["x", "y", "z"],
partition_columns=["p", "q"],
uuid_prefix="massive_partitions_cube",
index_columns=["i1", "i2", "i3"],
)
build_cube(data=massive_partitions_data, store=module_store, cube=cube)
return cube
@pytest.fixture(scope="module")
def fullrange_df():
return (
pd.DataFrame(
data={
"x": [0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3],
"y": [0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 1, 1, 2, 2, 3, 3],
"z": 0,
"p": [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
"q": [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
"v1": np.arange(16),
"v2": np.arange(16),
"v3": [0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7],
"i1": np.arange(16),
"i2": np.arange(16),
"i3": [0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7],
},
columns=["i1", "i2", "i3", "p", "q", "v1", "v2", "v3", "x", "y", "z"],
)
.sort_values(["x", "y", "z", "p", "q"])
.reset_index(drop=True)
)
@pytest.fixture(scope="module")
def multipartition_df(fullrange_df):
dfs = []
for z in (0, 1):
df = fullrange_df.copy()
df["z"] = z
dfs.append(df)
return (
pd.concat(dfs, ignore_index=True)
.sort_values(["x", "y", "z", "p", "q"])
.reset_index(drop=True)
)
@pytest.fixture(scope="module")
def sparse_outer_df():
return (
pd.DataFrame(
data={
"x": [0, 1, 0],
"y": [0, 0, 1],
"z": 0,
"p": [0, 1, 2],
"q": 0,
"v1": [0, 3, 7],
"v2": [0, np.nan, 7],
"v3": [0, 3, np.nan],
"i1": [0, 3, 7],
"i2": [0, np.nan, 7],
"i3": [0, 3, np.nan],
},
columns=["i1", "i2", "i3", "p", "q", "v1", "v2", "v3", "x", "y", "z"],
)
.sort_values(["x", "y", "z", "p", "q"])
.reset_index(drop=True)
)
@pytest.fixture(scope="module")
def sparse_outer_opt_df(sparse_outer_df):
df = sparse_outer_df.copy()
df["x"] = df["x"].astype(np.int16)
df["y"] = df["y"].astype(np.int32)
df["z"] = df["z"].astype(np.int8)
df["v1"] = df["v1"].astype(np.int8)
df["i1"] = df["i1"].astype(np.int8)
return df
@pytest.fixture(scope="module")
def massive_partitions_df():
n = 17
return (
pd.DataFrame(
data={
"x": np.arange(n),
"y": np.arange(n),
"z": np.arange(n),
"p": np.arange(n),
"q": np.arange(n),
"v1": np.arange(n),
"v2": np.arange(n),
"v3": np.arange(n),
"i1": np.arange(n),
"i2": np.arange(n),
"i3": np.arange(n),
},
columns=["i1", "i2", "i3", "p", "q", "v1", "v2", "v3", "x", "y", "z"],
)
.sort_values(["x", "y", "z", "p", "q"])
.reset_index(drop=True)
)
@pytest.fixture(scope="module")
def updated_cube(module_store, fullrange_data):
cube = Cube(
dimension_columns=["x", "y", "z"],
partition_columns=["p", "q"],
uuid_prefix="updated_cube",
index_columns=["i1", "i2", "i3"],
)
build_cube(
data={
cube.seed_dataset: pd.DataFrame(
{
"x": [0, 0, 1, 1, 2, 2],
"y": [0, 1, 0, 1, 0, 1],
"z": 0,
"p": [0, 0, 1, 1, 2, 2],
"q": 0,
"v1": np.arange(6),
"i1": np.arange(6),
}
),
"enrich": pd.DataFrame(
{
"x": [0, 0, 1, 1, 2, 2],
"y": [0, 1, 0, 1, 0, 1],
"z": 0,
"p": [0, 0, 1, 1, 2, 2],
"q": 0,
"v2": np.arange(6),
"i2": np.arange(6),
}
),
"extra": pd.DataFrame(
{
"y": [0, 1, 0, 1, 0, 1],
"z": 0,
"p": [0, 0, 1, 1, 2, 2],
"q": 0,
"v3": np.arange(6),
"i3": np.arange(6),
}
),
},
store=module_store,
cube=cube,
)
remove_partitions(
cube=cube,
store=module_store,
ktk_cube_dataset_ids=["enrich"],
conditions=C("p") >= 1,
)
append_to_cube(
data={
"enrich": pd.DataFrame(
{
"x": [1, 1],
"y": [0, 1],
"z": 0,
"p": [1, 1],
"q": 0,
"v2": [7, 8],
"i2": [7, 8],
}
)
},
store=module_store,
cube=cube,
)
return cube
@pytest.fixture(scope="module")
def updated_df():
return (
pd.DataFrame(
data={
"x": [0, 0, 1, 1, 2, 2],
"y": [0, 1, 0, 1, 0, 1],
"z": 0,
"p": [0, 0, 1, 1, 2, 2],
"q": 0,
"v1": np.arange(6),
"v2": [0, 1, 7, 8, np.nan, np.nan],
"v3": np.arange(6),
"i1": np.arange(6),
"i2": [0, 1, 7, 8, np.nan, np.nan],
"i3": np.arange(6),
},
columns=["i1", "i2", "i3", "p", "q", "v1", "v2", "v3", "x", "y", "z"],
)
.sort_values(["x", "y", "z", "p", "q"])
.reset_index(drop=True)
)
@pytest.fixture(scope="module")
def data_no_part():
return {
"seed": pd.DataFrame(
{
"x": [0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3],
"y": [0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 1, 1, 2, 2, 3, 3],
"z": 0,
"p": [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
"q": [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
"v1": np.arange(16),
"i1": np.arange(16),
}
),
"enrich_dense": pd.DataFrame(
{
"x": [0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3],
"y": [0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 1, 1, 2, 2, 3, 3],
"z": 0,
"v2": np.arange(16),
"i2": np.arange(16),
}
),
"enrich_sparse": pd.DataFrame(
{"y": [0, 1, 2, 3], "z": 0, "v3": np.arange(4), "i3": np.arange(4)}
),
}
@pytest.fixture(scope="module")
def no_part_cube(module_store, data_no_part):
cube = Cube(
dimension_columns=["x", "y", "z"],
partition_columns=["p", "q"],
uuid_prefix="data_no_part",
index_columns=["i1", "i2", "i3"],
)
build_cube(
data=data_no_part,
store=module_store,
cube=cube,
partition_on={"enrich_dense": [], "enrich_sparse": []},
)
return cube
@pytest.fixture(scope="module")
def other_part_cube(module_store, data_no_part):
cube = Cube(
dimension_columns=["x", "y", "z"],
partition_columns=["p", "q"],
uuid_prefix="other_part_cube",
index_columns=["i1", "i2", "i3"],
)
build_cube(
data=data_no_part,
store=module_store,
cube=cube,
partition_on={"enrich_dense": ["i2"], "enrich_sparse": ["i3"]},
)
return cube
@pytest.fixture(scope="module")
def no_part_df():
return (
pd.DataFrame(
data={
"x": [0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3],
"y": [0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 1, 1, 2, 2, 3, 3],
"z": 0,
"p": [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
"q": [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
"v1": np.arange(16),
"v2": np.arange(16),
"v3": [0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 1, 1, 2, 2, 3, 3],
"i1": np.arange(16),
"i2": np.arange(16),
"i3": [0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 1, 1, 2, 2, 3, 3],
},
columns=["i1", "i2", "i3", "p", "q", "v1", "v2", "v3", "x", "y", "z"],
)
.sort_values(["x", "y", "z", "p", "q"])
.reset_index(drop=True)
)
@pytest.fixture(
params=[
"fullrange",
"multipartition",
"sparse_outer",
"sparse_outer_opt",
"massive_partitions",
"updated",
"no_part",
"other_part",
],
scope="module",
)
def testset(request):
return request.param
@pytest.fixture(scope="module")
def test_cube(
testset,
fullrange_cube,
multipartition_cube,
sparse_outer_cube,
sparse_outer_opt_cube,
massive_partitions_cube,
updated_cube,
no_part_cube,
other_part_cube,
):
if testset == "fullrange":
return fullrange_cube
elif testset == "multipartition":
return multipartition_cube
elif testset == "sparse_outer":
return sparse_outer_cube
elif testset == "sparse_outer_opt":
return sparse_outer_opt_cube
elif testset == "massive_partitions":
return massive_partitions_cube
elif testset == "updated":
return updated_cube
elif testset == "no_part":
return no_part_cube
elif testset == "other_part":
return other_part_cube
else:
raise ValueError("Unknown param {}".format(testset))
@pytest.fixture(scope="module")
def test_df(
testset,
fullrange_df,
multipartition_df,
sparse_outer_df,
sparse_outer_opt_df,
massive_partitions_df,
updated_df,
no_part_df,
):
if testset == "fullrange":
return fullrange_df
elif testset == "multipartition":
return multipartition_df
elif testset == "sparse_outer":
return sparse_outer_df
elif testset == "sparse_outer_opt":
return sparse_outer_opt_df
elif testset == "massive_partitions":
return massive_partitions_df
elif testset == "updated":
return updated_df
elif testset in ("no_part", "other_part"):
return no_part_df
else:
raise ValueError("Unknown param {}".format(testset))
def test_simple_roundtrip(driver, function_store, function_store_rwro):
df = pd.DataFrame({"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v": [10, 11, 12, 13]})
cube = Cube(dimension_columns=["x"], partition_columns=["p"], uuid_prefix="cube")
build_cube(data=df, cube=cube, store=function_store)
result = driver(cube=cube, store=function_store_rwro)
assert len(result) == 1
df_actual = result[0]
df_expected = df.reindex(columns=["p", "v", "x"])
pdt.assert_frame_equal(df_actual, df_expected)
def test_complete(driver, module_store, test_cube, test_df):
result = driver(cube=test_cube, store=module_store)
assert len(result) == 1
df_actual = result[0]
pdt.assert_frame_equal(df_actual, test_df)
def apply_condition_unsafe(df, cond):
# For the sparse_outer testset, the test_df has the wrong datatype because we cannot encode missing integer data in
# pandas.
#
# The condition will not be applicable to the DF because the DF has floats while conditions have ints. We fix that
    # by modifying the condition.
#
# In case there is no missing data because of the right conditions, kartothek will return integer data.
# assert_frame_equal will then complain about this. So in case there is no missing data, let's recover the correct
# dtype here.
if not isinstance(cond, Conjunction):
cond = Conjunction(cond)
float_cols = {col for col in df.columns if df[col].dtype == float}
# convert int to float conditions
cond2 = Conjunction([])
for col, conj in cond.split_by_column().items():
if col in float_cols:
parts = []
for part in conj.conditions:
if isinstance(part, IsInCondition):
part = IsInCondition(
column=part.column, value=tuple((float(v) for v in part.value))
)
elif isinstance(part, InIntervalCondition):
part = InIntervalCondition(
column=part.column,
start=float(part.start),
stop=float(part.stop),
)
else:
part = part.__class__(column=part.column, value=float(part.value))
parts.append(part)
conj = Conjunction(parts)
cond2 &= conj
# apply conditions
df = cond2.filter_df(df).reset_index(drop=True)
# convert float columns to int columns
for col in df.columns:
if df[col].notnull().all():
dtype = df[col].dtype
if dtype == np.float64:
dtype = np.int64
elif dtype == np.float32:
dtype = np.int32
elif dtype == np.float16:
dtype = np.int16
df[col] = df[col].astype(dtype)
return df
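# Illustrative sketch (not one of the original test cases): the conditions used
# below are built with ``C`` and chained with ``&`` into a Conjunction, e.g.:
_example_condition_sketch = (C("x") >= 1) & (C("y") == 3)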
@pytest.mark.parametrize(
"cond",
[
C("v1") >= 7,
C("v1") >= 10000,
C("v2") >= 7,
C("v3") >= 3,
C("i1") >= 7,
C("i1") >= 10000,
C("i2") >= 7,
C("i2") != 0,
C("i3") >= 3,
C("p") >= 1,
C("q") >= 1,
C("x") >= 1,
C("y") >= 1,
(C("x") == 3) & (C("y") == 3),
(C("i1") > 0) & (C("i2") > 0),
Conjunction([]),
],
)
def test_condition(driver, module_store, test_cube, test_df, cond):
result = driver(cube=test_cube, store=module_store, conditions=cond)
df_expected = apply_condition_unsafe(test_df, cond)
if df_expected.empty:
assert len(result) == 0
else:
assert len(result) == 1
df_actual = result[0]
pdt.assert_frame_equal(df_actual, df_expected)
@pytest.mark.parametrize("payload_columns", [["v1", "v2"], ["v2", "v3"], ["v3"]])
def test_select(driver, module_store, test_cube, test_df, payload_columns):
result = driver(cube=test_cube, store=module_store, payload_columns=payload_columns)
assert len(result) == 1
df_actual = result[0]
df_expected = test_df.loc[
:, sorted(set(payload_columns) | {"x", "y", "z", "p", "q"})
]
pdt.assert_frame_equal(df_actual, df_expected)
def test_filter_select(driver, module_store, test_cube, test_df):
result = driver(
cube=test_cube,
store=module_store,
payload_columns=["v1", "v2"],
conditions=(C("i3") >= 3), # completely unrelated to the payload
)
assert len(result) == 1
df_actual = result[0]
df_expected = test_df.loc[
test_df["i3"] >= 3, ["p", "q", "v1", "v2", "x", "y", "z"]
].reset_index(drop=True)
pdt.assert_frame_equal(df_actual, df_expected)
@pytest.mark.parametrize(
"partition_by",
[["i1"], ["i2"], ["i3"], ["x"], ["y"], ["p"], ["q"], ["i1", "i2"], ["x", "y"]],
)
def test_partition_by(driver, module_store, test_cube, test_df, partition_by):
dfs_actual = driver(cube=test_cube, store=module_store, partition_by=partition_by)
dfs_expected = [
df_g.reset_index(drop=True)
for g, df_g in test_df.groupby(partition_by, sort=True)
]
for df_expected in dfs_expected:
for col in df_expected.columns:
if df_expected[col].dtype == float:
try:
df_expected[col] = df_expected[col].astype(int)
except Exception:
pass
assert len(dfs_actual) == len(dfs_expected)
for df_actual, df_expected in zip(dfs_actual, dfs_expected):
pdt.assert_frame_equal(df_actual, df_expected)
@pytest.mark.parametrize("dimension_columns", list(permutations(["x", "y", "z"])))
def test_sort(driver, module_store, test_cube, test_df, dimension_columns):
result = driver(
cube=test_cube, store=module_store, dimension_columns=dimension_columns
)
assert len(result) == 1
df_actual = result[0]
df_expected = test_df.sort_values(
list(dimension_columns) + list(test_cube.partition_columns)
).reset_index(drop=True)
pdt.assert_frame_equal(df_actual, df_expected)
@pytest.mark.parametrize("payload_columns", [["y", "z"], ["y", "z", "v3"]])
def test_projection(driver, module_store, test_cube, test_df, payload_columns):
result = driver(
cube=test_cube,
store=module_store,
dimension_columns=["y", "z"],
payload_columns=payload_columns,
)
assert len(result) == 1
df_actual = result[0]
df_expected = (
test_df.loc[:, sorted(set(payload_columns) | {"y", "z", "p", "q"})]
.drop_duplicates()
.sort_values(["y", "z", "p", "q"])
.reset_index(drop=True)
)
pdt.assert_frame_equal(df_actual, df_expected)
def test_stresstest_index_select_row(driver, function_store):
n_indices = 100
n_rows = 1000
data = {"x": np.arange(n_rows), "p": 0}
for i in range(n_indices):
data["i{}".format(i)] = np.arange(n_rows)
df = pd.DataFrame(data)
cube = Cube(
dimension_columns=["x"],
partition_columns=["p"],
uuid_prefix="cube",
index_columns=["i{}".format(i) for i in range(n_indices)],
)
build_cube(data=df, cube=cube, store=function_store)
conditions = Conjunction([(C("i{}".format(i)) == 0) for i in range(n_indices)])
result = driver(
cube=cube,
store=function_store,
conditions=conditions,
payload_columns=["p", "x"],
)
assert len(result) == 1
df_actual = result[0]
df_expected = df.loc[df["x"] == 0].reindex(columns=["p", "x"])
pdt.assert_frame_equal(df_actual, df_expected)
def test_fail_missing_dimension_columns(driver, module_store, test_cube, test_df):
with pytest.raises(ValueError) as exc:
driver(cube=test_cube, store=module_store, dimension_columns=["x", "a", "b"])
assert (
"Following dimension columns were requested but are missing from the cube: a, b"
in str(exc.value)
)
def test_fail_empty_dimension_columns(driver, module_store, test_cube, test_df):
with pytest.raises(ValueError) as exc:
driver(cube=test_cube, store=module_store, dimension_columns=[])
assert "Dimension columns cannot be empty." in str(exc.value)
def test_fail_missing_partition_by(driver, module_store, test_cube, test_df):
with pytest.raises(ValueError) as exc:
driver(cube=test_cube, store=module_store, partition_by=["foo"])
assert (
"Following partition-by columns were requested but are missing from the cube: foo"
in str(exc.value)
)
def test_fail_unindexed_partition_by(driver, module_store, test_cube, test_df):
with pytest.raises(ValueError) as exc:
driver(cube=test_cube, store=module_store, partition_by=["v1", "v2"])
assert (
"Following partition-by columns are not indexed and cannot be used: v1, v2"
in str(exc.value)
)
def test_fail_missing_condition_columns(driver, module_store, test_cube, test_df):
with pytest.raises(ValueError) as exc:
driver(
cube=test_cube,
store=module_store,
conditions=(C("foo") == 1) & (C("bar") == 2),
)
assert (
"Following condition columns are required but are missing from the cube: bar, foo"
in str(exc.value)
)
def test_fail_missing_payload_columns(driver, module_store, test_cube, test_df):
with pytest.raises(ValueError) as exc:
driver(cube=test_cube, store=module_store, payload_columns=["foo", "bar"])
assert "Cannot find the following requested payload columns: bar, foo" in str(
exc.value
)
def test_fail_projection(driver, module_store, test_cube, test_df):
with pytest.raises(ValueError) as exc:
driver(
cube=test_cube,
store=module_store,
dimension_columns=["y", "z"],
payload_columns=["v1"],
)
assert (
'Cannot project dataset "seed" with dimensionality [x, y, z] to [y, z] '
"while keeping the following payload intact: v1" in str(exc.value)
)
def test_fail_unstable_dimension_columns(driver, module_store, test_cube, test_df):
with pytest.raises(TypeError) as exc:
driver(cube=test_cube, store=module_store, dimension_columns={"x", "y"})
assert "which has type set has an unstable iteration order" in str(exc.value)
def test_fail_unstable_partition_by(driver, module_store, test_cube, test_df):
with pytest.raises(TypeError) as exc:
driver(cube=test_cube, store=module_store, partition_by={"x", "y"})
assert "which has type set has an unstable iteration order" in str(exc.value)
def test_wrong_condition_type(driver, function_store, driver_name):
types = {
"int": pd.Series([-1], dtype=np.int64),
"uint": pd.Series([1], dtype=np.uint64),
"float": pd.Series([1.3], dtype=np.float64),
"bool": pd.Series([True], dtype=np.bool_),
"str": pd.Series(["foo"], dtype=object),
}
cube = Cube(
dimension_columns=["d_{}".format(t) for t in sorted(types.keys())],
partition_columns=["p_{}".format(t) for t in sorted(types.keys())],
uuid_prefix="typed_cube",
index_columns=["i_{}".format(t) for t in sorted(types.keys())],
)
data = {
"seed": pd.DataFrame(
{
"{}_{}".format(prefix, t): types[t]
for t in sorted(types.keys())
for prefix in ["d", "p", "v1"]
}
),
"enrich": pd.DataFrame(
{
"{}_{}".format(prefix, t): types[t]
for t in sorted(types.keys())
for prefix in ["d", "p", "i", "v2"]
}
),
}
build_cube(data=data, store=function_store, cube=cube)
df = pd.DataFrame(
{
"{}_{}".format(prefix, t): types[t]
for t in sorted(types.keys())
for prefix in ["d", "p", "i", "v1", "v2"]
}
)
for col in df.columns:
t1 = col.split("_")[1]
for t2 in sorted(types.keys()):
cond = C(col) == types[t2].values[0]
if t1 == t2:
result = driver(cube=cube, store=function_store, conditions=cond)
assert len(result) == 1
df_actual = result[0]
df_expected = cond.filter_df(df).reset_index(drop=True)
pdt.assert_frame_equal(df_actual, df_expected, check_like=True)
else:
with pytest.raises(TypeError) as exc:
driver(cube=cube, store=function_store, conditions=cond)
assert "has wrong type" in str(exc.value)
def test_condition_on_null(driver, function_store):
df = pd.DataFrame(
{
"x": | pd.Series([0, 1, 2], dtype=np.int64) | pandas.Series |
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import pytest
import numpy as np
import pandas
from pandas.testing import assert_index_equal
import matplotlib
import modin.pandas as pd
import sys
from modin.pandas.test.utils import (
NROWS,
RAND_LOW,
RAND_HIGH,
df_equals,
arg_keys,
name_contains,
test_data,
test_data_values,
test_data_keys,
axis_keys,
axis_values,
int_arg_keys,
int_arg_values,
create_test_dfs,
eval_general,
generate_multiindex,
extra_test_parameters,
)
from modin.config import NPartitions
NPartitions.put(4)
# Force matplotlib to not use any Xwindows backend.
matplotlib.use("Agg")
def eval_setitem(md_df, pd_df, value, col=None, loc=None):
if loc is not None:
col = pd_df.columns[loc]
value_getter = value if callable(value) else (lambda *args, **kwargs: value)
eval_general(
md_df, pd_df, lambda df: df.__setitem__(col, value_getter(df)), __inplace__=True
)
@pytest.mark.parametrize(
"dates",
[
["2018-02-27 09:03:30", "2018-02-27 09:04:30"],
["2018-02-27 09:03:00", "2018-02-27 09:05:00"],
],
)
@pytest.mark.parametrize("subset", ["a", "b", ["a", "b"], None])
def test_asof_with_nan(dates, subset):
data = {"a": [10, 20, 30, 40, 50], "b": [None, None, None, None, 500]}
index = pd.DatetimeIndex(
[
"2018-02-27 09:01:00",
"2018-02-27 09:02:00",
"2018-02-27 09:03:00",
"2018-02-27 09:04:00",
"2018-02-27 09:05:00",
]
)
modin_where = pd.DatetimeIndex(dates)
pandas_where = pandas.DatetimeIndex(dates)
compare_asof(data, index, modin_where, pandas_where, subset)
@pytest.mark.parametrize(
"dates",
[
["2018-02-27 09:03:30", "2018-02-27 09:04:30"],
["2018-02-27 09:03:00", "2018-02-27 09:05:00"],
],
)
@pytest.mark.parametrize("subset", ["a", "b", ["a", "b"], None])
def test_asof_without_nan(dates, subset):
data = {"a": [10, 20, 30, 40, 50], "b": [70, 600, 30, -200, 500]}
index = pd.DatetimeIndex(
[
"2018-02-27 09:01:00",
"2018-02-27 09:02:00",
"2018-02-27 09:03:00",
"2018-02-27 09:04:00",
"2018-02-27 09:05:00",
]
)
modin_where = pd.DatetimeIndex(dates)
pandas_where = pandas.DatetimeIndex(dates)
compare_asof(data, index, modin_where, pandas_where, subset)
@pytest.mark.parametrize(
"lookup",
[
[60, 70, 90],
[60.5, 70.5, 100],
],
)
@pytest.mark.parametrize("subset", ["col2", "col1", ["col1", "col2"], None])
def test_asof_large(lookup, subset):
data = test_data["float_nan_data"]
index = list(range(NROWS))
modin_where = pd.Index(lookup)
pandas_where = pandas.Index(lookup)
compare_asof(data, index, modin_where, pandas_where, subset)
def compare_asof(
data, index, modin_where: pd.Index, pandas_where: pandas.Index, subset
):
modin_df = pd.DataFrame(data, index=index)
pandas_df = pandas.DataFrame(data, index=index)
df_equals(
modin_df.asof(modin_where, subset=subset),
pandas_df.asof(pandas_where, subset=subset),
)
df_equals(
modin_df.asof(modin_where.values, subset=subset),
pandas_df.asof(pandas_where.values, subset=subset),
)
df_equals(
modin_df.asof(list(modin_where.values), subset=subset),
pandas_df.asof(list(pandas_where.values), subset=subset),
)
df_equals(
modin_df.asof(modin_where.values[0], subset=subset),
pandas_df.asof(pandas_where.values[0], subset=subset),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_first_valid_index(data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.first_valid_index() == (pandas_df.first_valid_index())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("n", int_arg_values, ids=arg_keys("n", int_arg_keys))
def test_head(data, n):
# Test normal dataframe head
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.head(n), pandas_df.head(n))
df_equals(modin_df.head(len(modin_df) + 1), pandas_df.head(len(pandas_df) + 1))
# Test head when we call it from a QueryCompilerView
modin_result = modin_df.loc[:, ["col1", "col3", "col3"]].head(n)
pandas_result = pandas_df.loc[:, ["col1", "col3", "col3"]].head(n)
df_equals(modin_result, pandas_result)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iat(data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
modin_df.iat()
@pytest.mark.gpu
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iloc(request, data):
modin_df = pd.DataFrame(data)
pandas_df = | pandas.DataFrame(data) | pandas.DataFrame |
"""\
Main class and helper functions.
"""
import warnings
import collections.abc as cabc
from collections import OrderedDict
from copy import copy, deepcopy
from enum import Enum
from functools import partial, singledispatch
from pathlib import Path
from os import PathLike
from textwrap import dedent
from typing import Any, Union, Optional # Meta
from typing import Iterable, Sequence, Mapping, MutableMapping # Generic ABCs
from typing import Tuple, List # Generic
import h5py
from natsort import natsorted
import numpy as np
from numpy import ma
import pandas as pd
from pandas.api.types import infer_dtype, is_string_dtype, is_categorical_dtype
from scipy import sparse
from scipy.sparse import issparse, csr_matrix
from .raw import Raw
from .index import _normalize_indices, _subset, Index, Index1D, get_vector
from .file_backing import AnnDataFileManager, to_memory
from .access import ElementRef
from .aligned_mapping import (
AxisArrays,
AxisArraysView,
PairwiseArrays,
PairwiseArraysView,
Layers,
LayersView,
)
from .views import (
ArrayView,
DictView,
DataFrameView,
as_view,
_resolve_idxs,
)
from .sparse_dataset import SparseDataset
from .. import utils
from ..utils import convert_to_dict, ensure_df_homogeneous
from ..logging import anndata_logger as logger
from ..compat import (
ZarrArray,
ZappyArray,
DaskArray,
Literal,
_slice_uns_sparse_matrices,
_move_adj_mtx,
_overloaded_uns,
OverloadedDict,
)
class StorageType(Enum):
Array = np.ndarray
Masked = ma.MaskedArray
Sparse = sparse.spmatrix
ZarrArray = ZarrArray
ZappyArray = ZappyArray
DaskArray = DaskArray
@classmethod
def classes(cls):
return tuple(c.value for c in cls.__members__.values())
# for backwards compat
def _find_corresponding_multicol_key(key, keys_multicol):
"""Find the corresponding multicolumn key."""
for mk in keys_multicol:
if key.startswith(mk) and "of" in key:
return mk
return None
# for backwards compat
def _gen_keys_from_multicol_key(key_multicol, n_keys):
"""Generates single-column keys from multicolumn key."""
keys = [f"{key_multicol}{i + 1:03}of{n_keys:03}" for i in range(n_keys)]
return keys
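# Illustrative sketch (not part of the original module): how the two
# backwards-compat helpers above round-trip a multicolumn key. The key name
# "X_pca" is a hypothetical example.
def _example_multicol_key_roundtrip():  # pragma: no cover - example only
    keys = _gen_keys_from_multicol_key("X_pca", 3)
    # keys == ['X_pca001of003', 'X_pca002of003', 'X_pca003of003']
    parents = [_find_corresponding_multicol_key(k, ["X_pca"]) for k in keys]
    # parents == ['X_pca', 'X_pca', 'X_pca']
    return keys, parents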
def _check_2d_shape(X):
"""\
Check shape of array or sparse matrix.
Assure that X is always 2D: Unlike numpy we always deal with 2D arrays.
"""
if X.dtype.names is None and len(X.shape) != 2:
raise ValueError(
f"X needs to be 2-dimensional, not {len(X.shape)}-dimensional."
)
@singledispatch
def _gen_dataframe(anno, length, index_names):
if anno is None or len(anno) == 0:
return pd.DataFrame(index= | pd.RangeIndex(0, length, name=None) | pandas.RangeIndex |
#!/usr/bin/env python
# coding: utf-8
# ## Exploration of domain adaptation algorithms
# In[1]:
import time
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
from transfertools.models import CORAL, TCA
get_ipython().run_line_magic('reload_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
# ### Simulated data
#
# How do domain adaptation algorithms available in `transfertools` scale with the number of samples/features?
# In[2]:
def coral_samples(n_samples, n_features, tol=1e-8, seed=1):
np.random.seed(seed)
t = time.time()
transform = CORAL(scaling='none', tol=tol)
Xs = np.random.normal(size=(n_samples, n_features))
Xt = np.random.normal(size=(n_samples, n_features))
Xs_trans, Xt_trans = transform.fit_transfer(Xs, Xt)
return time.time() - t
def tca_samples(n_samples, n_features, n_components=10, seed=1):
np.random.seed(seed)
t = time.time()
transform = TCA(scaling='none',
n_components=n_components)
Xs = np.random.normal(size=(n_samples, n_features))
Xt = np.random.normal(size=(n_samples, n_features))
Xs_trans, Xt_trans = transform.fit_transfer(Xs, Xt)
return time.time() - t
# In[3]:
coral_times = []
tca_times = []
n_samples = 1000
n_feats_list = [10, 50, 100, 500, 1000, 2000]
for n_features in tqdm(n_feats_list):
coral_times.append((n_features,
coral_samples(n_samples, n_features)))
tca_times.append((n_features,
tca_samples(n_samples, n_features)))
# In[4]:
print(coral_times)
print(tca_times)
# In[5]:
sns.set()
coral_plot_times = list(zip(*coral_times))
tca_plot_times = list(zip(*tca_times))
plt.plot(coral_plot_times[0], coral_plot_times[1], label='CORAL')
plt.plot(tca_plot_times[0], tca_plot_times[1], label='TCA')
plt.xlabel('Number of features')
plt.ylabel('Runtime (seconds)')
plt.legend()
# In[6]:
coral_samples_times = []
tca_samples_times = []
n_features = 1000
n_samples_list = [10, 50, 100, 500, 1000, 2000]
for n_samples in tqdm(n_samples_list):
coral_samples_times.append((n_samples,
coral_samples(n_samples, n_features)))
tca_samples_times.append((n_samples,
tca_samples(n_samples, n_features)))
# In[7]:
print(coral_samples_times)
print(tca_samples_times)
# In[8]:
sns.set()
coral_plot_times = list(zip(*coral_samples_times))
tca_plot_times = list(zip(*tca_samples_times))
plt.plot(coral_plot_times[0], coral_plot_times[1], label='CORAL')
plt.plot(tca_plot_times[0], tca_plot_times[1], label='TCA')
plt.xlabel('Number of samples')
plt.ylabel('Runtime (seconds)')
plt.legend()
# ### Real data
#
# Does CORAL help us generalize our mutation prediction classifiers across cancer types?
# In[9]:
import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import pancancer_evaluation.config as cfg
import pancancer_evaluation.utilities.analysis_utilities as au
# In[10]:
lambda_vals = [0.01, 0.1, 1, 10, 100, 1000, 10000, 100000, 1000000]
coral_df = | pd.DataFrame() | pandas.DataFrame |
""" Classifier class file """
from collections import OrderedDict
from pathlib import Path
import pickle
import os
import warnings
from zlib import crc32
import pandas as pd
import numpy as np
from sklearn.metrics import f1_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from .utils import napari_info
def make_identifier(df): # pylint: disable-msg=C0103, C0116
str_id = df.apply(lambda x: "_".join(map(str, x)), axis=1)
return str_id
def test_set_check(identifier, test_ratio): # pylint: disable-msg=C0116
return crc32(np.int64(hash(identifier))) & 0xFFFFFFFF < test_ratio * 2**32
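# Illustrative sketch (not part of the original module): how the two helpers
# above assign rows to the test split. The "site" and "label" columns are
# hypothetical example data.
def _example_hash_based_split():  # pragma: no cover - example only
    example = pd.DataFrame({"site": ["s1", "s1", "s2"], "label": [1, 2, 3]})
    identifiers = make_identifier(example[["site", "label"]])
    # identifiers == ["s1_1", "s1_2", "s2_3"]; each identifier hashes to a
    # bucket, and roughly test_ratio of the buckets land in the test set.
    return identifiers.apply(test_set_check, args=(0.2,))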
def load_classifier(classifier_path): # pylint: disable-msg=C0116
with open(classifier_path, "rb") as f: # pylint: disable-msg=C0103
clf = pickle.loads(f.read())
return clf
# pylint: disable-msg=C0116
def rename_classifier(classifier_path, new_name, delete_old_version=False):
with open(classifier_path, "rb") as f: # pylint: disable-msg=C0103
clf = pickle.loads(f.read())
clf.name = new_name
clf.save()
if delete_old_version:
os.remove(classifier_path)
# pylint: disable-msg=R0902, R0913
class Classifier:
"""
Classifier class to classify objects by a set of features
    Parameters
    ----------
name: str
Name of the classifier. E.g. "test". Will then be saved as test.clf
features: pd.DataFrame
Dataframe containing the features used for classification
training_features: list
List of features that are used for training the classifier
method: str
What classification method is used. Defaults to rfc => RandomForestClassifier
Could also use "lrc" for a logistic regression classifier
directory: pathlib.Path
Directory where the classifier is saved
index_columns: list or tuple
Columns that are used to index the dataframe
Attributes
----------
name: str
Name of the classifier. E.g. "test". Will then be saved as test.clf
directory: pathlib.Path
Directory where the classifier is saved
clf: sklearn classifier class
sklearn classifier that is used
index_columns: list or tuple
Columns that are used to index the dataframe
train_data: pd.DataFrame
Dataframe containing a "train" column to save annotations by the user
predict_data: pd.DataFrame
Dataframe containing a "predict" column to save predictions made by the
classifier
training_features: list
List of features that are used for training the classifier
data: pd.DataFrame
        Dataframe containing only the features defined in "training_features"
is_trained: boolean
Flag of whether the classifier has been trained since data was added to it
"""
def __init__(
self,
name,
features,
training_features,
method="rfc",
directory=Path("."),
index_columns=None,
):
# TODO: Think about changing the not classified class to NaN instead of 0
# (when manually using the classifier, a user may provide 0s as training
# input when predicting some binary result)
self.name = name
self.directory = directory
if method == "rfc":
self.clf = RandomForestClassifier()
elif method == "lrc":
self.clf = LogisticRegression()
full_data = features
full_data.loc[:, "train"] = 0
full_data.loc[:, "predict"] = 0
self.index_columns = index_columns
self.train_data = full_data[["train"]]
self.predict_data = full_data[["predict"]]
self.training_features = training_features
self.data = full_data[self.training_features]
# TODO: Improve this flag. Currently user needs to set the flag to
# false when changing training data. How can I automatically change
# the flag whenever someone modifies self.train_data?
# Could try something like this, but worried about the overhead:
# https://stackoverflow.com/questions/6190468/how-to-trigger-function-on-value-change
# Flag of whether the classifier has been trained since features have
# changed last (new site added or train_data modified)
self.is_trained = False
# TODO: Check if data is numeric.
# 1. Throw some exception for strings
# 2. Handle nans: Inform the user.
# Some heuristic: If only < 10% of objects contain nan, ignore those objects
# If a feature is mostly nans (> 10%), ignore the feature (if multiple
# features are available) or show a warning
# Give the user an option to turn this off? E.g. via channel properties
# on the label image?
# => Current implementation should just give NaN results for all cells
# containing NaNs
# Have a way to notify the user of which features were NaNs? e.g. if
# one feature is always NaN, the classifier wouldn't do anything anymore
# 3. Handle booleans: Convert to numeric 0 & 1.
@staticmethod
def train_test_split(
df, test_perc=0.2, index_columns=None
): # pylint: disable-msg=C0103
in_test_set = make_identifier(df.reset_index()[list(index_columns)]).apply(
test_set_check, args=(test_perc,)
)
if in_test_set.sum() == 0:
warnings.warn(
"Not enough training data. No training data was put in the "
"test set and classifier will fail."
)
if in_test_set.sum() == len(in_test_set):
warnings.warn(
"Not enough training data. All your selections became test "
"data and there is nothing to train the classifier on."
)
return df.iloc[~in_test_set.values, :], df.iloc[in_test_set.values, :]
def add_data(self, features, training_features, index_columns):
# Check that training features agree with already existing training features
assert training_features == self.training_features, (
"The training "
"features provided to the classifier are different to what has "
"been used for training so far. This has not been implemented "
f"yet. Old vs. new: {self.training_features} vs. {training_features}"
)
# Check if data with the same index already exists. If so, do nothing
assert index_columns == self.index_columns, (
"The newly added dataframe "
"uses different index columns "
"than what was used in the "
f"classifier before: New {index_columns}, "
f"before {self.index_columns}"
)
# Check which indices already exist in the data, only add the others
new_indices = self._index_not_in_other_df(features, self.train_data)
new_data = features.loc[new_indices["index_new"]]
if len(new_data.index) == 0:
# No new data to be added: The classifier is being loaded for a
# site where the data has been loaded before
            # TODO: Is there a low-priority log level this could be sent to?
# Not a warning, just info or debug
pass
else:
new_data["train"] = 0
new_data["predict"] = 0
# self.train_data = self.train_data.append(new_data[["train"]])
# self.predict_data = self.predict_data.append(new_data[["predict"]])
# self.data = self.data.append(new_data[training_features])
self.train_data = pd.concat([self.train_data, new_data[["train"]]])
self.predict_data = | pd.concat([self.predict_data, new_data[["predict"]]]) | pandas.concat |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Computes model performance metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import hashlib
import itertools
import os
from typing import Any, Dict, List, Optional, Text
from absl import logging
import blast_utils
import blundell_constants
import db
import hmmer_utils
import inference_lib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import parallel
import pfam_utils
import protein_task
import seaborn as sns
import sklearn.decomposition
from statsmodels.stats import contingency_tables
from statsmodels.stats import proportion
import tensorflow.compat.v1 as tf
import util as classification_util
INTERVAL_DATAFRAME_KEY = 'interval'
Subsequence = collections.namedtuple('Subsequence', ['name', 'range'])
# A part of an amino acid sequence that is of particular interest.
ATPASE_START_INDEX_OF_DOMAIN = 160
# https://pfam.xfam.org/protein/AT1A1_PIG, residues 161-352 (1-indexed)
ATPASE_PIG_SEQUENCE = 'NMVPQQALVIRNGEKMSINAEEVVVGDLVEVKGGDRIPADLRIISANGCKVDNSSLTGESEPQTRSPDFTNENPLETRNIAFFSTNCVEGTARGIVVYTGDRTVMGRIATLASGLEGGQTPIAAEIEHFIHIITGVAVFLGVSFFILSLILEYTWLEAVIFLIGIIVANVPEGLLATVTVCLTLTAKRMARK' # pylint: disable=line-too-long
# https://pfam.xfam.org/protein/AT1A1_PIG
ATPASE_ANNOTATED_SUBSEQUENCES = (
Subsequence('disordered', slice(213, 231)),
Subsequence('helical', slice(288, 313)),
Subsequence('helical', slice(318, 342)),
)
# Since our v2r_human is only the domain (not the whole protein),
# we have have to shift by 54 - 1 (because of zero-index); 54 is the start site
# according to http://pfam.xfam.org/protein/P30518.
V2R_START_INDEX_OF_DOMAIN = 53
# http://pfam.xfam.org/protein/P30518
V2R_HUMAN_SEQUENCE = 'SNGLVLAALARRGRRGHWAPIHVFIGHLCLADLAVALFQVLPQLAWKATDRFRGPDALCRAVKYLQMVGMYASSYMILAMTLDRHRAICRPMLAYRHGSGAHWNRPVLVAWAFSLLLSLPQLFIFAQRNVEGGSGVTDCWACFAEPWGRRTYVTWIALMVFVAPTLGIAACQVLIFREIHASLVPGPSERPGGRRRGRRTGSPGEGAHVSAAVAKTVRMTLVIVVVYVLCWAPFFLVQLWAAWDPEAPLEGAPFVLLMLLASLNSCTNPWIY' # pylint: disable=line-too-long
# http://pfam.xfam.org/protein/P30518
V2R_ANNOTATED_SUBSEQUENCES = (
Subsequence('helical', slice(41, 63)),
Subsequence('helical', slice(74, 95)),
Subsequence('helical', slice(114, 136)),
Subsequence('helical', slice(156, 180)),
Subsequence('helical', slice(205, 230)),
Subsequence('helical', slice(274, 297)),
Subsequence('helical', slice(308, 329)),
)
_PRECISION_RECALL_PERCENTILE_THRESHOLDS = np.arange(0, 1., .05)
RECALL_PRECISION_RECALL_KEY = 'recall'
PRECISION_PRECISION_RECALL_KEY = 'precision'
THRESHOLD_PRECISION_RECALL_KEY = 'threshold'
_PRECISION_RECALL_COLUMNS = [
THRESHOLD_PRECISION_RECALL_KEY, PRECISION_PRECISION_RECALL_KEY,
RECALL_PRECISION_RECALL_KEY
]
GATHERING_THRESHOLDS_PATH = 'testdata/gathering_thresholds_v32.0.csv'
TMP_TABLE_NAME = 'seed_train'
ACCURACY_KEY = 'accuracy'
FAMILY_ACCESSION_KEY = 'family_accession'
NUM_EXAMPLES_KEY = 'num_examples'
AVERAGE_SEQUENCE_LENGTH_KEY = 'average_length'
OUT_OF_VOCABULARY_FAMILY_ACCESSION = 'PF00000.0'
# Container for basic accuracy computations: unweighted accuracy, mean per class
# accuracy, and mean per clan accuracy.
BasicAccuracyComputations = collections.namedtuple(
'BasicAccuracyComputations', [
'unweighted_accuracy', 'mean_per_class_accuracy',
'mean_per_clan_accuracy'
])
def get_latest_prediction_file_from_checkpoint_dir(prediction_dir):
"""Return path to prediction file that is for the most recent global_step.
Args:
prediction_dir: Path to directory containing csv-formatted predictions.
Returns:
string. Path to csv file containing latest predictions.
"""
files = tf.io.gfile.Glob(os.path.join(prediction_dir, '*.csv'))
def get_global_step_from_filename(filename):
return int(os.path.basename(filename).replace('.csv', ''))
return max(files, key=get_global_step_from_filename)
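# Illustrative sketch (not part of the original module) of the global-step
# comparison used above; the file names are hypothetical and no filesystem
# access is needed.
def _example_latest_by_global_step():  # pragma: no cover - example only
  files = ['/preds/100.csv', '/preds/2000.csv', '/preds/500.csv']
  # Comparing by the integer global step picks '/preds/2000.csv', whereas plain
  # string ordering would incorrectly pick '/preds/500.csv'.
  return max(files, key=lambda f: int(os.path.basename(f).replace('.csv', '')))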
def load_prediction_file(filename, idx_to_family_accession):
"""Load csv file containing predictions into pandas dataframe.
Args:
filename: string. Path to csv file outputted when training a
pfam model. The csv file should contain 3 columns,
`classification_util.PREDICTION_FILE_COLUMN_NAMES`.
idx_to_family_accession: dict from int to string. The keys
are indices of the families in the dataset descriptor used in training
(which correspond to the logits of the classes). The values are the
accession ids corresponding to that index.
Returns:
pandas DataFrame with 3 columns,
classification_util.PREDICTION_FILE_COLUMN_NAMES.
"""
# Read in csv.
  with tf.io.gfile.GFile(filename, 'r') as f:
dataframe = pd.read_csv(
f, names=classification_util.PREDICTION_FILE_COLUMN_NAMES)
# Convert true and predicted labels from class indexes to accession ids.
dataframe[classification_util.TRUE_LABEL_KEY] = dataframe[
classification_util.TRUE_LABEL_KEY].apply(
lambda true_class_idx: idx_to_family_accession[true_class_idx])
dataframe[classification_util.PREDICTED_LABEL_KEY] = dataframe[
classification_util.PREDICTED_LABEL_KEY].apply(
# pylint: disable=g-long-lambda
lambda predicted_class_idx: (idx_to_family_accession.get(
predicted_class_idx, OUT_OF_VOCABULARY_FAMILY_ACCESSION)))
return dataframe
def mean_per_class_accuracy(predictions_dataframe):
"""Compute accuracy of predictions, giving equal weight to all classes.
Args:
predictions_dataframe: pandas DataFrame with 3 columns,
classification_util.PREDICTION_FILE_COLUMN_NAMES.
Returns:
float. The average of all class-level accuracies.
"""
grouped_predictions = collections.defaultdict(list)
for row in predictions_dataframe.itertuples():
grouped_predictions[row.true_label].append(row.predicted_label)
accuracy_per_class = {
true_label: np.mean(predicted_label == np.array(true_label))
for true_label, predicted_label in grouped_predictions.items()
}
return np.mean(list(accuracy_per_class.values()))
def raw_unweighted_accuracy(
predictions_dataframe,
true_label=classification_util.TRUE_LABEL_KEY,
predicted_label=classification_util.PREDICTED_LABEL_KEY):
"""Compute accuracy, regardless of which class each prediction corresponds to.
Args:
predictions_dataframe: pandas DataFrame with at least 2 columns, true_label
and predicted_label.
true_label: str. Column name of true labels.
predicted_label: str. Column name of predicted labels.
Returns:
float. Accuracy.
"""
num_correct = (predictions_dataframe[true_label] ==
predictions_dataframe[predicted_label]).sum()
total = len(predictions_dataframe)
return num_correct / total
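# Illustrative sketch (not part of the original module): the two accuracy
# notions above differ when classes are imbalanced. The family accessions are
# hypothetical example data.
def _example_unweighted_vs_per_class_accuracy():  # pragma: no cover - example
  predictions = pd.DataFrame({
      classification_util.TRUE_LABEL_KEY: [
          'PF00001', 'PF00001', 'PF00001', 'PF00002', 'PF00002'
      ],
      classification_util.PREDICTED_LABEL_KEY: [
          'PF00001', 'PF00001', 'PF00001', 'PF00002', 'PF00009'
      ],
  })
  # Unweighted accuracy counts examples: 4 / 5 = 0.8. Mean per class accuracy
  # averages per-family accuracies: (1.0 + 0.5) / 2 = 0.75.
  return (raw_unweighted_accuracy(predictions),
          mean_per_class_accuracy(predictions))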
def number_correct(predictions_dataframe,
true_label=classification_util.TRUE_LABEL_KEY,
predicted_label=classification_util.PREDICTED_LABEL_KEY):
"""Computes the number of correct predictions.
Args:
predictions_dataframe: pandas DataFrame with at least 2 columns, true_label
and predicted_label.
true_label: str. Column name of true labels.
predicted_label: str. Column name of predicted labels.
Returns:
int.
"""
return (predictions_dataframe[true_label] ==
predictions_dataframe[predicted_label]).sum()
def family_predictions_to_clan_predictions(predictions_dataframe,
family_to_clan_dict):
"""Convert family predictions to clan predictions.
If a true label has no clan, it is omitted from the returned dataframe.
If a predicted label has no clan, it is *included* in the outputted dataframe
with clan prediction `None`.
Args:
predictions_dataframe: pandas DataFrame with 3 columns,
classification_util.PREDICTION_FILE_COLUMN_NAMES. The true and
predicted label are allowed to have version numbers on the accession ids
(like PF12345.x).
family_to_clan_dict: dictionary from string to string, like
{'PF12345': 'CL1234'}, (where PF stands for protein family, and CL stands
for clan. No version information is on the accession numbers (like
PF12345.x).
Returns:
pandas DataFrame with 3 columns,
classification_util.PREDICTION_FILE_COLUMN_NAMES, where
the true and predicted labels are converted from family labels to clan
labels.
"""
# Avoid mutating original dataframe by making a copy.
clan_prediction_dataframe = predictions_dataframe.copy(deep=True)
# Since the family_to_clan_dict has no versioning on accession numbers,
# we strip those versions from the predictions.
clan_prediction_dataframe[classification_util
.TRUE_LABEL_KEY] = clan_prediction_dataframe[
classification_util.TRUE_LABEL_KEY].apply(
pfam_utils.parse_pfam_accession)
clan_prediction_dataframe[classification_util
.PREDICTED_LABEL_KEY] = clan_prediction_dataframe[
classification_util.PREDICTED_LABEL_KEY].apply(
pfam_utils.parse_pfam_accession)
# Filter to only predictions for which the true labels are in clans.
clan_prediction_dataframe = clan_prediction_dataframe[
clan_prediction_dataframe.true_label.isin(family_to_clan_dict.keys())]
# Convert family predictions to clan predictions for true labels.
clan_prediction_dataframe[classification_util
.TRUE_LABEL_KEY] = clan_prediction_dataframe[
classification_util.TRUE_LABEL_KEY].apply(
lambda label: family_to_clan_dict[label])
# Convert family predictions to clan predictions for predicted labels.
# Use `None` when there is no clan for our predicted label.
clan_prediction_dataframe[classification_util
.PREDICTED_LABEL_KEY] = clan_prediction_dataframe[
classification_util.PREDICTED_LABEL_KEY].apply(
family_to_clan_dict.get)
return clan_prediction_dataframe
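# Illustrative sketch (not part of the original module): family-level
# predictions mapped to clan level. The accessions and clan mapping are
# hypothetical example data.
def _example_clan_conversion():  # pragma: no cover - example only
  family_predictions = pd.DataFrame({
      classification_util.TRUE_LABEL_KEY: ['PF00001.21', 'PF99999.1'],
      classification_util.PREDICTED_LABEL_KEY: ['PF00002.5', 'PF00001.21'],
  })
  family_to_clan = {'PF00001': 'CL0001', 'PF00002': 'CL0001'}
  # The first row is kept and becomes a correct clan-level prediction
  # (CL0001 == CL0001); the second row is dropped because PF99999 has no clan.
  return family_predictions_to_clan_predictions(family_predictions,
                                                family_to_clan)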
def families_with_more_than_n_examples(size_of_training_set_by_family, n):
"""Return list of family accession ids with more than n training examples.
Args:
size_of_training_set_by_family: pandas DataFrame with two columns,
NUM_EXAMPLES_KEY and FAMILY_ACCESSION_KEY
n: int.
Returns:
list of string: accession ids for large families.
"""
filtered_dataframe = size_of_training_set_by_family[
size_of_training_set_by_family.num_examples > n]
return filtered_dataframe[FAMILY_ACCESSION_KEY].values
def mean_class_per_accuracy_for_only_large_classes(
all_predictions_dataframe, class_minimum_size,
size_of_training_set_by_family):
"""Compute mean per class accuracy on classes with lots of training data.
Args:
all_predictions_dataframe: pandas DataFrame with 3 columns,
classification_util.PREDICTION_FILE_COLUMN_NAMES. The true and predicted
label are allowed to have version numbers on the accession ids
(like PF12345.x).
class_minimum_size: int.
size_of_training_set_by_family: pandas DataFrame with two columns,
NUM_EXAMPLES_KEY and FAMILY_ACCESSION_KEY
Returns:
float.
"""
qualifying_families = families_with_more_than_n_examples(
size_of_training_set_by_family, class_minimum_size)
qualifying_predictions = all_predictions_dataframe[
all_predictions_dataframe.true_label.isin(qualifying_families)]
return mean_per_class_accuracy(qualifying_predictions)
def accuracy_by_family(family_predictions):
"""Return DataFrame that has accuracy by classification_util.TRUE_LABEL_KEY.
Args:
family_predictions: pandas DataFrame with 3 columns,
classification_util.PREDICTION_FILE_COLUMN_NAMES. The true and predicted
label are allowed to have version numbers on the accession ids (like
PF12345.x).
Returns:
pandas DataFrame with two columns, classification_util.TRUE_LABEL_KEY and
ACCURACY_KEY.
"""
return family_predictions.groupby([
classification_util.TRUE_LABEL_KEY
]).apply(raw_unweighted_accuracy).reset_index(name=ACCURACY_KEY)
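# Illustrative sketch (not part of the original module): per-family accuracy as
# a two-column frame. The accessions are hypothetical example data.
def _example_accuracy_by_family():  # pragma: no cover - example only
  predictions = pd.DataFrame({
      classification_util.TRUE_LABEL_KEY: ['PF00001', 'PF00001', 'PF00002'],
      classification_util.PREDICTED_LABEL_KEY: ['PF00001', 'PF00009', 'PF00002'],
  })
  # Returns one row per true family: PF00001 -> 0.5, PF00002 -> 1.0.
  return accuracy_by_family(predictions)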
def pca_embedding_for_sequences(list_of_seqs, inferrer, num_pca_dims=2):
"""Take top num_pca_dims of an embedding of each sequence in list_of_seqs.
Args:
list_of_seqs: list of string. Amino acid characters only.
inferrer: inference_lib.Inferrer instance.
num_pca_dims: the number of prinicple components to retain.
Returns:
np.array of shape (len(list_of_seqs), num_pca_dims).
"""
activations_batch = inferrer.get_activations(list_of_seqs=list_of_seqs)
pca = sklearn.decomposition.PCA(n_components=num_pca_dims, whiten=True)
return pca.fit_transform(np.stack(activations_batch, axis=0))
def accuracy_by_size_of_family(family_predictions, size_of_family):
"""Return DataFrame with the accuracy computed, segmented by family.
Args:
family_predictions: pandas DataFrame with 3 columns,
classification_util.PREDICTION_FILE_COLUMN_NAMES. The true and predicted
label are allowed to have version numbers on the accession ids (like
PF12345.x).
size_of_family: pandas DataFrame with two columns, NUM_EXAMPLES_KEY and
FAMILY_ACCESSION_KEY
Returns:
pandas DataFrame with two columns, NUM_EXAMPLES_KEY and ACCURACY_KEY.
"""
return pd.merge(
accuracy_by_family(family_predictions),
size_of_family,
left_on=classification_util.TRUE_LABEL_KEY,
right_on=FAMILY_ACCESSION_KEY)[[NUM_EXAMPLES_KEY, ACCURACY_KEY]]
def accuracy_by_sequence_length(family_predictions,
length_of_examples_by_family):
"""Return DataFrame with accuracy computed by avg sequence length per family.
Args:
family_predictions: pandas DataFrame with 3 columns,
classification_util.PREDICTION_FILE_COLUMN_NAMES. The true and predicted
label are allowed to have version numbers on the accession ids (like
PF12345.x).
length_of_examples_by_family: pandas DataFrame with two columns,
AVERAGE_SEQUENCE_LENGTH_KEY and FAMILY_ACCESSION_KEY
Returns:
pandas DataFrame with two columns, AVERAGE_SEQUENCE_LENGTH_KEY and
ACCURACY_KEY.
"""
return pd.merge(
accuracy_by_family(family_predictions),
length_of_examples_by_family,
left_on=classification_util.TRUE_LABEL_KEY,
right_on=FAMILY_ACCESSION_KEY)[[
AVERAGE_SEQUENCE_LENGTH_KEY, ACCURACY_KEY
]]
def num_examples_per_class(connection, table_name):
"""Compute number of examples per class.
Args:
connection: a connection that has temporary table `table_name`
in it's session.
table_name: name of table to query. The table should have a family_accession
column.
Returns:
pandas DataFrame with two columns: FAMILY_ACCESSION_KEY and num_examples.
num_examples is the number of examples with that family_accession.
"""
connection.ExecuteQuery(r"""SELECT
family_accession,
COUNT(*) AS num_examples
FROM
""" + table_name + r"""
GROUP BY
family_accession""")
result = db.ResultToFrame(connection)
return result
def average_sequence_length_per_class(connection, table_name):
"""Compute average length of sequence per class.
Args:
connection: a connection that has temporary table `table_name`
in it's session.
table_name: name of table to query. The table should have a family_accession
column.
Returns:
pandas DataFrame with two columns: FAMILY_ACCESSION_KEY and
AVERAGE_LENGTH_KEY. average_length is the average length of sequences
that have that family_accession.
"""
connection.ExecuteQuery(r"""SELECT
family_accession,
AVG(LENGTH(sequence)) as average_length
FROM
""" + table_name + r"""
GROUP BY
family_accession""")
result = db.ResultToFrame(connection)
return result
def _pad_front_of_all_mutations(all_mutation_measurements, pad_amount,
pad_value):
"""Pad all_mutation_measurements with pad_value in the front.
Adds a "mutation measurement" of pad_value for each amino acid,
pad_amount times.
For example, if the input shape of all_mutation_measurements is (20, 100),
and pad_amount is 17, the output shape is (20, 117)
Args:
all_mutation_measurements: np.array of float, with shape
(len(pfam_utils.AMINO_ACID_VOCABULARY), len(amino_acid_sequence)). The
output of `measure_all_mutations`.
pad_amount: the amount to pad the front of the amino acid measurements.
pad_value: float.
Returns:
np.array of shape (len(pfam_utils.AMINO_ACID_VOCABULARY),
all_mutation_measurements.shape[1] + pad_amount).
"""
padded_acids = []
for acid_index in range(all_mutation_measurements.shape[0]):
front_pad = np.full(pad_amount, pad_value)
padded_acid = np.append(front_pad, all_mutation_measurements[acid_index])
padded_acids.append(padded_acid)
padded_acids = np.array(padded_acids)
return padded_acids
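# Illustrative sketch (not part of the original module): padding a small
# mutation matrix. Shape comments assume a 20-letter amino acid vocabulary.
def _example_pad_front():  # pragma: no cover - example only
  measurements = np.zeros((len(pfam_utils.AMINO_ACID_VOCABULARY), 5))
  padded = _pad_front_of_all_mutations(
      all_mutation_measurements=measurements, pad_amount=3, pad_value=-1.0)
  # padded.shape == (20, 8); the first 3 entries of every row are -1.0.
  return padded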
def _round_to_base(x, base=5):
"""Round to nearest multiple of `base`."""
return int(base * round(float(x) / base))
def plot_all_mutations(all_mutation_measurements_excluding_pad, subsequences,
start_index_of_mutation_predictions):
"""Plot all mutations, annotated with domains, along with average values.
Args:
all_mutation_measurements_excluding_pad: np.array of float, with shape
(len(pfam_utils.AMINO_ACID_VOCABULARY), len(amino_acid_sequence)).
The output of `measure_all_mutations`.
subsequences: List of `Subsequence`, which annotates the particular areas
of interest of a protein/domain.
start_index_of_mutation_predictions: Start index of the amino acid sequence
that generated parameter `all_mutation_measurements_excluding_pad`.
Since we often only predict mutations for a domain of a protein, which
doesn't necessarily start at amino acid index 0, we have to offset the
plot to appropriately line up indices.
"""
sns.set_style('whitegrid')
min_x_index = min(
min([subsequence.range.start for subsequence in subsequences]),
start_index_of_mutation_predictions)
# https://www.compoundchem.com/2014/09/16/aminoacids/ explains this ordering.
amino_acid_semantic_grouping_reordering = list('CMGAVILPFWYSTNQDERKH')
amino_acid_reordering_indexes = [
pfam_utils.AMINO_ACID_VOCABULARY.index(aa)
for aa in amino_acid_semantic_grouping_reordering
]
all_mutation_measurements_excluding_pad = (
all_mutation_measurements_excluding_pad[amino_acid_reordering_indexes])
all_mutation_measurements_including_pad = _pad_front_of_all_mutations(
all_mutation_measurements=all_mutation_measurements_excluding_pad,
pad_amount=start_index_of_mutation_predictions,
pad_value=np.min(all_mutation_measurements_excluding_pad))
### FIGURE
plt.figure(figsize=(35, 10))
  gs = plt.GridSpec(2, 1, height_ratios=[1, 10], hspace=0)
## PLOT 0
share_axis = plt.subplot(gs[0])
share_axis.set_xlim(left=min_x_index)
share_axis.get_xaxis().set_visible(False)
share_axis.get_yaxis().set_visible(False)
for subsequence in subsequences:
plt.hlines(
0,
subsequence.range.start,
subsequence.range.stop,
linewidth=10,
color='k')
text_x_location = (subsequence.range.stop + subsequence.range.start) / 2
plt.text(
text_x_location,
.03,
subsequence.name,
horizontalalignment='center',
fontsize=18)
share_axis.set_yticks([])
share_axis.grid(False, axis='y')
### PLOT 1
plt.subplot(gs[1], sharex=share_axis)
plt.imshow(
all_mutation_measurements_including_pad,
cmap='Blues',
interpolation='none',
clim=[
np.min(all_mutation_measurements_excluding_pad),
np.percentile(all_mutation_measurements_excluding_pad.flatten(), 80)
])
plt.axis('tight')
ax = plt.gca()
ax.set_xticks([x[1].start for x in subsequences] +
[x[1].stop for x in subsequences])
ax.tick_params(
axis='x',
which='both', # both major and minor ticks are affected
top=False)
ax.tick_params(
axis='y',
which='both', # both major and minor ticks are affected
right=False)
ax.set_yticks(
np.arange(0, len(amino_acid_semantic_grouping_reordering), 1.),
minor=True)
ax.set_yticklabels(
amino_acid_semantic_grouping_reordering,
minor=True,
fontdict={'fontsize': 16})
ax.set_yticks([], minor=False)
plt.tick_params(labelsize=16)
sns.despine(top=True, left=True, right=True, bottom=False)
plt.xlim(xmin=min_x_index)
def get_basic_accuracy_computations(all_family_predictions):
"""Returns unweighted, mean-per-class, and clan-level accuracy.
Args:
all_family_predictions: pandas DataFrame with 3 columns,
classification_util.PREDICTION_FILE_COLUMN_NAMES.
Returns:
BasicAccuracyComputations object with unweighted_accuracy (float),
mean_per_class_accuracy (float), and mean_per_clan_accuracy (float) fields.
"""
family_to_clan_dict = pfam_utils.family_to_clan_mapping()
clan_predictions = family_predictions_to_clan_predictions(
all_family_predictions, family_to_clan_dict)
return BasicAccuracyComputations(
unweighted_accuracy=raw_unweighted_accuracy(all_family_predictions),
mean_per_class_accuracy=mean_per_class_accuracy(all_family_predictions),
mean_per_clan_accuracy=mean_per_class_accuracy(clan_predictions),
)
def print_basic_accuracy_computations(all_family_predictions):
"""Print unweighted, mean-per-class, and clan-level accuracy.
Args:
all_family_predictions: pandas DataFrame with 3 columns,
classification_util.PREDICTION_FILE_COLUMN_NAMES.
"""
basic_accuracy_computations = get_basic_accuracy_computations(
all_family_predictions)
print('Unweighted accuracy: {:.5f}'.format(
basic_accuracy_computations.unweighted_accuracy))
print('Mean per class accuracy: {:.5f}'.format(
basic_accuracy_computations.mean_per_class_accuracy))
print('Mean per clan accuracy: {:.5f}'.format(
basic_accuracy_computations.mean_per_clan_accuracy))
def show_size_of_family_accuracy_comparisons(all_family_predictions):
"""Compare, using charts and measurements, effect of size on accuracy.
Args:
all_family_predictions: pandas DataFrame with 3 columns,
classification_util.PREDICTION_FILE_COLUMN_NAMES.
"""
cxn = pfam_utils.connection_with_tmp_table(
TMP_TABLE_NAME, blundell_constants.RANDOM_SPLIT_ALL_SEED_DATA_PATH)
size_by_family = num_examples_per_class(cxn, TMP_TABLE_NAME)
accuracy_by_size_of_family_dataframe = accuracy_by_size_of_family(
all_family_predictions, size_by_family)
imperfect_classes = accuracy_by_size_of_family_dataframe[
accuracy_by_size_of_family_dataframe[ACCURACY_KEY] != 1.0]
grid = sns.JointGrid(
x=NUM_EXAMPLES_KEY,
y=ACCURACY_KEY,
data=imperfect_classes,
xlim=(-10, 2000),
ylim=(.01, 1.01),
)
grid = grid.plot_joint(plt.scatter, color='k', s=10)
grid = grid.plot_marginals(
sns.distplot,
kde=False,
color='.5',
)
grid = grid.set_axis_labels(
xlabel='Number of examples in unsplit seed dataset', ylabel='Accuracy')
# pytype incorrectly decides that this is not an attribute, but it is.
# https://seaborn.pydata.org/generated/seaborn.JointGrid.html
grid.ax_marg_x.set_axis_off() # pytype: disable=attribute-error
grid.ax_marg_y.set_axis_off() # pytype: disable=attribute-error
plt.show()
print('Correlation between number of seed examples and accuracy: '
'{:.5f}'.format(accuracy_by_size_of_family_dataframe[ACCURACY_KEY].corr(
accuracy_by_size_of_family_dataframe[NUM_EXAMPLES_KEY])))
def show_sequence_length_accuracy_comparisons(all_family_predictions):
"""Compare, using charts and measurements, effect of length on accuracy.
Args:
all_family_predictions: pandas DataFrame with 3 columns,
classification_util.PREDICTION_FILE_COLUMN_NAMES.
"""
cxn = pfam_utils.connection_with_tmp_table(
TMP_TABLE_NAME, blundell_constants.RANDOM_SPLIT_ALL_SEED_DATA_PATH)
length_of_examples_by_family = average_sequence_length_per_class(
cxn, TMP_TABLE_NAME)
accuracy_by_sequence_length_dataframe = accuracy_by_sequence_length(
all_family_predictions, length_of_examples_by_family)
imperfect_classes = accuracy_by_sequence_length_dataframe[
accuracy_by_sequence_length_dataframe[ACCURACY_KEY] != 1.0]
grid = sns.JointGrid(
x=AVERAGE_SEQUENCE_LENGTH_KEY,
y=ACCURACY_KEY,
data=imperfect_classes,
xlim=(-10, 2000),
ylim=(.01, 1.01),
)
grid = grid.plot_joint(plt.scatter, color='k', s=10)
grid = grid.set_axis_labels(
xlabel='Avg sequence length per family (incl train and test)',
ylabel='Accuracy of predictions')
grid = grid.plot_marginals(sns.distplot, kde=False, color='.5')
# pytype incorrectly decides that this is not an attribute, but it is.
# https://seaborn.pydata.org/generated/seaborn.JointGrid.html
grid.ax_marg_x.set_axis_off() # pytype: disable=attribute-error
grid.ax_marg_y.set_axis_off() # pytype: disable=attribute-error
plt.show()
print(
'Correlation between sequence length of seed examples and accuracy: '
'{:.5f}'.format(accuracy_by_sequence_length_dataframe[ACCURACY_KEY].corr(
accuracy_by_sequence_length_dataframe[AVERAGE_SEQUENCE_LENGTH_KEY])))
def show_transmembrane_mutation_matrices(savedmodel_dir_path):
"""Compute and plot mutation matrices for various transmembrane domains.
Args:
savedmodel_dir_path: path to directory where a SavedModel pb or
pbtxt is stored. The SavedModel must only have one input per signature
and only one output per signature.
"""
inferrer = inference_lib.Inferrer(
savedmodel_dir_path=savedmodel_dir_path,
activation_type=protein_task.LOGITS_SAVEDMODEL_SIGNATURE_KEY)
show_mutation_matrix(
sequence=V2R_HUMAN_SEQUENCE,
subsequences=V2R_ANNOTATED_SUBSEQUENCES,
start_index_of_domain=V2R_START_INDEX_OF_DOMAIN,
inferrer=inferrer)
atpase_pig_subsequences = ATPASE_ANNOTATED_SUBSEQUENCES
show_mutation_matrix(
sequence=ATPASE_PIG_SEQUENCE,
subsequences=atpase_pig_subsequences,
start_index_of_domain=ATPASE_START_INDEX_OF_DOMAIN,
inferrer=inferrer)
def _filter_hmmer_first_pass_by_gathering_threshold(
hmmer_prediction_with_scores_df, gathering_thresholds_df):
"""Filters predictions to only include those above the gathering thresholds.
Args:
hmmer_prediction_with_scores_df: pandas DataFrame with 4 columns,
hmmer_util.HMMER_OUTPUT_CSV_COLUMN_HEADERS. The true and predicted label
are allowed to have version numbers on the accession ids (like PF12345.x).
gathering_thresholds_df: pandas DataFrame with 2 columns,
classification_util.TRUE_LABEL_KEY and
hmmer_utils.DATAFRAME_SCORE_NAME_KEY. The true label is allowed to have
version numbers on the accession ids (like PF12345.x).
Returns:
pandas DataFrame with columns hmmer_util.HMMER_OUTPUT_CSV_COLUMN_HEADERS.
Raises:
    KeyError: If there is a predicted label in hmmer_prediction_with_scores_df
      that is missing from gathering_thresholds_df.
ValueError: If there is a true_label that is repeated in
gathering_thresholds_df.
"""
# Avoid modifying passed arguments.
hmmer_prediction_with_scores_df = hmmer_prediction_with_scores_df.copy(
deep=True)
gathering_thresholds_df = gathering_thresholds_df.copy(deep=True)
# Sanitize family accessions to not have version numbers.
hmmer_scores_key = classification_util.PREDICTED_LABEL_KEY + '_sanitized'
gathering_thresholds_key = classification_util.TRUE_LABEL_KEY + '_sanitized'
hmmer_prediction_with_scores_df[
hmmer_scores_key] = hmmer_prediction_with_scores_df[
classification_util.PREDICTED_LABEL_KEY].apply(
pfam_utils.parse_pfam_accession)
gathering_thresholds_df[gathering_thresholds_key] = gathering_thresholds_df[
classification_util.TRUE_LABEL_KEY].apply(pfam_utils.parse_pfam_accession)
if np.any(
gathering_thresholds_df.duplicated(
classification_util.TRUE_LABEL_KEY).values):
raise ValueError('One or more of the true labels in the gathering '
'thresholds dataframe was duplicated: {}'.format(
gathering_thresholds_df.groupby(
classification_util.TRUE_LABEL_KEY).size() > 1))
gathering_thresholds_dict = pd.Series(
gathering_thresholds_df.score.values,
index=gathering_thresholds_df[gathering_thresholds_key]).to_dict()
threshold_key = hmmer_utils.DATAFRAME_SCORE_NAME_KEY + '_thresh'
# Will raise KeyError if the family is not found in the gathering thresholds
# dict.
hmmer_prediction_with_scores_df[
threshold_key] = hmmer_prediction_with_scores_df[hmmer_scores_key].apply(
lambda x: gathering_thresholds_dict[x])
filtered = hmmer_prediction_with_scores_df[
hmmer_prediction_with_scores_df[hmmer_utils.DATAFRAME_SCORE_NAME_KEY] >
hmmer_prediction_with_scores_df[threshold_key]]
logging.info('Size before filtering by gathering thresh: %d',
len(hmmer_prediction_with_scores_df))
logging.info('Size after filtering: %d', len(filtered))
assert len(filtered) <= len(hmmer_prediction_with_scores_df)
# Get rid of extra columns.
return filtered[hmmer_utils.HMMER_OUTPUT_CSV_COLUMN_HEADERS]
def _group_by_size_histogram_data(dataframe, group_by_key):
"""Returns a histogram of the number of elements per group.
If you group by sequence_name, the dictionary you get returned is:
key: number of predictions a sequence has
value: number of sequences with `key` many predictions.
Args:
dataframe: pandas DataFrame that has column group_by_key.
group_by_key: string. The column to group dataframe by.
Returns:
dict from int to int.
"""
return dataframe.groupby(group_by_key).size().to_frame('size').groupby(
'size').size().to_dict()
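# Illustrative sketch (not part of the original module): seq_a has two
# predictions, seq_b and seq_c have one each. Sequence names are hypothetical.
def _example_group_by_size_histogram():  # pragma: no cover - example only
  predictions = pd.DataFrame(
      {'sequence_name': ['seq_a', 'seq_a', 'seq_b', 'seq_c']})
  # Returns {1: 2, 2: 1}: two sequences with one prediction, one with two.
  return _group_by_size_histogram_data(predictions, 'sequence_name')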
def _had_more_than_one_prediction_and_in_clan(predictions_df, family_to_clan):
"""Returns the number of sequences with >1 prediction and also in a clan.
Args:
predictions_df: pandas DataFrame with 3 columns:
hmmer_utils.DATAFRAME_SCORE_NAME_KEY, classification_util.TRUE_LABEL_KEY,
and classification_util.PREDICTED_LABEL_KEY. Version numbers are
acceptable on the family names.
family_to_clan: dict from string to string, e.g. {'PF12345': 'CL9999'}.
Version numbers are acceptable on the family names.
Returns:
int.
"""
# Avoid mutating original object.
predictions_df = predictions_df.copy(deep=True)
number_of_predictions = predictions_df.groupby(
classification_util.DATAFRAME_SEQUENCE_NAME_KEY).size().to_frame(
'number_of_predictions')
number_of_predictions.reset_index(inplace=True)
predictions_df = pd.merge(
predictions_df,
number_of_predictions,
left_on=classification_util.DATAFRAME_SEQUENCE_NAME_KEY,
right_on=classification_util.DATAFRAME_SEQUENCE_NAME_KEY)
multiple_predictions = predictions_df.copy(deep=True)
multiple_predictions = multiple_predictions[
multiple_predictions['number_of_predictions'] > 1]
multiple_predictions['parsed_true_label'] = multiple_predictions[
classification_util.TRUE_LABEL_KEY].map(pfam_utils.parse_pfam_accession)
family_to_clan_parsed = {
pfam_utils.parse_pfam_accession(k): v for k, v in family_to_clan.items()
}
in_clans = multiple_predictions[
multiple_predictions['parsed_true_label'].isin(family_to_clan_parsed)]
return len(in_clans.groupby(classification_util.DATAFRAME_SEQUENCE_NAME_KEY))
def show_hmmer_first_pass_gathering_threshold_statistics(
hmmer_prediction_with_scores_csv_path):
"""Print number of predictions per sequence over gathering threshold.
Args:
hmmer_prediction_with_scores_csv_path: string. Path to csv file that has
columns hmmer_utils.HMMER_OUTPUT_CSV_COLUMN_HEADERS.
"""
with tf.io.gfile.GFile(
GATHERING_THRESHOLDS_PATH) as gathering_thresholds_file:
gathering_thresholds_df = pd.read_csv(
gathering_thresholds_file,
names=[
classification_util.TRUE_LABEL_KEY,
hmmer_utils.DATAFRAME_SCORE_NAME_KEY
])
with tf.io.gfile.GFile(
hmmer_prediction_with_scores_csv_path) as hmmer_output_file:
hmmer_scores_df = pd.read_csv(hmmer_output_file)
filtered = _filter_hmmer_first_pass_by_gathering_threshold(
hmmer_scores_df, gathering_thresholds_df)
counted_by_num_predictions = _group_by_size_histogram_data(
filtered, classification_util.DATAFRAME_SEQUENCE_NAME_KEY)
family_to_clan_dict = pfam_utils.family_to_clan_mapping()
meet_reporting_criteria = _had_more_than_one_prediction_and_in_clan(
filtered, family_to_clan_dict)
print('Count of seqs that had more than one prediction, '
'and also were in a clan {}'.format(meet_reporting_criteria))
print('Count of sequences by number of predictions: {}'.format(
counted_by_num_predictions))
def show_mutation_matrix(sequence, subsequences, start_index_of_domain,
inferrer):
"""Compute and display predicted effects of mutating sequence everywhere.
Args:
sequence: string of amino acid characters.
subsequences: list of Subsequence. These areas will be displayed alongside
the mutation predictions.
start_index_of_domain: int. Because most domains do not begin at index 0 of
the protein, but Subsequence.slice indexing does, we have to offset where
we display the start of the mutation predictions in the plot. This
argument is 0-indexed.
inferrer: inference_lib.Inferrer instance.
"""
mutation_measurements = inference_lib.measure_all_mutations(
sequence, inferrer)
plot_all_mutations(
mutation_measurements,
start_index_of_mutation_predictions=start_index_of_domain,
subsequences=subsequences)
def precision_recall_dataframe(
predictions_df,
percentile_thresholds=_PRECISION_RECALL_PERCENTILE_THRESHOLDS):
"""Return dataframe with precision and recall for each percentile in list.
Args:
predictions_df: pandas DataFrame with 3 columns:
hmmer_utils.DATAFRAME_SCORE_NAME_KEY, classification_util.TRUE_LABEL_KEY,
and classification_util.PREDICTED_LABEL_KEY.
percentile_thresholds: list of float between 0 and 1. These values will be
used as percentiles for varying the thresholding of
hmmer_utils.DATAFRAME_SCORE_NAME_KEY to compute precision and recall.
Returns:
pandas dataframe with columns PRECISION_RECALL_COLUMNS.
"""
# Avoid mutating original object.
predictions_df = predictions_df.copy(deep=True)
precision_recall_df = pd.DataFrame(columns=_PRECISION_RECALL_COLUMNS)
for percentile in percentile_thresholds:
percentile_cutoff = predictions_df[
hmmer_utils.DATAFRAME_SCORE_NAME_KEY].quantile(
percentile, interpolation='nearest')
called_elements = predictions_df[predictions_df[
hmmer_utils.DATAFRAME_SCORE_NAME_KEY] >= percentile_cutoff]
true_positive = len(called_elements[called_elements[
classification_util.TRUE_LABEL_KEY] == called_elements[
classification_util.PREDICTED_LABEL_KEY]])
false_positive = len(called_elements[
called_elements[classification_util.TRUE_LABEL_KEY] != called_elements[
classification_util.PREDICTED_LABEL_KEY]])
if true_positive == 0 and false_positive == 0:
# Avoid division by zero error; we called zero elements.
precision = 0
else:
precision = float(true_positive) / (true_positive + false_positive)
uncalled_elements = predictions_df[predictions_df[
hmmer_utils.DATAFRAME_SCORE_NAME_KEY] < percentile_cutoff]
false_negative = len(uncalled_elements[uncalled_elements[
classification_util.TRUE_LABEL_KEY] == uncalled_elements[
classification_util.PREDICTED_LABEL_KEY]])
if true_positive == 0 and false_negative == 0:
# Avoid division by zero error.
recall = 0.
else:
recall = float(true_positive) / len(predictions_df)
precision_recall_df = precision_recall_df.append(
{
THRESHOLD_PRECISION_RECALL_KEY: percentile_cutoff,
PRECISION_PRECISION_RECALL_KEY: precision,
RECALL_PRECISION_RECALL_KEY: recall,
},
ignore_index=True,
)
return precision_recall_df
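# Illustrative sketch (not part of the original module): sweeping score
# thresholds over a tiny set of predictions. Labels and scores are hypothetical
# example data.
def _example_precision_recall():  # pragma: no cover - example only
  predictions = pd.DataFrame({
      hmmer_utils.DATAFRAME_SCORE_NAME_KEY: [0.9, 0.8, 0.2, 0.1],
      classification_util.TRUE_LABEL_KEY: ['PF1', 'PF2', 'PF3', 'PF4'],
      classification_util.PREDICTED_LABEL_KEY: ['PF1', 'PF2', 'PF9', 'PF9'],
  })
  # One row per threshold; precision rises as the low-scoring, incorrect calls
  # drop below the cutoff.
  return precision_recall_dataframe(
      predictions, percentile_thresholds=[0.0, 0.5, 0.95])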
def show_precision_recall(predictions_df):
"""Compute precision and recall for predictions, and graph.
Args:
predictions_df: pandas DataFrame with 3 columns:
hmmer_utils.DATAFRAME_SCORE_NAME_KEY, classification_util.TRUE_LABEL_KEY,
and classification_util.PREDICTED_LABEL_KEY.
"""
precision_recall_df = precision_recall_dataframe(predictions_df)
ax = sns.scatterplot(
x=PRECISION_PRECISION_RECALL_KEY,
y=RECALL_PRECISION_RECALL_KEY,
data=precision_recall_df,
color='k')
ax.set_xlim(left=0, right=1)
ax.set_ylim(bottom=0, top=1)
plt.show()
def output_basic_measurements_and_figures(all_family_predictions):
"""Show basic charts and graphs about the given predictions' accuracy.
Args:
all_family_predictions: pandas DataFrame with 3 columns,
classification_util.PREDICTION_FILE_COLUMN_NAMES. The true and predicted
label are allowed to have version numbers on the accession ids
(like PF12345.x).
"""
print_basic_accuracy_computations(all_family_predictions)
show_size_of_family_accuracy_comparisons(all_family_predictions)
show_sequence_length_accuracy_comparisons(all_family_predictions)
def output_all_measurements_and_figures(savedmodel_dir_path,
prediction_dirs_and_names,
dataset_descriptor_path):
"""Output all measurements for trained model needed for benchmark paper.
Args:
savedmodel_dir_path: path to directory where a SavedModel pb or pbtxt is
stored. The SavedModel must only have one input per signature and only one
output per signature.
prediction_dirs_and_names: dictionary from str to str. Keys are human
readable descriptions of keys. Keys are paths to csv prediction files
from a model
dataset_descriptor_path: Path to dataset descriptor used when training the
classification model.
"""
for name, prediction_dir in prediction_dirs_and_names.items():
prediction_csv_path = get_latest_prediction_file_from_checkpoint_dir(
prediction_dir)
print('Predictions for {} in path {} '.format(name, prediction_csv_path))
index_to_family_accession = classification_util.idx_to_family_id_dict(
dataset_descriptor_path)
all_family_predictions = load_prediction_file(prediction_csv_path,
index_to_family_accession)
output_basic_measurements_and_figures(all_family_predictions)
print('\n')
show_transmembrane_mutation_matrices(savedmodel_dir_path=savedmodel_dir_path)
def read_blundell_style_csv(
fin,
all_sequence_names=None,
names=None,
raise_if_missing=True,
missing_sequence_name=hmmer_utils.NO_SEQUENCE_MATCH_SEQUENCE_NAME_SENTINEL,
deduplicate=True):
"""Reads a blundell-style CSV of predictions.
  This function is similar to, but more general than, `load_prediction_file()`:
  it makes fewer assumptions about the encoding of
the family names, directly handles deduplication, verifies the completeness
of the predictions, and handles NA or missing sequence sentinel values.
This function handles missing predictions with the all_sequence_names and
raise_if_missing arguments. For example, blast's output CSV is expected to
have missing predictions because not all sequences can be aligned with blast
using the default parameters. In such cases, we want to fill in NaN
predictions for all of these missing sequences. We do so by providing
all_sequence_names and raise_if_missing=False, which will result in the
returned dataframe having NaN values filled in for all of the missing
  sequences. Note that if raise_if_missing=True, this function would raise a
ValueError if any sequences are missing.
In some situations (e.g., with hmmer) the CSV isn't outright missing
predictions for some sequences but rather there are special output rows that
are marked with a sentinel value indicating they aren't real predictions but
  are rather missing values. To support such situations, any row in our dataframe
whose sequence name is equal to `missing_sequence_name` will be removed and
treated just like a genuinely missing row, where the response columns (
basically everything except classification_util.DATAFRAME_SEQUENCE_NAME_KEY)
  are set to NaN values.
If deduplicate=True, `yield_top_el_by_score_for_each_sequence_name` will be
run on the raw dataframe read from fin to select only the best scoring
prediction for each sequence.
Args:
fin: file-like. Where we will read the CSV.
    all_sequence_names: Optional set/list of expected sequence names. If not None,
this set of strings should contain all of the sequence names expected to
occur in fin.
names: optional list of strings. Defaults to None. If provided, will be
passed as the `names` argument to pandas.read_csv, allowing us to read a
CSV from fin without a proper header line.
raise_if_missing: bool. Defaults to True. Should we raise an exception if
any sequence in all_sequence_names is missing in the read in dataframe?
missing_sequence_name: string. Defaults to
`hmmer_utils.NO_SEQUENCE_MATCH_SEQUENCE_NAME_SENTINEL`. If provided, any
sequence name in the dataframe matching this string will be treated as a
missing value marker.
deduplicate: bool. Defaults to True. If True, we will deduplicate the
dataframe, selecting only the best scoring prediction when there are
multiple predictions per sequence.
Returns:
    A Pandas dataframe containing five columns: sequence_name, true_label,
    predicted_label, score, and domain_evalue.
The dataframe is sorted by sequence_name.
"""
df = | pd.read_csv(fin, names=names) | pandas.read_csv |
# This script runs expanded econometric models using both old and new data
# Import required modules
import pandas as pd
import statsmodels.api as stats
from matplotlib import pyplot as plt
import numpy as np
from ToTeX import restab
# Reading in the data
data = | pd.read_csv('C:/Users/User/Documents/Data/demoforestation_differenced.csv') | pandas.read_csv |
#
# Copyright 2020 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Testing out the datacompy functionality
"""
import io
import logging
import sys
from datetime import datetime
from decimal import Decimal
from unittest import mock
import numpy as np
import pandas as pd
import pytest
from pandas.util.testing import assert_series_equal
from pytest import raises
import datacompy
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
def test_numeric_columns_equal_abs():
data = """a|b|expected
1|1|True
2|2.1|True
3|4|False
4|NULL|False
NULL|4|False
NULL|NULL|True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_numeric_columns_equal_rel():
data = """a|b|expected
1|1|True
2|2.1|True
3|4|False
4|NULL|False
NULL|4|False
NULL|NULL|True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
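# Illustration only (not datacompy's implementation): the tolerance semantics
# assumed by the two tests above follow numpy.isclose, i.e. two values match
# when |a - b| <= abs_tol + rel_tol * |b|, with NaN == NaN treated as equal.
def _illustrate_tolerance(a, b, abs_tol=0.0, rel_tol=0.0):
    return np.isclose(a, b, rtol=rel_tol, atol=abs_tol, equal_nan=True)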
def test_string_columns_equal():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |False
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |False
datacompy|DataComPy|False
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal_with_ignore_spaces():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |True
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |True
datacompy|DataComPy|False
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal_with_ignore_spaces_and_case():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |True
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |True
datacompy|DataComPy|True
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(
df.a, df.b, rel_tol=0.2, ignore_spaces=True, ignore_case=True
)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_date_columns_equal():
data = """a|b|expected
2017-01-01|2017-01-01|True
2017-01-02|2017-01-02|True
2017-10-01|2017-10-10|False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_equal_with_ignore_spaces():
data = """a|b|expected
2017-01-01|2017-01-01 |True
2017-01-02 |2017-01-02|True
2017-10-01 |2017-10-10 |False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2, ignore_spaces=True)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_equal_with_ignore_spaces_and_case():
data = """a|b|expected
2017-01-01|2017-01-01 |True
2017-01-02 |2017-01-02|True
2017-10-01 |2017-10-10 |False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(
df.a, df.b, rel_tol=0.2, ignore_spaces=True, ignore_case=True
)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2, ignore_spaces=True)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_unequal():
"""I want datetime fields to match with dates stored as strings
"""
df = pd.DataFrame([{"a": "2017-01-01", "b": "2017-01-02"}, {"a": "2017-01-01"}])
df["a_dt"] = pd.to_datetime(df["a"])
df["b_dt"] = pd.to_datetime(df["b"])
assert datacompy.columns_equal(df.a, df.a_dt).all()
assert datacompy.columns_equal(df.b, df.b_dt).all()
assert datacompy.columns_equal(df.a_dt, df.a).all()
assert datacompy.columns_equal(df.b_dt, df.b).all()
assert not datacompy.columns_equal(df.b_dt, df.a).any()
assert not datacompy.columns_equal(df.a_dt, df.b).any()
assert not datacompy.columns_equal(df.a, df.b_dt).any()
assert not datacompy.columns_equal(df.b, df.a_dt).any()
def test_bad_date_columns():
"""If strings can't be coerced into dates then it should be false for the
whole column.
"""
df = pd.DataFrame(
[{"a": "2017-01-01", "b": "2017-01-01"}, {"a": "2017-01-01", "b": "217-01-01"}]
)
df["a_dt"] = pd.to_datetime(df["a"])
assert not datacompy.columns_equal(df.a_dt, df.b).any()
def test_rounded_date_columns():
"""If strings can't be coerced into dates then it should be false for the
whole column.
"""
df = pd.DataFrame(
[
{"a": "2017-01-01", "b": "2017-01-01 00:00:00.000000", "exp": True},
{"a": "2017-01-01", "b": "2017-01-01 00:00:00.123456", "exp": False},
{"a": "2017-01-01", "b": "2017-01-01 00:00:01.000000", "exp": False},
{"a": "2017-01-01", "b": "2017-01-01 00:00:00", "exp": True},
]
)
df["a_dt"] = pd.to_datetime(df["a"])
actual = datacompy.columns_equal(df.a_dt, df.b)
expected = df["exp"]
assert_series_equal(actual, expected, check_names=False)
def test_decimal_float_columns_equal():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": 1, "expected": True},
{"a": Decimal("1.3"), "b": 1.3, "expected": True},
{"a": Decimal("1.000003"), "b": 1.000003, "expected": True},
{"a": Decimal("1.000000004"), "b": 1.000000003, "expected": False},
{"a": Decimal("1.3"), "b": 1.2, "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": 1, "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_decimal_float_columns_equal_rel():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": 1, "expected": True},
{"a": Decimal("1.3"), "b": 1.3, "expected": True},
{"a": Decimal("1.000003"), "b": 1.000003, "expected": True},
{"a": Decimal("1.000000004"), "b": 1.000000003, "expected": True},
{"a": Decimal("1.3"), "b": 1.2, "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": 1, "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.001)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_decimal_columns_equal():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": Decimal("1.3"), "b": Decimal("1.3"), "expected": True},
{"a": Decimal("1.000003"), "b": Decimal("1.000003"), "expected": True},
{"a": Decimal("1.000000004"), "b": Decimal("1.000000003"), "expected": False},
{"a": Decimal("1.3"), "b": Decimal("1.2"), "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": Decimal("1"), "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_decimal_columns_equal_rel():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": Decimal("1.3"), "b": Decimal("1.3"), "expected": True},
{"a": Decimal("1.000003"), "b": Decimal("1.000003"), "expected": True},
{"a": Decimal("1.000000004"), "b": Decimal("1.000000003"), "expected": True},
{"a": Decimal("1.3"), "b": Decimal("1.2"), "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": Decimal("1"), "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.001)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_infinity_and_beyond():
df = pd.DataFrame(
[
{"a": np.inf, "b": np.inf, "expected": True},
{"a": -np.inf, "b": -np.inf, "expected": True},
{"a": -np.inf, "b": np.inf, "expected": False},
{"a": np.inf, "b": -np.inf, "expected": False},
{"a": 1, "b": 1, "expected": True},
{"a": 1, "b": 0, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_mixed_column():
df = pd.DataFrame(
[
{"a": "hi", "b": "hi", "expected": True},
{"a": 1, "b": 1, "expected": True},
{"a": np.inf, "b": np.inf, "expected": True},
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": 1, "b": "1", "expected": False},
{"a": 1, "b": "yo", "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_mixed_column_with_ignore_spaces():
df = pd.DataFrame(
[
{"a": "hi", "b": "hi ", "expected": True},
{"a": 1, "b": 1, "expected": True},
{"a": np.inf, "b": np.inf, "expected": True},
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": 1, "b": "1 ", "expected": False},
{"a": 1, "b": "yo ", "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_mixed_column_with_ignore_spaces_and_case():
df = pd.DataFrame(
[
{"a": "hi", "b": "hi ", "expected": True},
{"a": 1, "b": 1, "expected": True},
{"a": np.inf, "b": np.inf, "expected": True},
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": 1, "b": "1 ", "expected": False},
{"a": 1, "b": "yo ", "expected": False},
{"a": "Hi", "b": "hI ", "expected": True},
{"a": "HI", "b": "HI ", "expected": True},
{"a": "hi", "b": "hi ", "expected": True},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, ignore_spaces=True, ignore_case=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_compare_df_setter_bad():
df = pd.DataFrame([{"a": 1, "A": 2}, {"a": 2, "A": 2}])
with raises(TypeError, match="df1 must be a pandas DataFrame"):
compare = datacompy.Compare("a", "a", ["a"])
with raises(ValueError, match="df1 must have all columns from join_columns"):
compare = datacompy.Compare(df, df.copy(), ["b"])
with raises(ValueError, match="df1 must have unique column names"):
compare = datacompy.Compare(df, df.copy(), ["a"])
df_dupe = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 3}])
assert datacompy.Compare(df_dupe, df_dupe.copy(), ["a", "b"]).df1.equals(df_dupe)
def test_compare_df_setter_good():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
df2 = pd.DataFrame([{"A": 1, "B": 2}, {"A": 2, "B": 3}])
compare = datacompy.Compare(df1, df2, ["a"])
assert compare.df1.equals(df1)
assert compare.df2.equals(df2)
assert compare.join_columns == ["a"]
compare = datacompy.Compare(df1, df2, ["A", "b"])
assert compare.df1.equals(df1)
assert compare.df2.equals(df2)
assert compare.join_columns == ["a", "b"]
def test_compare_df_setter_different_cases():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
df2 = pd.DataFrame([{"A": 1, "b": 2}, {"A": 2, "b": 3}])
compare = datacompy.Compare(df1, df2, ["a"])
assert compare.df1.equals(df1)
assert compare.df2.equals(df2)
def test_compare_df_setter_bad_index():
df = pd.DataFrame([{"a": 1, "A": 2}, {"a": 2, "A": 2}])
with raises(TypeError, match="df1 must be a pandas DataFrame"):
compare = datacompy.Compare("a", "a", on_index=True)
with raises(ValueError, match="df1 must have unique column names"):
compare = datacompy.Compare(df, df.copy(), on_index=True)
def test_compare_on_index_and_join_columns():
df = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
with raises(Exception, match="Only provide on_index or join_columns"):
compare = datacompy.Compare(df, df.copy(), on_index=True, join_columns=["a"])
def test_compare_df_setter_good_index():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 3}])
compare = datacompy.Compare(df1, df2, on_index=True)
assert compare.df1.equals(df1)
assert compare.df2.equals(df2)
def test_columns_overlap():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 3}])
compare = datacompy.Compare(df1, df2, ["a"])
assert compare.df1_unq_columns() == set()
assert compare.df2_unq_columns() == set()
assert compare.intersect_columns() == {"a", "b"}
def test_columns_no_overlap():
df1 = pd.DataFrame([{"a": 1, "b": 2, "c": "hi"}, {"a": 2, "b": 2, "c": "yo"}])
df2 = pd.DataFrame([{"a": 1, "b": 2, "d": "oh"}, {"a": 2, "b": 3, "d": "ya"}])
compare = datacompy.Compare(df1, df2, ["a"])
assert compare.df1_unq_columns() == {"c"}
assert compare.df2_unq_columns() == {"d"}
assert compare.intersect_columns() == {"a", "b"}
def test_10k_rows():
df1 = pd.DataFrame(np.random.randint(0, 100, size=(10000, 2)), columns=["b", "c"])
df1.reset_index(inplace=True)
df1.columns = ["a", "b", "c"]
df2 = df1.copy()
df2["b"] = df2["b"] + 0.1
compare_tol = datacompy.Compare(df1, df2, ["a"], abs_tol=0.2)
assert compare_tol.matches()
assert len(compare_tol.df1_unq_rows) == 0
assert len(compare_tol.df2_unq_rows) == 0
assert compare_tol.intersect_columns() == {"a", "b", "c"}
assert compare_tol.all_columns_match()
assert compare_tol.all_rows_overlap()
assert compare_tol.intersect_rows_match()
compare_no_tol = datacompy.Compare(df1, df2, ["a"])
assert not compare_no_tol.matches()
assert len(compare_no_tol.df1_unq_rows) == 0
assert len(compare_no_tol.df2_unq_rows) == 0
assert compare_no_tol.intersect_columns() == {"a", "b", "c"}
assert compare_no_tol.all_columns_match()
assert compare_no_tol.all_rows_overlap()
assert not compare_no_tol.intersect_rows_match()
@mock.patch("datacompy.logging.debug")
def test_subset(mock_debug):
df1 = pd.DataFrame([{"a": 1, "b": 2, "c": "hi"}, {"a": 2, "b": 2, "c": "yo"}])
df2 = pd.DataFrame([{"a": 1, "c": "hi"}])
comp = datacompy.Compare(df1, df2, ["a"])
assert comp.subset()
assert mock_debug.called_with("Checking equality")
@mock.patch("datacompy.logging.info")
def test_not_subset(mock_info):
df1 = | pd.DataFrame([{"a": 1, "b": 2, "c": "hi"}, {"a": 2, "b": 2, "c": "yo"}]) | pandas.DataFrame |
import ast
import pandas as pd
import numpy as np
from sklearn.preprocessing import OneHotEncoder
class CleanedStrains():
"""Clean output of WeedmapsStrains class"""
def __init__(self, filepath):
"""Initialize CleanedStrains class
Args:
filepath: Output from WeedmapsStrains `scrape_to_csv` method
"""
self.filepath = filepath
self.df = pd.read_csv(self.filepath)
def clean_crop_save(self, output_filepath):
"""Clean, crop, and save output from WeedmapsStrains `scrape_to_csv` method
Args:
output_filepath: path to save csv file with cleaned and cropped data
Returns:
Cleaned and cropped data at output_filepath
"""
self.output_filepath = output_filepath
def cleaning_function(cell_contents):
if type(cell_contents) == dict:
return cell_contents["name"]
return "No data"
        def remove_no_data_from_tuple_series(cellcontents):
            intermediate = list(cellcontents)
            if "No data" in intermediate:
                # list.remove mutates in place and returns None, so remove the
                # sentinel first and then return the cleaned list.
                intermediate.remove("No data")
            return intermediate
self.effects_df = pd.DataFrame.from_records(self.df["effects"].apply(ast.literal_eval))
self.null_dict = {"name":"no data"}
self.effects_df.replace([None], self.null_dict, inplace=True)
self.effects_result = list(zip(self.effects_df[0].apply(cleaning_function).tolist(),
self.effects_df[1].apply(cleaning_function).tolist(),
self.effects_df[2].apply(cleaning_function).tolist()))
self.df["effects_cleaned"] = pd.Series(self.effects_result)
self.flavors_df = pd.DataFrame.from_records(self.df["flavors"].apply(ast.literal_eval))
self.flavors_df.replace([None], self.null_dict, inplace=True)
self.flavors_result = list(zip(self.flavors_df[0].apply(cleaning_function).tolist(),
self.flavors_df[1].apply(cleaning_function).tolist(),
self.flavors_df[2].apply(cleaning_function).tolist()))
self.df["flavors_cleaned"] = | pd.Series(self.flavors_result) | pandas.Series |
import pickle
import json
import pandas as pd
from annoy import AnnoyIndex
from bert_squad import QABot
import feature_pipeline as fp
# Needs to be in memory to load pipeline
from feature_pipeline import DenseTransformer
N_ANSWERS = 5
section_text_df = pd.read_csv(fp.ARTICLE_SECTION_DF_PATH)
full_text_df = | pd.read_csv(fp.ARTICLE_FULL_DF_PATH) | pandas.read_csv |
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.generic import ABCIndexClass
import pandas as pd
import pandas._testing as tm
from pandas.api.types import is_float, is_float_dtype, is_integer, is_scalar
from pandas.core.arrays import IntegerArray, integer_array
from pandas.core.arrays.integer import (
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
)
from pandas.tests.extension.base import BaseOpsUtil
def make_data():
return list(range(8)) + [np.nan] + list(range(10, 98)) + [np.nan] + [99, 100]
@pytest.fixture(
params=[
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
]
)
def dtype(request):
return request.param()
@pytest.fixture
def data(dtype):
return integer_array(make_data(), dtype=dtype)
@pytest.fixture
def data_missing(dtype):
return integer_array([np.nan, 1], dtype=dtype)
@pytest.fixture(params=["data", "data_missing"])
def all_data(request, data, data_missing):
"""Parametrized fixture giving 'data' and 'data_missing'"""
if request.param == "data":
return data
elif request.param == "data_missing":
return data_missing
def test_dtypes(dtype):
# smoke tests on auto dtype construction
if dtype.is_signed_integer:
assert np.dtype(dtype.type).kind == "i"
else:
assert np.dtype(dtype.type).kind == "u"
assert dtype.name is not None
@pytest.mark.parametrize(
"dtype, expected",
[
(Int8Dtype(), "Int8Dtype()"),
(Int16Dtype(), "Int16Dtype()"),
(Int32Dtype(), "Int32Dtype()"),
(Int64Dtype(), "Int64Dtype()"),
(UInt8Dtype(), "UInt8Dtype()"),
(UInt16Dtype(), "UInt16Dtype()"),
(UInt32Dtype(), "UInt32Dtype()"),
(UInt64Dtype(), "UInt64Dtype()"),
],
)
def test_repr_dtype(dtype, expected):
assert repr(dtype) == expected
def test_repr_array():
result = repr(integer_array([1, None, 3]))
expected = "<IntegerArray>\n[1, <NA>, 3]\nLength: 3, dtype: Int64"
assert result == expected
def test_repr_array_long():
data = integer_array([1, 2, None] * 1000)
expected = (
"<IntegerArray>\n"
"[ 1, 2, <NA>, 1, 2, <NA>, 1, 2, <NA>, 1,\n"
" ...\n"
" <NA>, 1, 2, <NA>, 1, 2, <NA>, 1, 2, <NA>]\n"
"Length: 3000, dtype: Int64"
)
result = repr(data)
assert result == expected
class TestConstructors:
def test_uses_pandas_na(self):
a = pd.array([1, None], dtype=pd.Int64Dtype())
assert a[1] is pd.NA
def test_from_dtype_from_float(self, data):
# construct from our dtype & string dtype
dtype = data.dtype
# from float
expected = pd.Series(data)
result = pd.Series(
data.to_numpy(na_value=np.nan, dtype="float"), dtype=str(dtype)
)
tm.assert_series_equal(result, expected)
# from int / list
expected = pd.Series(data)
result = pd.Series(np.array(data).tolist(), dtype=str(dtype))
tm.assert_series_equal(result, expected)
# from int / array
expected = pd.Series(data).dropna().reset_index(drop=True)
dropped = np.array(data.dropna()).astype(np.dtype((dtype.type)))
result = pd.Series(dropped, dtype=str(dtype))
tm.assert_series_equal(result, expected)
class TestArithmeticOps(BaseOpsUtil):
def _check_divmod_op(self, s, op, other, exc=None):
super()._check_divmod_op(s, op, other, None)
def _check_op(self, s, op_name, other, exc=None):
op = self.get_op_from_name(op_name)
result = op(s, other)
# compute expected
mask = s.isna()
# if s is a DataFrame, squeeze to a Series
# for comparison
if isinstance(s, pd.DataFrame):
result = result.squeeze()
s = s.squeeze()
mask = mask.squeeze()
# other array is an Integer
if isinstance(other, IntegerArray):
omask = getattr(other, "mask", None)
mask = getattr(other, "data", other)
if omask is not None:
mask |= omask
# 1 ** na is na, so need to unmask those
if op_name == "__pow__":
mask = np.where(~s.isna() & (s == 1), False, mask)
elif op_name == "__rpow__":
other_is_one = other == 1
if isinstance(other_is_one, pd.Series):
other_is_one = other_is_one.fillna(False)
mask = np.where(other_is_one, False, mask)
# float result type or float op
if (
is_float_dtype(other)
or is_float(other)
or op_name in ["__rtruediv__", "__truediv__", "__rdiv__", "__div__"]
):
rs = s.astype("float")
expected = op(rs, other)
self._check_op_float(result, expected, mask, s, op_name, other)
# integer result type
else:
rs = pd.Series(s.values._data, name=s.name)
expected = op(rs, other)
self._check_op_integer(result, expected, mask, s, op_name, other)
def _check_op_float(self, result, expected, mask, s, op_name, other):
# check comparisons that are resulting in float dtypes
expected[mask] = np.nan
if "floordiv" in op_name:
# Series op sets 1//0 to np.inf, which IntegerArray does not do (yet)
mask2 = np.isinf(expected) & np.isnan(result)
expected[mask2] = np.nan
tm.assert_series_equal(result, expected)
def _check_op_integer(self, result, expected, mask, s, op_name, other):
# check comparisons that are resulting in integer dtypes
# to compare properly, we convert the expected
# to float, mask to nans and convert infs
# if we have uints then we process as uints
# then convert to float
# and we ultimately want to create a IntArray
# for comparisons
fill_value = 0
# mod/rmod turn floating 0 into NaN while
# integer works as expected (no nan)
if op_name in ["__mod__", "__rmod__"]:
if is_scalar(other):
if other == 0:
expected[s.values == 0] = 0
else:
expected = expected.fillna(0)
else:
expected[
(s.values == 0).fillna(False)
& ((expected == 0).fillna(False) | expected.isna())
] = 0
try:
expected[
((expected == np.inf) | (expected == -np.inf)).fillna(False)
] = fill_value
original = expected
expected = expected.astype(s.dtype)
except ValueError:
expected = expected.astype(float)
expected[
((expected == np.inf) | (expected == -np.inf)).fillna(False)
] = fill_value
original = expected
expected = expected.astype(s.dtype)
expected[mask] = pd.NA
# assert that the expected astype is ok
# (skip for unsigned as they have wrap around)
if not s.dtype.is_unsigned_integer:
original = pd.Series(original)
# we need to fill with 0's to emulate what an astype('int') does
# (truncation) for certain ops
if op_name in ["__rtruediv__", "__rdiv__"]:
mask |= original.isna()
original = original.fillna(0).astype("int")
original = original.astype("float")
original[mask] = np.nan
tm.assert_series_equal(original, expected.astype("float"))
# assert our expected result
tm.assert_series_equal(result, expected)
def test_arith_integer_array(self, data, all_arithmetic_operators):
# we operate with a rhs of an integer array
op = all_arithmetic_operators
s = pd.Series(data)
rhs = pd.Series([1] * len(data), dtype=data.dtype)
rhs.iloc[-1] = np.nan
self._check_op(s, op, rhs)
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
# scalar
op = all_arithmetic_operators
s = pd.Series(data)
self._check_op(s, op, 1, exc=TypeError)
def test_arith_frame_with_scalar(self, data, all_arithmetic_operators):
# frame & scalar
op = all_arithmetic_operators
df = pd.DataFrame({"A": data})
self._check_op(df, op, 1, exc=TypeError)
def test_arith_series_with_array(self, data, all_arithmetic_operators):
# ndarray & other series
op = all_arithmetic_operators
s = pd.Series(data)
other = np.ones(len(s), dtype=s.dtype.type)
self._check_op(s, op, other, exc=TypeError)
def test_arith_coerce_scalar(self, data, all_arithmetic_operators):
op = all_arithmetic_operators
s = pd.Series(data)
other = 0.01
self._check_op(s, op, other)
@pytest.mark.parametrize("other", [1.0, np.array(1.0)])
def test_arithmetic_conversion(self, all_arithmetic_operators, other):
# if we have a float operand we should have a float result
# if that is equal to an integer
op = self.get_op_from_name(all_arithmetic_operators)
s = pd.Series([1, 2, 3], dtype="Int64")
result = op(s, other)
assert result.dtype is np.dtype("float")
def test_arith_len_mismatch(self, all_arithmetic_operators):
# operating with a list-like with non-matching length raises
op = self.get_op_from_name(all_arithmetic_operators)
other = np.array([1.0])
s = pd.Series([1, 2, 3], dtype="Int64")
with pytest.raises(ValueError, match="Lengths must match"):
op(s, other)
@pytest.mark.parametrize("other", [0, 0.5])
def test_arith_zero_dim_ndarray(self, other):
arr = integer_array([1, None, 2])
result = arr + np.array(other)
expected = arr + other
tm.assert_equal(result, expected)
def test_error(self, data, all_arithmetic_operators):
# invalid ops
op = all_arithmetic_operators
s = pd.Series(data)
ops = getattr(s, op)
opa = getattr(data, op)
# invalid scalars
msg = (
r"(:?can only perform ops with numeric values)"
r"|(:?IntegerArray cannot perform the operation mod)"
)
with pytest.raises(TypeError, match=msg):
ops("foo")
with pytest.raises(TypeError, match=msg):
ops(pd.Timestamp("20180101"))
# invalid array-likes
with pytest.raises(TypeError, match=msg):
ops(pd.Series("foo", index=s.index))
if op != "__rpow__":
# TODO(extension)
# rpow with a datetimelike coerces the integer array incorrectly
msg = (
"can only perform ops with numeric values|"
"cannot perform .* with this index type: DatetimeArray|"
"Addition/subtraction of integers and integer-arrays "
"with DatetimeArray is no longer supported. *"
)
with pytest.raises(TypeError, match=msg):
ops(pd.Series(pd.date_range("20180101", periods=len(s))))
# 2d
result = opa(pd.DataFrame({"A": s}))
assert result is NotImplemented
msg = r"can only perform ops with 1-d structures"
with pytest.raises(NotImplementedError, match=msg):
opa(np.arange(len(s)).reshape(-1, len(s)))
@pytest.mark.parametrize("zero, negative", [(0, False), (0.0, False), (-0.0, True)])
def test_divide_by_zero(self, zero, negative):
# https://github.com/pandas-dev/pandas/issues/27398
a = pd.array([0, 1, -1, None], dtype="Int64")
result = a / zero
expected = np.array([np.nan, np.inf, -np.inf, np.nan])
if negative:
expected *= -1
tm.assert_numpy_array_equal(result, expected)
def test_pow_scalar(self):
a = pd.array([-1, 0, 1, None, 2], dtype="Int64")
result = a ** 0
expected = pd.array([1, 1, 1, 1, 1], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a ** 1
expected = pd.array([-1, 0, 1, None, 2], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a ** pd.NA
expected = pd.array([None, None, 1, None, None], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a ** np.nan
expected = np.array([np.nan, np.nan, 1, np.nan, np.nan], dtype="float64")
tm.assert_numpy_array_equal(result, expected)
# reversed
a = a[1:] # Can't raise integers to negative powers.
result = 0 ** a
expected = pd.array([1, 0, None, 0], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = 1 ** a
expected = pd.array([1, 1, 1, 1], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = pd.NA ** a
expected = pd.array([1, None, None, None], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = np.nan ** a
expected = np.array([1, np.nan, np.nan, np.nan], dtype="float64")
tm.assert_numpy_array_equal(result, expected)
def test_pow_array(self):
a = integer_array([0, 0, 0, 1, 1, 1, None, None, None])
b = integer_array([0, 1, None, 0, 1, None, 0, 1, None])
result = a ** b
expected = integer_array([1, 0, None, 1, 1, 1, 1, None, None])
tm.assert_extension_array_equal(result, expected)
def test_rpow_one_to_na(self):
# https://github.com/pandas-dev/pandas/issues/22022
# https://github.com/pandas-dev/pandas/issues/29997
arr = integer_array([np.nan, np.nan])
result = np.array([1.0, 2.0]) ** arr
expected = np.array([1.0, np.nan])
tm.assert_numpy_array_equal(result, expected)
class TestComparisonOps(BaseOpsUtil):
def _compare_other(self, data, op_name, other):
op = self.get_op_from_name(op_name)
# array
result = pd.Series(op(data, other))
expected = pd.Series(op(data._data, other), dtype="boolean")
# fill the nan locations
expected[data._mask] = pd.NA
tm.assert_series_equal(result, expected)
# series
s = pd.Series(data)
result = op(s, other)
expected = op(pd.Series(data._data), other)
# fill the nan locations
expected[data._mask] = pd.NA
expected = expected.astype("boolean")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("other", [True, False, pd.NA, -1, 0, 1])
def test_scalar(self, other, all_compare_operators):
op = self.get_op_from_name(all_compare_operators)
a = pd.array([1, 0, None], dtype="Int64")
result = op(a, other)
if other is pd.NA:
expected = pd.array([None, None, None], dtype="boolean")
else:
values = op(a._data, other)
expected = pd.arrays.BooleanArray(values, a._mask, copy=True)
tm.assert_extension_array_equal(result, expected)
# ensure we haven't mutated anything inplace
result[0] = pd.NA
tm.assert_extension_array_equal(a, pd.array([1, 0, None], dtype="Int64"))
def test_array(self, all_compare_operators):
op = self.get_op_from_name(all_compare_operators)
a = pd.array([0, 1, 2, None, None, None], dtype="Int64")
b = pd.array([0, 1, None, 0, 1, None], dtype="Int64")
result = op(a, b)
values = op(a._data, b._data)
mask = a._mask | b._mask
expected = pd.arrays.BooleanArray(values, mask)
tm.assert_extension_array_equal(result, expected)
# ensure we haven't mutated anything inplace
result[0] = pd.NA
tm.assert_extension_array_equal(
a, pd.array([0, 1, 2, None, None, None], dtype="Int64")
)
tm.assert_extension_array_equal(
b, pd.array([0, 1, None, 0, 1, None], dtype="Int64")
)
def test_compare_with_booleanarray(self, all_compare_operators):
op = self.get_op_from_name(all_compare_operators)
a = pd.array([True, False, None] * 3, dtype="boolean")
b = pd.array([0] * 3 + [1] * 3 + [None] * 3, dtype="Int64")
other = pd.array([False] * 3 + [True] * 3 + [None] * 3, dtype="boolean")
expected = op(a, other)
result = op(a, b)
tm.assert_extension_array_equal(result, expected)
def test_no_shared_mask(self, data):
result = data + 1
assert np.shares_memory(result._mask, data._mask) is False
def test_compare_to_string(self, any_nullable_int_dtype):
# GH 28930
s = pd.Series([1, None], dtype=any_nullable_int_dtype)
result = s == "a"
expected = pd.Series([False, pd.NA], dtype="boolean")
self.assert_series_equal(result, expected)
def test_compare_to_int(self, any_nullable_int_dtype, all_compare_operators):
# GH 28930
s1 = pd.Series([1, None, 3], dtype=any_nullable_int_dtype)
s2 = pd.Series([1, None, 3], dtype="float")
method = getattr(s1, all_compare_operators)
result = method(2)
method = getattr(s2, all_compare_operators)
expected = method(2).astype("boolean")
expected[s2.isna()] = pd.NA
self.assert_series_equal(result, expected)
class TestCasting:
@pytest.mark.parametrize("dropna", [True, False])
def test_construct_index(self, all_data, dropna):
# ensure that we do not coerce to Float64Index, rather
# keep as Index
all_data = all_data[:10]
if dropna:
other = np.array(all_data[~all_data.isna()])
else:
other = all_data
result = pd.Index(integer_array(other, dtype=all_data.dtype))
expected = pd.Index(other, dtype=object)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("dropna", [True, False])
def test_astype_index(self, all_data, dropna):
# as an int/uint index to Index
all_data = all_data[:10]
if dropna:
other = all_data[~all_data.isna()]
else:
other = all_data
dtype = all_data.dtype
idx = pd.Index(np.array(other))
assert isinstance(idx, ABCIndexClass)
result = idx.astype(dtype)
expected = idx.astype(object).astype(dtype)
tm.assert_index_equal(result, expected)
def test_astype(self, all_data):
all_data = all_data[:10]
ints = all_data[~all_data.isna()]
mixed = all_data
dtype = Int8Dtype()
# coerce to same type - ints
s = pd.Series(ints)
result = s.astype(all_data.dtype)
expected = pd.Series(ints)
tm.assert_series_equal(result, expected)
# coerce to same other - ints
s = pd.Series(ints)
result = s.astype(dtype)
expected = pd.Series(ints, dtype=dtype)
tm.assert_series_equal(result, expected)
# coerce to same numpy_dtype - ints
s = pd.Series(ints)
result = s.astype(all_data.dtype.numpy_dtype)
expected = pd.Series(ints._data.astype(all_data.dtype.numpy_dtype))
tm.assert_series_equal(result, expected)
# coerce to same type - mixed
s = pd.Series(mixed)
result = s.astype(all_data.dtype)
expected = pd.Series(mixed)
tm.assert_series_equal(result, expected)
# coerce to same other - mixed
s = pd.Series(mixed)
result = s.astype(dtype)
expected = pd.Series(mixed, dtype=dtype)
tm.assert_series_equal(result, expected)
# coerce to same numpy_dtype - mixed
s = pd.Series(mixed)
msg = r"cannot convert to .*-dtype NumPy array with missing values.*"
with pytest.raises(ValueError, match=msg):
s.astype(all_data.dtype.numpy_dtype)
# coerce to object
s = pd.Series(mixed)
result = s.astype("object")
expected = pd.Series(np.asarray(mixed))
tm.assert_series_equal(result, expected)
def test_astype_to_larger_numpy(self):
a = pd.array([1, 2], dtype="Int32")
result = a.astype("int64")
expected = np.array([1, 2], dtype="int64")
tm.assert_numpy_array_equal(result, expected)
a = pd.array([1, 2], dtype="UInt32")
result = a.astype("uint64")
expected = np.array([1, 2], dtype="uint64")
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [Int8Dtype(), "Int8", UInt32Dtype(), "UInt32"])
def test_astype_specific_casting(self, dtype):
s = pd.Series([1, 2, 3], dtype="Int64")
result = s.astype(dtype)
expected = pd.Series([1, 2, 3], dtype=dtype)
tm.assert_series_equal(result, expected)
s = pd.Series([1, 2, 3, None], dtype="Int64")
result = s.astype(dtype)
expected = pd.Series([1, 2, 3, None], dtype=dtype)
tm.assert_series_equal(result, expected)
def test_construct_cast_invalid(self, dtype):
msg = "cannot safely"
arr = [1.2, 2.3, 3.7]
with pytest.raises(TypeError, match=msg):
integer_array(arr, dtype=dtype)
with pytest.raises(TypeError, match=msg):
pd.Series(arr).astype(dtype)
arr = [1.2, 2.3, 3.7, np.nan]
with pytest.raises(TypeError, match=msg):
integer_array(arr, dtype=dtype)
with pytest.raises(TypeError, match=msg):
pd.Series(arr).astype(dtype)
@pytest.mark.parametrize("in_series", [True, False])
def test_to_numpy_na_nan(self, in_series):
a = pd.array([0, 1, None], dtype="Int64")
if in_series:
a = pd.Series(a)
result = a.to_numpy(dtype="float64", na_value=np.nan)
expected = np.array([0.0, 1.0, np.nan], dtype="float64")
tm.assert_numpy_array_equal(result, expected)
result = a.to_numpy(dtype="int64", na_value=-1)
expected = np.array([0, 1, -1], dtype="int64")
tm.assert_numpy_array_equal(result, expected)
result = a.to_numpy(dtype="bool", na_value=False)
expected = np.array([False, True, False], dtype="bool")
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("in_series", [True, False])
@pytest.mark.parametrize("dtype", ["int32", "int64", "bool"])
def test_to_numpy_dtype(self, dtype, in_series):
a = pd.array([0, 1], dtype="Int64")
if in_series:
a = pd.Series(a)
result = a.to_numpy(dtype=dtype)
expected = np.array([0, 1], dtype=dtype)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", ["float64", "int64", "bool"])
def test_to_numpy_na_raises(self, dtype):
a = pd.array([0, 1, None], dtype="Int64")
with pytest.raises(ValueError, match=dtype):
a.to_numpy(dtype=dtype)
def test_astype_str(self):
a = pd.array([1, 2, None], dtype="Int64")
expected = np.array(["1", "2", "<NA>"], dtype=object)
tm.assert_numpy_array_equal(a.astype(str), expected)
tm.assert_numpy_array_equal(a.astype("str"), expected)
def test_astype_boolean(self):
# https://github.com/pandas-dev/pandas/issues/31102
a = pd.array([1, 0, -1, 2, None], dtype="Int64")
result = a.astype("boolean")
expected = pd.array([True, False, True, True, None], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
def test_frame_repr(data_missing):
df = pd.DataFrame({"A": data_missing})
result = repr(df)
expected = " A\n0 <NA>\n1 1"
assert result == expected
def test_conversions(data_missing):
# astype to object series
df = pd.DataFrame({"A": data_missing})
result = df["A"].astype("object")
expected = pd.Series(np.array([np.nan, 1], dtype=object), name="A")
tm.assert_series_equal(result, expected)
# convert to object ndarray
# we assert that we are exactly equal
# including type conversions of scalars
result = df["A"].astype("object").values
expected = np.array([pd.NA, 1], dtype=object)
tm.assert_numpy_array_equal(result, expected)
for r, e in zip(result, expected):
if pd.isnull(r):
assert pd.isnull(e)
elif is_integer(r):
assert r == e
assert is_integer(e)
else:
assert r == e
assert type(r) == type(e)
def test_integer_array_constructor():
values = np.array([1, 2, 3, 4], dtype="int64")
mask = np.array([False, False, False, True], dtype="bool")
result = IntegerArray(values, mask)
expected = integer_array([1, 2, 3, np.nan], dtype="int64")
tm.assert_extension_array_equal(result, expected)
msg = r".* should be .* numpy array. Use the 'integer_array' function instead"
with pytest.raises(TypeError, match=msg):
IntegerArray(values.tolist(), mask)
with pytest.raises(TypeError, match=msg):
IntegerArray(values, mask.tolist())
with pytest.raises(TypeError, match=msg):
IntegerArray(values.astype(float), mask)
msg = r"__init__\(\) missing 1 required positional argument: 'mask'"
with pytest.raises(TypeError, match=msg):
IntegerArray(values)
@pytest.mark.parametrize(
"a, b",
[
([1, None], [1, np.nan]),
([None], [np.nan]),
([None, np.nan], [np.nan, np.nan]),
([np.nan, np.nan], [np.nan, np.nan]),
],
)
def test_integer_array_constructor_none_is_nan(a, b):
result = integer_array(a)
expected = integer_array(b)
tm.assert_extension_array_equal(result, expected)
def test_integer_array_constructor_copy():
values = np.array([1, 2, 3, 4], dtype="int64")
mask = np.array([False, False, False, True], dtype="bool")
result = IntegerArray(values, mask)
assert result._data is values
assert result._mask is mask
result = IntegerArray(values, mask, copy=True)
assert result._data is not values
assert result._mask is not mask
@pytest.mark.parametrize(
"values",
[
["foo", "bar"],
["1", "2"],
"foo",
1,
1.0,
pd.date_range("20130101", periods=2),
np.array(["foo"]),
[[1, 2], [3, 4]],
[np.nan, {"a": 1}],
],
)
def test_to_integer_array_error(values):
# error in converting existing arrays to IntegerArrays
msg = (
r"(:?.* cannot be converted to an IntegerDtype)"
r"|(:?values must be a 1D list-like)"
)
with pytest.raises(TypeError, match=msg):
integer_array(values)
def test_to_integer_array_inferred_dtype():
# if values has dtype -> respect it
result = integer_array(np.array([1, 2], dtype="int8"))
assert result.dtype == Int8Dtype()
result = integer_array(np.array([1, 2], dtype="int32"))
assert result.dtype == Int32Dtype()
# if values have no dtype -> always int64
result = integer_array([1, 2])
assert result.dtype == Int64Dtype()
def test_to_integer_array_dtype_keyword():
result = integer_array([1, 2], dtype="int8")
assert result.dtype == Int8Dtype()
# if values has dtype -> override it
result = integer_array(np.array([1, 2], dtype="int8"), dtype="int32")
assert result.dtype == Int32Dtype()
def test_to_integer_array_float():
result = integer_array([1.0, 2.0])
expected = integer_array([1, 2])
tm.assert_extension_array_equal(result, expected)
with pytest.raises(TypeError, match="cannot safely cast non-equivalent"):
integer_array([1.5, 2.0])
# for float dtypes, the itemsize is not preserved
result = integer_array(np.array([1.0, 2.0], dtype="float32"))
assert result.dtype == Int64Dtype()
@pytest.mark.parametrize(
"bool_values, int_values, target_dtype, expected_dtype",
[
([False, True], [0, 1], Int64Dtype(), Int64Dtype()),
([False, True], [0, 1], "Int64", Int64Dtype()),
([False, True, np.nan], [0, 1, np.nan], Int64Dtype(), Int64Dtype()),
],
)
def test_to_integer_array_bool(bool_values, int_values, target_dtype, expected_dtype):
result = integer_array(bool_values, dtype=target_dtype)
assert result.dtype == expected_dtype
expected = integer_array(int_values, dtype=target_dtype)
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize(
"values, to_dtype, result_dtype",
[
(np.array([1], dtype="int64"), None, Int64Dtype),
(np.array([1, np.nan]), None, Int64Dtype),
(np.array([1, np.nan]), "int8", Int8Dtype),
],
)
def test_to_integer_array(values, to_dtype, result_dtype):
# convert existing arrays to IntegerArrays
result = integer_array(values, dtype=to_dtype)
assert result.dtype == result_dtype()
expected = integer_array(values, dtype=result_dtype())
tm.assert_extension_array_equal(result, expected)
def test_cross_type_arithmetic():
df = pd.DataFrame(
{
"A": pd.Series([1, 2, np.nan], dtype="Int64"),
"B": pd.Series([1, np.nan, 3], dtype="UInt8"),
"C": [1, 2, 3],
}
)
result = df.A + df.C
expected = pd.Series([2, 4, np.nan], dtype="Int64")
tm.assert_series_equal(result, expected)
result = (df.A + df.C) * 3 == 12
expected = pd.Series([False, True, None], dtype="boolean")
tm.assert_series_equal(result, expected)
result = df.A + df.B
expected = pd.Series([2, np.nan, np.nan], dtype="Int64")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("op", ["sum", "min", "max", "prod"])
def test_preserve_dtypes(op):
# TODO(#22346): preserve Int64 dtype
# for ops that enable (mean would actually work here
# but generally it is a float return value)
df = pd.DataFrame(
{
"A": ["a", "b", "b"],
"B": [1, None, 3],
"C": integer_array([1, None, 3], dtype="Int64"),
}
)
# op
result = getattr(df.C, op)()
assert isinstance(result, int)
# groupby
result = getattr(df.groupby("A"), op)()
expected = pd.DataFrame(
{"B": np.array([1.0, 3.0]), "C": integer_array([1, 3], dtype="Int64")},
index=pd.Index(["a", "b"], name="A"),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("op", ["mean"])
def test_reduce_to_float(op):
# some reduce ops always return float, even if the result
# is a rounded number
df = pd.DataFrame(
{
"A": ["a", "b", "b"],
"B": [1, None, 3],
"C": integer_array([1, None, 3], dtype="Int64"),
}
)
# op
result = getattr(df.C, op)()
assert isinstance(result, float)
# groupby
result = getattr(df.groupby("A"), op)()
expected = pd.DataFrame(
{"B": np.array([1.0, 3.0]), "C": integer_array([1, 3], dtype="Int64")},
index=pd.Index(["a", "b"], name="A"),
)
tm.assert_frame_equal(result, expected)
def test_astype_nansafe():
# see gh-22343
arr = integer_array([np.nan, 1, 2], dtype="Int8")
msg = "cannot convert to 'uint32'-dtype NumPy array with missing values."
with pytest.raises(ValueError, match=msg):
arr.astype("uint32")
@pytest.mark.parametrize("ufunc", [np.abs, np.sign])
# np.sign emits a warning with nans, <https://github.com/numpy/numpy/issues/15127>
@pytest.mark.filterwarnings("ignore:invalid value encountered in sign")
def test_ufuncs_single_int(ufunc):
a = integer_array([1, 2, -3, np.nan])
result = ufunc(a)
expected = integer_array(ufunc(a.astype(float)))
tm.assert_extension_array_equal(result, expected)
s = pd.Series(a)
result = ufunc(s)
expected = pd.Series(integer_array(ufunc(a.astype(float))))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("ufunc", [np.log, np.exp, np.sin, np.cos, np.sqrt])
def test_ufuncs_single_float(ufunc):
a = integer_array([1, 2, -3, np.nan])
with np.errstate(invalid="ignore"):
result = ufunc(a)
expected = ufunc(a.astype(float))
tm.assert_numpy_array_equal(result, expected)
s = pd.Series(a)
with np.errstate(invalid="ignore"):
result = ufunc(s)
expected = ufunc(s.astype(float))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("ufunc", [np.add, np.subtract])
def test_ufuncs_binary_int(ufunc):
# two IntegerArrays
a = integer_array([1, 2, -3, np.nan])
result = ufunc(a, a)
expected = integer_array(ufunc(a.astype(float), a.astype(float)))
tm.assert_extension_array_equal(result, expected)
# IntegerArray with numpy array
arr = np.array([1, 2, 3, 4])
result = ufunc(a, arr)
expected = integer_array(ufunc(a.astype(float), arr))
tm.assert_extension_array_equal(result, expected)
result = ufunc(arr, a)
expected = integer_array(ufunc(arr, a.astype(float)))
| tm.assert_extension_array_equal(result, expected) | pandas._testing.assert_extension_array_equal |
import string
import sys
import matplotlib.pyplot as plt
import numpy
import pandas as pd
import seaborn as sn
import tabio.config
def transform_classes(sequence):
result = []
for i in sequence:
if i == '-':
result.append(i)
# elif i == "Table" or 'TableSparseMulticolumn' in i or "TableSparseColumnHeader" in i or "TableSparse" in i:
elif "Table" in i:
# elif i=="Table" or "Frame" in i:
result.append('Table')
else:
result.append('Else')
print(i+" "+result[-1])
return result
def pr_rec(status, hypothesis, reference):
print(reference[:100])
print(hypothesis[:100])
reference = transform_classes(reference)
hypothesis = transform_classes(hypothesis)
tp = 0
fn = 0
fp = 0
tn = 0
allref = reference
allhyp = hypothesis
for i in range(len(allref)):
if allref[i] == "Table":
if allhyp[i] == "Table":
tp = tp + 1.0
else:
fn = fn + 1.0
else:
if allhyp[i] == "Table":
fp = fp + 1.0
else:
tn = tn + 1.0
p = tp/(tp+fp)
r = tp/(tp+fn)
print("Precision="+str(p))
print("Recall="+str(r))
print("F1-Score="+str(2.0*p*r/(p+r)))
print("False positive rate="+str(fp/(tn+fp)))
def confusionMatrix(allref, allvi):
outputs = ''
classes = tabio.config.classes
array_vi = numpy.zeros(shape=(len(classes), len(classes)))
dicClasses = {classe: i for i, classe in enumerate(classes)}
for ref, vi in zip(allref, allvi):
if(ref not in classes or vi not in classes):
continue
array_vi[dicClasses[str(vi)]][dicClasses[str(ref)]] += 1
#array[line][column] : array[y][x]
df_cm_vi = | pd.DataFrame(array_vi, classes, classes) | pandas.DataFrame |
import streamlit as st
import pandas as pd
import numpy as np
import json
import pickle
from PIL import Image
import streamlit.components.v1 as components
import xgboost
import shap
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
import os
#Setting the page configuration
square_icon = Image.open(os.path.abspath('Project/streamlit/images/skincare_square.jpeg'))
long_icon = Image.open(os.path.abspath('Project/streamlit/images/top_banner.png'))
long_bw = Image.open(os.path.abspath("Project/streamlit/images/bw_long.jpeg"))
square_logo = Image.open(os.path.abspath("Project/streamlit/images/teen_beauty.png"))
logo = Image.open(os.path.abspath("Project/streamlit/images/logo_trans.png"))
end_icon = Image.open(os.path.abspath("Project/streamlit/images/lower_banner.png"))
st.set_page_config(
page_title="Product and Ingredient Analysis",
page_icon=square_logo,
layout="centered",
initial_sidebar_state="auto")
#loading necessary files
@st.cache
def fetch_data(path):
df = | pd.read_json(path) | pandas.read_json |
# coding=utf-8
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow_model_remediation.tools.tutorials_utils.uci.utils."""
import unittest.mock as mock
import pandas as pd
import tensorflow as tf
from tensorflow_model_remediation.tools.tutorials_utils.uci import utils
class UCIDataTest(tf.test.TestCase):
def setUp(self):
super().setUp()
utils._uci_full_dataframes = {} # Clear any caches.
def tearDown(self):
super().tearDown()
utils._uci_full_dataframes = {} # Clear any caches.
@mock.patch('pandas.read_csv', autospec=True)
def testGetTrainUciDataAsDefault(self, mock_read_csv):
_ = utils.get_uci_min_diff_datasets()
mock_read_csv.assert_called_once_with(
utils._UCI_DATA_URL_TEMPLATE.format('data'),
names=utils._UCI_COLUMN_NAMES,
header=None)
@mock.patch('pandas.read_csv', autospec=True)
def testGetTrainUciData(self, mock_read_csv):
_ = utils.get_uci_min_diff_datasets(split='train')
mock_read_csv.assert_called_once_with(
utils._UCI_DATA_URL_TEMPLATE.format('data'),
names=utils._UCI_COLUMN_NAMES,
header=None)
@mock.patch('pandas.read_csv', autospec=True)
def testGetTestUciData(self, mock_read_csv):
utils.get_uci_min_diff_datasets(split='test')
mock_read_csv.assert_called_once_with(
utils._UCI_DATA_URL_TEMPLATE.format('test'),
names=utils._UCI_COLUMN_NAMES,
header=None)
@mock.patch('pandas.read_csv', autospec=True)
def testGetSampledUciData(self, mock_read_csv):
mock_df = mock.MagicMock()
mock_df.sample = mock.MagicMock()
mock_read_csv.return_value = mock_df
sample = 0.45 # Arbitrary number.
utils.get_uci_min_diff_datasets(sample=sample)
mock_read_csv.assert_called_once()
mock_df.sample.assert_called_once_with(
frac=sample, replace=True, random_state=1)
@mock.patch('pandas.read_csv', autospec=True)
def testUciDataCached(self, mock_read_csv):
utils.get_uci_min_diff_datasets()
mock_read_csv.assert_called_once()
# pandas.read_csv should not be called again for the same split ('train')
mock_read_csv.reset_mock()
utils.get_uci_min_diff_datasets()
mock_read_csv.assert_not_called()
# pandas.read_csv should be called again for a different split
mock_read_csv.reset_mock()
utils.get_uci_min_diff_datasets(split='test')
mock_read_csv.assert_called_once()
# pandas.read_csv should not be called again (both splits have been cached)
mock_read_csv.reset_mock()
utils.get_uci_min_diff_datasets(split='train')
utils.get_uci_min_diff_datasets(split='test')
mock_read_csv.assert_not_called()
def testGetUciDataWithBadSplitRaisesError(self):
with self.assertRaisesRegex(ValueError,
'split must be.*train.*test.*given.*bad_split'):
utils.get_uci_min_diff_datasets('bad_split')
def testConvertToDataset(self):
expected_vals = range(10) # Arbitrary values
expected_labels = [0, 0, 1, 0, 1, 1, 1, 1, 0, 0] # Arbitrary labels.
data = {
'col': pd.Series(expected_vals),
'target': pd.Series(expected_labels)
}
df = pd.DataFrame(data)
dataset = utils.df_to_dataset(df)
vals, labels = zip(*[(val, label.numpy()) for val, label in dataset])
# Assert values are all dicts with exactly one column.
for val in vals:
self.assertSetEqual(set(val.keys()), set(['col']))
vals = [val['col'].numpy() for val in vals]
self.assertAllClose(vals, expected_vals)
self.assertAllClose(labels, expected_labels)
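  # The tests above and below pin down the assumed behaviour of
  # utils.df_to_dataset: the 'target' column is popped off as the label and
  # the remaining columns are wrapped as (features_dict, label) pairs in a
  # tf.data.Dataset, roughly tf.data.Dataset.from_tensor_slices((dict(df),
  # labels)), optionally shuffled and batched. This is inferred from the
  # tests, not verified against the implementation.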
def testConvertToDatasetWithShuffle(self):
expected_vals = range(10) # Arbitrary values
expected_labels = [0, 0, 1, 0, 1, 1, 1, 1, 0, 0] # Arbitrary labels.
data = {
'col': pd.Series(expected_vals),
'target': pd.Series(expected_labels)
}
df = pd.DataFrame(data)
dataset = utils.df_to_dataset(df, shuffle=True)
vals, labels = zip(*[(val, label.numpy()) for val, label in dataset])
# Assert values are all dicts with exactly one column.
for val in vals:
self.assertSetEqual(set(val.keys()), set(['col']))
vals = [val['col'].numpy() for val in vals]
# These values should *NOT* be close because vals should be out of order
# since we set shuffle=True. Note that it seems like the tests provide a
# consistent seed so we don't have to worry about getting unlucky. If this
# changes then we can look into explicitly setting the a seed.
self.assertNotAllClose(vals, expected_vals)
# Assert that the contents are the same, just reordered
self.assertAllClose(sorted(vals), sorted(expected_vals))
self.assertAllClose(sorted(labels), sorted(expected_labels))
class MinDiffDatasetsTest(tf.test.TestCase):
@mock.patch('tensorflow_model_remediation.tools.tutorials_utils'
'.uci.utils.df_to_dataset',
autospec=True)
@mock.patch('tensorflow_model_remediation.tools.tutorials_utils'
'.uci.utils.get_uci_data',
autospec=True)
def testGetMinDiffDatasets(self, mock_get_uci_data, mock_df_to_dataset):
mock_md_ds1 = mock.MagicMock()
mock_md_ds1.batch.return_value = 'batched_d1'
mock_md_ds2 = mock.MagicMock()
mock_md_ds2.batch.return_value = 'batched_d2'
mock_df_to_dataset.side_effect = ['og_ds', mock_md_ds1, mock_md_ds2]
sample = 0.56 # Arbitrary sample value.
split = 'fake_split'
original_batch_size = 19 # Arbitrary size.
min_diff_batch_size = 23 # Different arbitrary size.
res = utils.get_uci_min_diff_datasets(
split=split,
sample=sample,
original_batch_size=original_batch_size,
min_diff_batch_size=min_diff_batch_size)
# Assert outputs come from the right place.
expected_res = ('og_ds', 'batched_d1', 'batched_d2')
self.assertEqual(res, expected_res)
# Assert proper calls have been made.
self.assertEqual(mock_get_uci_data.call_count, 2)
mock_get_uci_data.assert_has_calls(
[mock.call(split=split, sample=sample),
mock.call(split=split)],
any_order=True)
self.assertEqual(mock_df_to_dataset.call_count, 3)
mock_df_to_dataset.assert_has_calls([
mock.call(mock.ANY, shuffle=True, batch_size=original_batch_size),
mock.call(mock.ANY, shuffle=True),
mock.call(mock.ANY, shuffle=True),
])
mock_md_ds1.batch.assert_called_once_with(
min_diff_batch_size, drop_remainder=True)
mock_md_ds2.batch.assert_called_once_with(
min_diff_batch_size, drop_remainder=True)
@mock.patch('tensorflow_model_remediation.tools.tutorials_utils'
'.uci.utils.get_uci_data',
autospec=True)
def testGetMinDiffDatasetsDefaults(self, mock_get_uci_data):
_ = utils.get_uci_min_diff_datasets()
mock_get_uci_data.assert_has_calls(
[mock.call(split='train', sample=None),
mock.call(split='train')],
any_order=True)
@mock.patch('tensorflow_model_remediation.min_diff.keras.utils.'
'pack_min_diff_data',
autospec=True)
@mock.patch('tensorflow_model_remediation.tools.tutorials_utils'
'.uci.utils.get_uci_min_diff_datasets',
autospec=True)
def testGetUciWithMinDiffDataset(self, mock_get_uci_min_diff_datasets,
mock_pack_data):
mock_get_uci_min_diff_datasets.return_value = ('og', 'md1', 'md2')
mock_pack_data.return_value = 'packed_data'
sample = 0.56 # Arbitrary sample value.
split = 'fake_split'
res = utils.get_uci_with_min_diff_dataset(split=split, sample=sample)
mock_get_uci_min_diff_datasets.assert_called_once_with(
split=split, sample=sample)
mock_pack_data.assert_called_once_with(
original_dataset='og',
nonsensitive_group_dataset='md1',
sensitive_group_dataset='md2')
self.assertEqual(res, 'packed_data')
@mock.patch('tensorflow_model_remediation.min_diff.keras.utils.'
'pack_min_diff_data',
autospec=True)
@mock.patch('tensorflow_model_remediation.tools.tutorials_utils'
'.uci.utils.get_uci_min_diff_datasets',
autospec=True)
def testGetUciWithMinDiffDatasetDefaults(self, mock_get_uci_min_diff_datasets,
mock_pack_data):
mock_ds = mock.MagicMock()
mock_get_uci_min_diff_datasets.return_value = (mock_ds, mock_ds, mock_ds)
_ = utils.get_uci_with_min_diff_dataset()
mock_get_uci_min_diff_datasets.assert_called_once_with(
split='train', sample=None)
class UCIModelTest(tf.test.TestCase):
def setUp(self):
super().setUp()
self.read_csv_patch = mock.patch('pandas.read_csv', autospec=True)
mock_read_csv = self.read_csv_patch.start()
mock_data = { # All values are realistic but arbitrary.
'age': pd.Series([25]),
'workclass': pd.Series(['Private']),
'fnlwgt': pd.Series([12456]),
'education': pd.Series(['Bachelors']),
'education-num': pd.Series([13]),
'marital-status': pd.Series(['Never-married']),
'race': pd.Series(['White']),
'occupation': pd.Series(['Tech-support']),
'relationship': | pd.Series(['Husband']) | pandas.Series |
# -*- coding: utf-8 -*-
# @Author: gunjianpan
# @Date: 2019-01-24 15:41:13
# @Last Modified by: gunjianpan
# @Last Modified time: 2019-03-04 19:10:32
from util.util import begin_time, end_time
import lightgbm as lgb
import numpy as np
import pandas as pd
import warnings
from sklearn.datasets import make_classification
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn import ensemble
from sklearn import model_selection
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
warnings.filterwarnings('ignore')
def drop_col_not_req(df, cols):
df.drop(cols, axis=1, inplace=True)
def find_primenum(uppernum):
"""
find all prime numbers x in [2, uppernum]
"""
assert isinstance(uppernum, int)
assert uppernum >= 2
temp = range(2, uppernum + 1)
result = []
while len(temp) > 0:
result.append(temp[0])
temp = [index for index in temp[1:] if index % temp[0]]
return result
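# Illustrative usage of the sieve above: find_primenum(10) returns [2, 3, 5, 7].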
class elo_lgb(object):
"""
train for Elo Merchant Category Recommendation by lightgbm
"""
def data_pre(self, wait, uppernum=20000, train_len=201917):
"""
preprocess the raw data: fill missing months, derive date / ID features and one-hot encode the categorical columns
"""
wait.first_active_month.fillna('2018-02', inplace=True)
wait['Year'] = wait.first_active_month.map(
lambda day: int(day[2:4])).astype('int8')
wait['Month'] = wait.first_active_month.map(
lambda day: int(day[5:7])).astype('int8')
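# pd.get_dummies one-hot encodes a column, e.g. pd.get_dummies(pd.Series([17, 18, 17]), prefix='Year')
# yields 0/1 indicator columns Year_17 and Year_18.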
year = pd.get_dummies(
wait['Year'], prefix=wait[['Year']].columns[0]).astype('int8')
month = pd.get_dummies(
wait['Month'], prefix=wait[['Month']].columns[0]).astype('int8')
del wait['first_active_month']
wait['ID1'] = wait.card_id.map(lambda ID: int('0x' + ID[6:15], 16))
# prime_list = find_primenum(uppernum)
# prime_list = prime_list[-20:]
# print(prime_list)
# maxIndex, maxStd = [0, 0]
# print('prime begin')
# temp_wait = wait[:train_len]
# for index in prime_list:
# ID2 = temp_wait.ID1.map(lambda ID: ID % index).astype('int16')
# pre = pd.concat([ID2, temp_wait.target], axis=1)
# temp_std = pre.groupby(by=['ID1']).mean().target.std()
# if maxStd < temp_std:
# maxIndex = index
# maxStd = temp_std
# print('prime end', maxIndex)
# wait['ID2'] = wait.ID1.map(lambda ID: ID % maxIndex)
# ID3 = pd.get_dummies(wait['ID2'], prefix=wait[['ID2']].columns[0])
del wait['card_id']
print('ID end')
feature1 = pd.get_dummies(
wait['feature_1'], prefix=wait[['feature_1']].columns[0]).astype('int8')
feature2 = pd.get_dummies(
wait['feature_2'], prefix=wait[['feature_2']].columns[0]).astype('int8')
feature3 = pd.get_dummies(
wait['feature_3'], prefix=wait[['feature_3']].columns[0]).astype('int8')
del wait['target']
print('feature end')
test = pd.concat([wait, year, month, feature1,
feature2, feature3], axis=1)
print('wait begin')
# wait = ID3
print('copy end')
for index in test.axes[1]:
wait[index] = test[index]
print('data pre end')
return wait.values
def load_data(self, model=True):
"""
load data for the specified mode
@param model: True - train, False - predict
"""
print('Load data...')
if model:
pre = pd.read_csv('elo/data/train.csv')
target = pre.target.values
data = self.data_pre(pre, train_len=len(target))
X_train, X_test, y_train, y_test = train_test_split(
data, target, test_size=0.2)
print('data split end')
# X_train = data
# y_train = target
else:
pre = pd.read_csv('elo/data/train.csv')
y_train = pre.target.values
train_len = len(y_train)
temp_test = pd.read_csv('elo/data/test.csv')
y_test = pd.read_csv(
'elo/data/sample_submission.csv').target.values
temp_test['target'] = y_test  # assign as a column, not an attribute
# return self.data_pre(
# pd.concat([pre, temp_test], axis=0), train_len)
total = self.data_pre(
pd.concat([pre, temp_test], axis=0), train_len=train_len)
X_train = total[:train_len]
X_test = total[train_len:]
self.X_test = X_test
self.X_train = X_train
self.y_test = y_test
self.y_train = y_train
def train_model(self):
"""
train model by lightgbm
"""
print('Start training...')
gbm = lgb.LGBMRegressor(
objective='regression', num_leaves=31, learning_rate=0.095, n_estimators=29)
gbm.fit(self.X_train, self.y_train, eval_set=[
(self.X_test, self.y_test)], eval_metric='l1', early_stopping_rounds=5)
self.gbm = gbm
def evaulate_model(self, model=True):
"""
evaluate model by lightgbm
"""
print('Start predicting...')
y_pred = self.gbm.predict(
self.X_test, num_iteration=self.gbm.best_iteration_)
result = | pd.DataFrame(y_pred - self.y_test) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import itertools
import warnings
from warnings import catch_warnings
from datetime import datetime
from pandas.types.common import (is_integer_dtype,
is_float_dtype,
is_scalar)
from pandas.compat import range, lrange, lzip, StringIO, lmap
from pandas.tslib import NaT
from numpy import nan
from numpy.random import randn
import numpy as np
import pandas as pd
from pandas import option_context
from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
MultiIndex, Timestamp, Timedelta, UInt64Index)
from pandas.formats.printing import pprint_thing
from pandas import concat
from pandas.core.common import PerformanceWarning
from pandas.tests.indexing.common import _mklbl
import pandas.util.testing as tm
from pandas import date_range
_verbose = False
# ------------------------------------------------------------------------
# Indexing test cases
def _generate_indices(f, values=False):
""" generate the indicies
if values is True , use the axis values
is False, use the range
"""
axes = f.axes
if values:
axes = [lrange(len(a)) for a in axes]
return itertools.product(*axes)
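# e.g. for a 2x3 frame with values=True this yields (0, 0), (0, 1), ..., (1, 2).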
def _get_value(f, i, values=False):
""" return the value for the location i """
# check against values
if values:
return f.values[i]
# this is equiv of f[col][row].....
# v = f
# for a in reversed(i):
# v = v.__getitem__(a)
# return v
with catch_warnings(record=True):
return f.ix[i]
def _get_result(obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
# use an artificial conversion to map the integer keys to the labels
# so ix can work for comparisons
if method == 'indexer':
method = 'ix'
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
try:
xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
except:
xp = getattr(obj, method).__getitem__(key)
return xp
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
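# e.g. for a 2-D frame, _axify(df, [0, 1], 1) returns (slice(None, None, None), [0, 1]).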
class TestIndexing(tm.TestCase):
_objs = set(['series', 'frame', 'panel'])
_typs = set(['ints', 'uints', 'labels', 'mixed',
'ts', 'floats', 'empty', 'ts_rev'])
def setUp(self):
self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
self.frame_ints = DataFrame(np.random.randn(4, 4),
index=lrange(0, 8, 2),
columns=lrange(0, 12, 3))
self.panel_ints = Panel(np.random.rand(4, 4, 4),
items=lrange(0, 8, 2),
major_axis=lrange(0, 12, 3),
minor_axis=lrange(0, 16, 4))
self.series_uints = Series(np.random.rand(4),
index=UInt64Index(lrange(0, 8, 2)))
self.frame_uints = DataFrame(np.random.randn(4, 4),
index=UInt64Index(lrange(0, 8, 2)),
columns=UInt64Index(lrange(0, 12, 3)))
self.panel_uints = Panel(np.random.rand(4, 4, 4),
items=UInt64Index(lrange(0, 8, 2)),
major_axis=UInt64Index(lrange(0, 12, 3)),
minor_axis=UInt64Index(lrange(0, 16, 4)))
self.series_labels = Series(np.random.randn(4), index=list('abcd'))
self.frame_labels = DataFrame(np.random.randn(4, 4),
index=list('abcd'), columns=list('ABCD'))
self.panel_labels = Panel(np.random.randn(4, 4, 4),
items=list('abcd'),
major_axis=list('ABCD'),
minor_axis=list('ZYXW'))
self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4),
index=[2, 4, 'null', 8])
self.panel_mixed = Panel(np.random.randn(4, 4, 4),
items=[2, 4, 'null', 8])
self.series_ts = Series(np.random.randn(4),
index=date_range('20130101', periods=4))
self.frame_ts = DataFrame(np.random.randn(4, 4),
index=date_range('20130101', periods=4))
self.panel_ts = Panel(np.random.randn(4, 4, 4),
items=date_range('20130101', periods=4))
dates_rev = (date_range('20130101', periods=4)
.sort_values(ascending=False))
self.series_ts_rev = Series(np.random.randn(4),
index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
index=dates_rev)
self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
items=dates_rev)
self.frame_empty = DataFrame({})
self.series_empty = Series({})
self.panel_empty = Panel({})
# form agglomerates
for o in self._objs:
d = dict()
for t in self._typs:
d[t] = getattr(self, '%s_%s' % (o, t), None)
setattr(self, o, d)
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
indicies = itertools.product(*axes)
for i in indicies:
result = getattr(f, func)[i]
# check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(self, name, method1, key1, method2, key2, typs=None,
objs=None, axes=None, fails=None):
def _eq(t, o, a, obj, k1, k2):
""" compare equal for these 2 keys """
if a is not None and a > obj.ndim - 1:
return
def _print(result, error=None):
if error is not None:
error = str(error)
v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
"key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" %
(name, result, t, o, method1, method2, a, error or ''))
if _verbose:
pprint_thing(v)
try:
rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
try:
xp = _get_result(obj, method2, k2, a)
except:
result = 'no comp'
_print(result)
return
detail = None
try:
if is_scalar(rs) and is_scalar(xp):
self.assertEqual(rs, xp)
elif xp.ndim == 1:
tm.assert_series_equal(rs, xp)
elif xp.ndim == 2:
tm.assert_frame_equal(rs, xp)
elif xp.ndim == 3:
tm.assert_panel_equal(rs, xp)
result = 'ok'
except AssertionError as e:
detail = str(e)
result = 'fail'
# reverse the checks
if fails is True:
if result == 'fail':
result = 'ok (fail)'
_print(result)
if not result.startswith('ok'):
raise AssertionError(detail)
except AssertionError:
raise
except Exception as detail:
# if we are in fails, then ok, otherwise raise it
if fails is not None:
if isinstance(detail, fails):
result = 'ok (%s)' % type(detail).__name__
_print(result)
return
result = type(detail).__name__
raise AssertionError(_print(result, error=detail))
if typs is None:
typs = self._typs
if objs is None:
objs = self._objs
if axes is not None:
if not isinstance(axes, (tuple, list)):
axes = [axes]
else:
axes = list(axes)
else:
axes = [0, 1, 2]
# check
for o in objs:
if o not in self._objs:
continue
d = getattr(self, o)
for a in axes:
for t in typs:
if t not in self._typs:
continue
obj = d[t]
if obj is not None:
obj = obj.copy()
k2 = key2
_eq(t, o, a, obj, key1, k2)
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_indexer_caching(self):
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
arrays = [lrange(n), lrange(n)]
index = MultiIndex.from_tuples(lzip(*arrays))
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
s = Series(np.zeros(n), index=index)
s[s == 0] = 1
tm.assert_series_equal(s, expected)
def test_at_and_iat_get(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
result = getattr(f, func)[i]
expected = _get_value(f, i, values)
tm.assert_almost_equal(result, expected)
for o in self._objs:
d = getattr(self, o)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, self.check_values, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_and_iat_set(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
getattr(f, func)[i] = 1
expected = _get_value(f, i, values)
tm.assert_almost_equal(expected, 1)
for t in self._objs:
d = getattr(self, t)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, _check, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range('1/1/2000', periods=8)
df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
s = df['A']
result = s.at[dates[5]]
xp = s.values[5]
self.assertEqual(result, xp)
# GH 7729
# make sure we are boxing the returns
s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')
expected = Timestamp('2014-02-02')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
s = Series(['1 days', '2 days'], dtype='timedelta64[ns]')
expected = Timedelta('2 days')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
def test_iat_invalid_args(self):
pass
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
s = Series(range(5), index=[1, 1, 2, 2, 3], dtype='int64')
result = s.iloc[2]
self.assertEqual(result, 2)
result = s.iat[2]
self.assertEqual(result, 2)
self.assertRaises(IndexError, lambda: s.iat[10])
self.assertRaises(IndexError, lambda: s.iat[-10])
result = s.iloc[[2, 3]]
expected = Series([2, 3], [2, 2], dtype='int64')
tm.assert_series_equal(result, expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
tm.assert_series_equal(result, expected)
result = df.iat[2, 0]
expected = 2
self.assertEqual(result, 2)
def test_repeated_getitem_dups(self):
# GH 5678
# repeated getitems on a dup index returning an ndarray
df = DataFrame(
np.random.random_sample((20, 5)),
index=['ABCDE'[x % 5] for x in range(20)])
expected = df.loc['A', 0]
result = df.loc[:, 0].loc['A']
tm.assert_series_equal(result, expected)
def test_iloc_exceeds_bounds(self):
# GH6296
# iloc should allow indexers that exceed the bounds
df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE'))
expected = df
# lists of positions should raise IndexError!
with tm.assertRaisesRegexp(IndexError,
'positional indexers are out-of-bounds'):
df.iloc[:, [0, 1, 2, 3, 4, 5]]
self.assertRaises(IndexError, lambda: df.iloc[[1, 30]])
self.assertRaises(IndexError, lambda: df.iloc[[1, -30]])
self.assertRaises(IndexError, lambda: df.iloc[[100]])
s = df['A']
self.assertRaises(IndexError, lambda: s.iloc[[100]])
self.assertRaises(IndexError, lambda: s.iloc[[-100]])
# still raise on a single indexer
msg = 'single positional indexer is out-of-bounds'
with tm.assertRaisesRegexp(IndexError, msg):
df.iloc[30]
self.assertRaises(IndexError, lambda: df.iloc[-30])
# GH10779
# single positive/negative indexer exceeding Series bounds should raise
# an IndexError
with tm.assertRaisesRegexp(IndexError, msg):
s.iloc[30]
self.assertRaises(IndexError, lambda: s.iloc[-30])
# slices are ok
result = df.iloc[:, 4:10] # 0 < start < len < stop
expected = df.iloc[:, 4:]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -4:-10] # stop < 0 < start < len
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:, :4:-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
expected = df.iloc[:, 4::-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:4] # start < 0 < stop < len
expected = df.iloc[:, :4]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4] # 0 < stop < len < start
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down)
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:11] # 0 < len < start < stop
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
# slice bounds exceeding is ok
result = s.iloc[18:30]
expected = s.iloc[18:]
tm.assert_series_equal(result, expected)
result = s.iloc[30:]
expected = s.iloc[:0]
tm.assert_series_equal(result, expected)
result = s.iloc[30::-1]
expected = s.iloc[::-1]
tm.assert_series_equal(result, expected)
# doc example
def check(result, expected):
str(result)
result.dtypes
tm.assert_frame_equal(result, expected)
dfl = DataFrame(np.random.randn(5, 2), columns=list('AB'))
check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
check(dfl.iloc[4:6], dfl.iloc[[4]])
self.assertRaises(IndexError, lambda: dfl.iloc[[4, 5, 6]])
self.assertRaises(IndexError, lambda: dfl.iloc[:, 4])
def test_iloc_getitem_int(self):
# integer
self.check_result('integer', 'iloc', 2, 'ix',
{0: 4, 1: 6, 2: 8}, typs=['ints', 'uints'])
self.check_result('integer', 'iloc', 2, 'indexer', 2,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int(self):
# neg integer
self.check_result('neg int', 'iloc', -1, 'ix',
{0: 6, 1: 9, 2: 12}, typs=['ints', 'uints'])
self.check_result('neg int', 'iloc', -1, 'indexer', -1,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_list_int(self):
# list of ints
self.check_result('list int', 'iloc', [0, 1, 2], 'ix',
{0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]},
typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [2], 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
# array of ints (GH5006), make sure that a single indexer is returning
# the correct type
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'ix',
{0: [0, 2, 4],
1: [0, 3, 6],
2: [0, 4, 8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([2]), 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'indexer',
[0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]})
s = df['A']
expected = df.iloc[0]
result = df.iloc[-3]
tm.assert_series_equal(result, expected)
expected = df.iloc[[0]]
result = df.iloc[[-3]]
tm.assert_frame_equal(result, expected)
expected = s.iloc[0]
result = s.iloc[-3]
self.assertEqual(result, expected)
expected = s.iloc[[0]]
result = s.iloc[[-3]]
| tm.assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
idx = DatetimeIndex(org, freq=f)
self.assertTrue(idx.equals(org))
# unable to create tz-aware 'A' and 'C' freq
if _np_version_under1p7:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=f, tz='US/Pacific')
self.assertTrue(idx.equals(org))
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
class TestTimeSeries(tm.TestCase):
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEqual(v1, Timestamp('2/28/2005'))
self.assertEqual(v2, Timestamp('4/30/2005'))
self.assertEqual(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assertIsNone(dti2.freq)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
self.assertIsNotNone(expected.freq)
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
self.assertIsNone(masked.freq)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_series_box_timestamp(self):
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng)
tm.assert_isinstance(s[5], Timestamp)
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng, index=rng)
tm.assert_isinstance(s[5], Timestamp)
tm.assert_isinstance(s.iget_value(5), Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
self.assertRaises(ValueError, date_range, start, end,
freq='s', periods=10)
def test_timestamp_to_datetime(self):
_skip_if_no_pytz()
rng = date_range('20090415', '20090519',
tz='US/Eastern')
stamp = rng[0]
dtval = stamp.to_pydatetime()
self.assertEqual(stamp, dtval)
self.assertEqual(stamp.tzinfo, dtval.tzinfo)
def test_index_convert_to_datetime_array(self):
_skip_if_no_pytz()
def _check_rng(rng):
converted = rng.to_pydatetime()
tm.assert_isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
tm.assert_isinstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
self.assertEqual(rng[0].second, 1)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
self.assertIs(result.index, rng)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_setitem_timestamp(self):
# 2155
columns = DatetimeIndex(start='1/1/2012', end='2/1/2012',
freq=datetools.bday)
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
rng2 = rng[::2][::-1]
self.assertRaises(ValueError, rng2.get_indexer, rng,
method='pad')
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertEqual(df[unit].dtype, ns_dtype)
self.assertTrue((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertTrue((tmp['dates'].values == ex_vals).all())
def test_to_datetime_unit(self):
epoch = 1370745748
s = Series([ epoch + t for t in range(20) ])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = concat([Series([ epoch + t for t in range(20) ]).astype(float),Series([np.nan])],ignore_index=True)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
series = Series(dates)
self.assertTrue(np.issubdtype(series.dtype, np.dtype('M8[ns]')))
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')
idx = Index(arr)
self.assertTrue((idx.values == tslib.cast_to_nanoseconds(arr)).all())
def test_index_astype_datetime64(self):
idx = Index([datetime(2012, 1, 1)], dtype=object)
if not _np_version_under1p7:
raise nose.SkipTest("test only valid in numpy < 1.7")
casted = idx.astype(np.dtype('M8[D]'))
expected = DatetimeIndex(idx.values)
tm.assert_isinstance(casted, DatetimeIndex)
self.assertTrue(casted.equals(expected))
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
self.assertTrue(np.issubdtype(result.dtype, np.dtype('M8[ns]')))
mask = result.isnull()
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_reindex_frame_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
result = df.reindex(lrange(15))
self.assertTrue(np.issubdtype(result['B'].dtype, np.dtype('M8[ns]')))
mask = com.isnull(result)['B']
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_series_repr_nat(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
result = repr(series)
expected = ('0 1970-01-01 00:00:00\n'
'1 1970-01-01 00:00:00.000001\n'
'2 1970-01-01 00:00:00.000002\n'
'3 NaT\n'
'dtype: datetime64[ns]')
self.assertEqual(result, expected)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_string_na_nat_conversion(self):
# GH #999, #858
from pandas.compat import parse_date
strings = np.array(['1/1/2000', '1/2/2000', np.nan,
'1/4/2000, 12:34:56'], dtype=object)
expected = np.empty(4, dtype='M8[ns]')
for i, val in enumerate(strings):
if com.isnull(val):
expected[i] = iNaT
else:
expected[i] = parse_date(val)
result = tslib.array_to_datetime(strings)
assert_almost_equal(result, expected)
result2 = to_datetime(strings)
tm.assert_isinstance(result2, DatetimeIndex)
assert_almost_equal(result, result2)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
result = to_datetime(malformed)
assert_almost_equal(result, malformed)
self.assertRaises(ValueError, to_datetime, malformed,
errors='raise')
idx = ['a', 'b', 'c', 'd', 'e']
series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan,
'1/5/2000'], index=idx, name='foo')
dseries = Series([to_datetime('1/1/2000'), np.nan,
to_datetime('1/3/2000'), np.nan,
to_datetime('1/5/2000')], index=idx, name='foo')
result = to_datetime(series)
dresult = to_datetime(dseries)
expected = Series(np.empty(5, dtype='M8[ns]'), index=idx)
for i in range(5):
x = series[i]
if isnull(x):
expected[i] = iNaT
else:
expected[i] = to_datetime(x)
assert_series_equal(result, expected)
self.assertEqual(result.name, 'foo')
assert_series_equal(dresult, expected)
self.assertEqual(dresult.name, 'foo')
def test_to_datetime_iso8601(self):
result = to_datetime(["2012-01-01 00:00:00"])
exp = Timestamp("2012-01-01 00:00:00")
self.assertEqual(result[0], exp)
result = to_datetime(['20121001']) # bad iso 8601
exp = Timestamp('2012-10-01')
self.assertEqual(result[0], exp)
def test_to_datetime_default(self):
rs = to_datetime('2001')
xp = datetime(2001, 1, 1)
self.assertEqual(rs, xp)
#### dayfirst is essentially broken
#### to_datetime('01-13-2012', dayfirst=True)
#### self.assertRaises(ValueError, to_datetime('01-13-2012', dayfirst=True))
def test_to_datetime_on_datetime64_series(self):
# #2699
s = Series(date_range('1/1/2000', periods=10))
result = to_datetime(s)
self.assertEqual(result[0], s[0])
def test_to_datetime_with_apply(self):
# this is only locale tested with US/None locales
_skip_if_has_locale()
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1,2,3])
expected = pd.to_datetime(td, format='%b %y')
result = td.apply(pd.to_datetime, format='%b %y')
assert_series_equal(result, expected)
td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3])
self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y'))
self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y'))
expected = pd.to_datetime(td, format='%b %y', coerce=True)
result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', coerce=True))
assert_series_equal(result, expected)
def test_nat_vector_field_access(self):
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(idx, field)
expected = [getattr(x, field) if x is not NaT else -1
for x in idx]
self.assert_numpy_array_equal(result, expected)
def test_nat_scalar_field_access(self):
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(NaT, field)
self.assertEqual(result, -1)
self.assertEqual(NaT.weekday(), -1)
def test_to_datetime_types(self):
# empty string
result = to_datetime('')
self.assertIs(result, NaT)
result = to_datetime(['', ''])
self.assertTrue(isnull(result).all())
# ints
result = Timestamp(0)
expected = to_datetime(0)
self.assertEqual(result, expected)
# GH 3888 (strings)
expected = to_datetime(['2012'])[0]
result = to_datetime('2012')
self.assertEqual(result, expected)
### array = ['2012','20120101','20120101 12:01:01']
array = ['20120101','20120101 12:01:01']
expected = list(to_datetime(array))
result = lmap(Timestamp,array)
tm.assert_almost_equal(result,expected)
### currently fails ###
### result = Timestamp('2012')
### expected = to_datetime('2012')
### self.assertEqual(result, expected)
def test_to_datetime_unprocessable_input(self):
# GH 4928
self.assert_numpy_array_equal(
to_datetime([1, '1']),
np.array([1, '1'], dtype='O')
)
self.assertRaises(TypeError, to_datetime, [1, '1'], errors='raise')
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view('M8[us]')
as_obj = scalar.astype('O')
index = DatetimeIndex([scalar])
self.assertEqual(index[0], scalar.astype('O'))
value = Timestamp(scalar)
self.assertEqual(value, as_obj)
def test_to_datetime_list_of_integers(self):
rng = date_range('1/1/2000', periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
self.assertTrue(rng.equals(result))
def test_to_datetime_dt64s(self):
in_bound_dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
for dt in in_bound_dts:
self.assertEqual(
pd.to_datetime(dt),
Timestamp(dt)
)
oob_dts = [
np.datetime64('1000-01-01'),
np.datetime64('5000-01-02'),
]
for dt in oob_dts:
self.assertRaises(ValueError, pd.to_datetime, dt, errors='raise')
self.assertRaises(ValueError, tslib.Timestamp, dt)
self.assertIs(pd.to_datetime(dt, coerce=True), NaT)
def test_to_datetime_array_of_dt64s(self):
dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
self.assert_numpy_array_equal(
pd.to_datetime(dts, box=False),
np.array([Timestamp(x).asm8 for x in dts])
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64('9999-01-01')]
self.assertRaises(
ValueError,
pd.to_datetime,
dts_with_oob,
coerce=False,
errors='raise'
)
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=True),
np.array(
[
Timestamp(dts_with_oob[0]).asm8,
Timestamp(dts_with_oob[1]).asm8,
iNaT,
],
dtype='M8'
)
)
# With coerce=False and errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=False),
np.array(
[dt.item() for dt in dts_with_oob],
dtype='O'
)
)
def test_index_to_datetime(self):
idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
result = idx.to_datetime()
expected = DatetimeIndex(datetools.to_datetime(idx.values))
self.assertTrue(result.equals(expected))
today = datetime.today()
idx = Index([today], dtype=object)
result = idx.to_datetime()
expected = DatetimeIndex([today])
self.assertTrue(result.equals(expected))
def test_to_datetime_freq(self):
xp = bdate_range('2000-1-1', periods=10, tz='UTC')
rs = xp.to_datetime()
self.assertEqual(xp.freq, rs.freq)
self.assertEqual(xp.tzinfo, rs.tzinfo)
def test_range_misspecified(self):
# GH #1095
self.assertRaises(ValueError, date_range, '1/1/2000')
self.assertRaises(ValueError, date_range, end='1/1/2000')
self.assertRaises(ValueError, date_range, periods=10)
self.assertRaises(ValueError, date_range, '1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, end='1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, periods=10, freq='H')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
self.assertIn('2000', str(e))
def test_reindex_with_datetimes(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
def test_promote_datetime_date(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# test asfreq
result = ts2.asfreq('4H', method='ffill')
expected = ts[5:].asfreq('4H', method='ffill')
assert_series_equal(result, expected)
result = rng.get_indexer(ts2.index)
expected = rng.get_indexer(ts_slice.index)
self.assert_numpy_array_equal(result, expected)
def test_asfreq_normalize(self):
rng = date_range('1/1/2000 09:30', periods=20)
norm = date_range('1/1/2000', periods=20)
vals = np.random.randn(20)
ts = Series(vals, index=rng)
result = ts.asfreq('D', normalize=True)
norm = date_range('1/1/2000', periods=20)
expected = Series(vals, index=norm)
assert_series_equal(result, expected)
vals = np.random.randn(20, 3)
ts = DataFrame(vals, index=rng)
result = ts.asfreq('D', normalize=True)
expected = DataFrame(vals, index=norm)
assert_frame_equal(result, expected)
def test_date_range_gen_error(self):
rng = date_range('1/1/2000 00:00', '1/1/2000 00:18', freq='5min')
self.assertEqual(len(rng), 4)
def test_first_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.first('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.first('10d')
self.assertEqual(len(result), 10)
result = ts.first('3M')
expected = ts[:'3/31/2000']
assert_series_equal(result, expected)
result = ts.first('21D')
expected = ts[:21]
assert_series_equal(result, expected)
result = ts[:0].first('3M')
assert_series_equal(result, ts[:0])
def test_last_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.last('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.last('10d')
self.assertEqual(len(result), 10)
result = ts.last('21D')
expected = ts['12/12/2009':]
assert_series_equal(result, expected)
result = ts.last('21D')
expected = ts[-21:]
assert_series_equal(result, expected)
result = ts[:0].last('3M')
assert_series_equal(result, ts[:0])
def test_add_offset(self):
rng = date_range('1/1/2000', '2/1/2000')
result = rng + offsets.Hour(2)
expected = date_range('1/1/2000 02:00', '2/1/2000 02:00')
self.assertTrue(result.equals(expected))
def test_format_pre_1900_dates(self):
rng = date_range('1/1/1850', '1/1/1950', freq='A-DEC')
rng.format()
ts = Series(1, index=rng)
repr(ts)
def test_repeat(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
def test_at_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts[time(9, 30)]
result_df = df.ix[time(9, 30)]
expected = ts[(rng.hour == 9) & (rng.minute == 30)]
exp_df = df[(rng.hour == 9) & (rng.minute == 30)]
# expected.index = date_range('1/1/2000', '1/4/2000')
assert_series_equal(result, expected)
tm.assert_frame_equal(result_df, exp_df)
chunk = df.ix['1/4/2000':]
result = chunk.ix[time(9, 30)]
expected = result_df[-1:]
tm.assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.at_time(time(0, 0))
assert_series_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = Series(np.random.randn(len(rng)), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_at_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_frame_equal(result, expected)
result = ts.ix[time(9, 30)]
expected = ts.ix[(rng.hour == 9) & (rng.minute == 30)]
assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts.at_time(time(0, 0))
assert_frame_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = DataFrame(np.random.randn(len(rng), 2), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_between_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_series_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_between_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_frame_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_dti_constructor_preserve_dti_freq(self):
rng = date_range('1/1/2000', '1/2/2000', freq='5min')
rng2 = DatetimeIndex(rng)
self.assertEqual(rng.freq, rng2.freq)
def test_normalize(self):
rng = date_range('1/1/2000 9:30', periods=10, freq='D')
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D')
self.assertTrue(result.equals(expected))
rng_ns = pd.DatetimeIndex(np.array([1380585623454345752, 1380585612343234312]).astype("datetime64[ns]"))
rng_ns_normalized = rng_ns.normalize()
expected = pd.DatetimeIndex(np.array([1380585600000000000, 1380585600000000000]).astype("datetime64[ns]"))
self.assertTrue(rng_ns_normalized.equals(expected))
self.assertTrue(result.is_normalized)
self.assertFalse(rng.is_normalized)
def test_to_period(self):
from pandas.tseries.period import period_range
ts = _simple_ts('1/1/2000', '1/1/2001')
pts = ts.to_period()
exp = ts.copy()
exp.index = period_range('1/1/2000', '1/1/2001')
assert_series_equal(pts, exp)
pts = ts.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
def create_dt64_based_index(self):
data = [Timestamp('2007-01-01 10:11:12.123456Z'),
Timestamp('2007-01-01 10:11:13.789123Z')]
index = DatetimeIndex(data)
return index
def test_to_period_millisecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='L')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123Z', 'L'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789Z', 'L'))
def test_to_period_microsecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='U')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123456Z', 'U'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789123Z', 'U'))
def test_to_period_tz(self):
_skip_if_no_pytz()
from dateutil.tz import tzlocal
from pytz import utc as UTC
xp = date_range('1/1/2000', '4/1/2000').to_period()
ts = date_range('1/1/2000', '4/1/2000', tz='US/Eastern')
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=UTC)
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=tzlocal())
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
def test_frame_to_period(self):
K = 5
from pandas.tseries.period import period_range
dr = date_range('1/1/2000', '1/1/2001')
pr = period_range('1/1/2000', '1/1/2001')
df = DataFrame(randn(len(dr), K), index=dr)
df['mix'] = 'a'
pts = df.to_period()
exp = df.copy()
exp.index = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
df = df.T
pts = df.to_period(axis=1)
exp = df.copy()
exp.columns = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M', axis=1)
self.assertTrue(pts.columns.equals(exp.columns.asfreq('M')))
self.assertRaises(ValueError, df.to_period, axis=2)
def test_timestamp_fields(self):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter', 'is_month_start', 'is_month_end', 'is_quarter_start', 'is_quarter_end', 'is_year_start', 'is_year_end']
for f in fields:
expected = getattr(idx, f)[-1]
result = getattr(Timestamp(idx[-1]), f)
self.assertEqual(result, expected)
self.assertEqual(idx.freq, Timestamp(idx[-1], idx.freq).freq)
self.assertEqual(idx.freqstr, Timestamp(idx[-1], idx.freq).freqstr)
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013,12,31)
result = Timestamp(d).week
expected = 1 # ISO standard
self.assertEqual(result, expected)
d = datetime(2008,12,28)
result = Timestamp(d).week
expected = 52 # ISO standard
self.assertEqual(result, expected)
d = datetime(2009,12,31)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
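        # Jan 1-3 2010 fall before the first ISO Thursday of 2010, so they
        # still belong to ISO week 53 of 2009.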
d = datetime(2010,1,1)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,3)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
result = np.array([Timestamp(datetime(*args)).week for args in
[(2000,1,1),(2000,1,2),(2005,1,1),(2005,1,2)]])
self.assertTrue((result == [52, 52, 53, 53]).all())
def test_timestamp_date_out_of_range(self):
self.assertRaises(ValueError, Timestamp, '1676-01-01')
self.assertRaises(ValueError, Timestamp, '2263-01-01')
# 1475
self.assertRaises(ValueError, DatetimeIndex, ['1400-01-01'])
self.assertRaises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)])
def test_timestamp_repr(self):
# pre-1900
stamp = Timestamp('1850-01-01', tz='US/Eastern')
repr(stamp)
iso8601 = '1850-01-01 01:23:45.012345'
stamp = Timestamp(iso8601, tz='US/Eastern')
result = repr(stamp)
self.assertIn(iso8601, result)
def test_timestamp_from_ordinal(self):
# GH 3042
dt = datetime(2011, 4, 16, 0, 0)
ts = Timestamp.fromordinal(dt.toordinal())
self.assertEqual(ts.to_pydatetime(), dt)
# with a tzinfo
stamp = Timestamp('2011-4-16', tz='US/Eastern')
dt_tz = stamp.to_pydatetime()
ts = Timestamp.fromordinal(dt_tz.toordinal(),tz='US/Eastern')
self.assertEqual(ts.to_pydatetime(), dt_tz)
def test_datetimeindex_integers_shift(self):
rng = date_range('1/1/2000', periods=20)
result = rng + 5
expected = rng.shift(5)
self.assertTrue(result.equals(expected))
result = rng - 5
expected = rng.shift(-5)
self.assertTrue(result.equals(expected))
def test_astype_object(self):
# NumPy 1.6.1 weak ns support
rng = date_range('1/1/2000', periods=20)
casted = rng.astype('O')
exp_values = list(rng)
self.assert_numpy_array_equal(casted, exp_values)
def test_catch_infinite_loop(self):
offset = datetools.DateOffset(minute=5)
# blow up, don't loop forever
self.assertRaises(Exception, date_range, datetime(2011, 11, 11),
datetime(2011, 11, 12), freq=offset)
def test_append_concat(self):
rng = date_range('5/8/2012 1:45', periods=10, freq='5T')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
result = ts.append(ts)
result_df = df.append(df)
ex_index = DatetimeIndex(np.tile(rng.values, 2))
self.assertTrue(result.index.equals(ex_index))
self.assertTrue(result_df.index.equals(ex_index))
appended = rng.append(rng)
self.assertTrue(appended.equals(ex_index))
appended = rng.append([rng, rng])
ex_index = DatetimeIndex(np.tile(rng.values, 3))
self.assertTrue(appended.equals(ex_index))
# different index names
rng1 = rng.copy()
rng2 = rng.copy()
rng1.name = 'foo'
rng2.name = 'bar'
self.assertEqual(rng1.append(rng1).name, 'foo')
self.assertIsNone(rng1.append(rng2).name)
def test_append_concat_tz(self):
#GH 2938
_skip_if_no_pytz()
rng = date_range('5/8/2012 1:45', periods=10, freq='5T',
tz='US/Eastern')
rng2 = date_range('5/8/2012 2:35', periods=10, freq='5T',
tz='US/Eastern')
rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T',
tz='US/Eastern')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
ts2 = Series(np.random.randn(len(rng2)), rng2)
df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2)
result = ts.append(ts2)
result_df = df.append(df2)
self.assertTrue(result.index.equals(rng3))
self.assertTrue(result_df.index.equals(rng3))
appended = rng.append(rng2)
self.assertTrue(appended.equals(rng3))
def test_set_dataframe_column_ns_dtype(self):
x = DataFrame([datetime.now(), datetime.now()])
self.assertEqual(x[0].dtype, np.dtype('M8[ns]'))
def test_groupby_count_dateparseerror(self):
dr = date_range(start='1/1/2012', freq='5min', periods=10)
# BAD Example, datetimes first
s = Series(np.arange(10), index=[dr, lrange(10)])
grouped = s.groupby(lambda x: x[1] % 2 == 0)
result = grouped.count()
s = Series(np.arange(10), index=[lrange(10), dr])
grouped = s.groupby(lambda x: x[0] % 2 == 0)
expected = grouped.count()
assert_series_equal(result, expected)
def test_datetimeindex_repr_short(self):
dr = date_range(start='1/1/2012', periods=1)
repr(dr)
dr = date_range(start='1/1/2012', periods=2)
repr(dr)
dr = date_range(start='1/1/2012', periods=3)
repr(dr)
def test_constructor_int64_nocopy(self):
# #1624
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] == -1).all())
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr, copy=True)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] != -1).all())
def test_series_interpolate_method_values(self):
# #1646
ts = _simple_ts('1/1/2000', '1/20/2000')
ts[::2] = np.nan
result = ts.interpolate(method='values')
exp = ts.interpolate()
assert_series_equal(result, exp)
def test_frame_datetime64_handling_groupby(self):
# it works!
df = DataFrame([(3, np.datetime64('2012-07-03')),
(3, np.datetime64('2012-07-04'))],
columns=['a', 'date'])
result = df.groupby('a').first()
self.assertEqual(result['date'][3], Timestamp('2012-07-03'))
def test_series_interpolate_intraday(self):
# #1698
index = pd.date_range('1/1/2012', periods=4, freq='12D')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(days=1)).order()
exp = ts.reindex(new_index).interpolate(method='time')
index = pd.date_range('1/1/2012', periods=4, freq='12H')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(hours=1)).order()
result = ts.reindex(new_index).interpolate(method='time')
self.assert_numpy_array_equal(result.values, exp.values)
def test_frame_dict_constructor_datetime64_1680(self):
dr = date_range('1/1/2012', periods=10)
s = Series(dr, index=dr)
# it works!
DataFrame({'a': 'foo', 'b': s}, index=dr)
DataFrame({'a': 'foo', 'b': s.values}, index=dr)
def test_frame_datetime64_mixed_index_ctor_1681(self):
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
ts = Series(dr)
# it works!
d = DataFrame({'A': 'foo', 'B': ts}, index=dr)
self.assertTrue(d['B'].isnull().all())
def test_frame_timeseries_to_records(self):
index = date_range('1/1/2000', periods=10)
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['a', 'b', 'c'])
result = df.to_records()
        self.assertEqual(result['index'].dtype, np.dtype('M8[ns]'))
result = df.to_records(index=False)
def test_frame_datetime64_duplicated(self):
dates = date_range('2010-07-01', end='2010-08-05')
tst = DataFrame({'symbol': 'AAA', 'date': dates})
result = tst.duplicated(['date', 'symbol'])
self.assertTrue((-result).all())
tst = DataFrame({'date': dates})
result = tst.duplicated()
self.assertTrue((-result).all())
def test_timestamp_compare_with_early_datetime(self):
# e.g. datetime.min
stamp = Timestamp('2012-01-01')
self.assertFalse(stamp == datetime.min)
self.assertFalse(stamp == datetime(1600, 1, 1))
self.assertFalse(stamp == datetime(2700, 1, 1))
self.assertNotEqual(stamp, datetime.min)
self.assertNotEqual(stamp, datetime(1600, 1, 1))
self.assertNotEqual(stamp, datetime(2700, 1, 1))
self.assertTrue(stamp > datetime(1600, 1, 1))
self.assertTrue(stamp >= datetime(1600, 1, 1))
self.assertTrue(stamp < datetime(2700, 1, 1))
self.assertTrue(stamp <= datetime(2700, 1, 1))
def test_to_html_timestamp(self):
rng = date_range('2000-01-01', periods=10)
df = DataFrame(np.random.randn(10, 4), index=rng)
result = df.to_html()
self.assertIn('2000-01-01', result)
def test_to_csv_numpy_16_bug(self):
frame = DataFrame({'a': date_range('1/1/2000', periods=10)})
buf = StringIO()
frame.to_csv(buf)
result = buf.getvalue()
self.assertIn('2000-01-01', result)
def test_series_map_box_timestamps(self):
# #2689, #2627
s = Series(date_range('1/1/2000', periods=10))
def f(x):
return (x.hour, x.day, x.month)
# it works!
s.map(f)
s.apply(f)
DataFrame(s).applymap(f)
def test_concat_datetime_datetime64_frame(self):
# #2624
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 'hi'])
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
ind = date_range(start="2000/1/1", freq="D", periods=10)
df1 = DataFrame({'date': ind, 'test':lrange(10)})
# it works!
pd.concat([df1, df2_obj])
def test_period_resample(self):
# GH3609
s = Series(range(100),index=date_range('20130101', freq='s', periods=100), dtype='float')
s[10:30] = np.nan
expected = Series([34.5, 79.5], index=[Period('2013-01-01 00:00', 'T'), Period('2013-01-01 00:01', 'T')])
result = s.to_period().resample('T', kind='period')
assert_series_equal(result, expected)
result2 = s.resample('T', kind='period')
assert_series_equal(result2, expected)
def test_period_resample_with_local_timezone(self):
# GH5430
_skip_if_no_pytz()
import pytz
local_timezone = pytz.timezone('America/Los_Angeles')
start = datetime(year=2013, month=11, day=1, hour=0, minute=0, tzinfo=pytz.utc)
# 1 day later
end = datetime(year=2013, month=11, day=2, hour=0, minute=0, tzinfo=pytz.utc)
index = pd.date_range(start, end, freq='H')
series = pd.Series(1, index=index)
series = series.tz_convert(local_timezone)
result = series.resample('D', kind='period')
# Create the expected series
expected_index = (pd.period_range(start=start, end=end, freq='D') - 1) # Index is moved back a day with the timezone conversion from UTC to Pacific
expected = pd.Series(1, index=expected_index)
assert_series_equal(result, expected)
def test_pickle(self):
#GH4606
from pandas.compat import cPickle
import pickle
for pick in [pickle, cPickle]:
p = pick.loads(pick.dumps(NaT))
self.assertTrue(p is NaT)
idx = pd.to_datetime(['2013-01-01', NaT, '2014-01-06'])
idx_p = pick.loads(pick.dumps(idx))
self.assertTrue(idx_p[0] == idx[0])
self.assertTrue(idx_p[1] is NaT)
self.assertTrue(idx_p[2] == idx[2])
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
class TestDatetimeIndex(tm.TestCase):
_multiprocess_can_split_ = True
def test_hash_error(self):
index = date_range('20010101', periods=10)
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(index).__name__):
hash(index)
def test_stringified_slice_with_tz(self):
#GH2658
import datetime
start=datetime.datetime.now()
idx=DatetimeIndex(start=start,freq="1d",periods=10)
df=DataFrame(lrange(10),index=idx)
df["2013-01-14 23:44:34.437768-05:00":] # no exception here
def test_append_join_nondatetimeindex(self):
rng = date_range('1/1/2000', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
tm.assert_isinstance(result[0], Timestamp)
# it works
rng.join(idx, how='outer')
def test_astype(self):
rng = date_range('1/1/2000', periods=10)
result = rng.astype('i8')
self.assert_numpy_array_equal(result, rng.asi8)
def test_to_period_nofreq(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
self.assertRaises(ValueError, idx.to_period)
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'],
freq='infer')
idx.to_period()
def test_000constructor_resolution(self):
# 2252
t1 = Timestamp((1352934390 * 1000000000) + 1000000 + 1000 + 1)
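        # epoch seconds scaled to nanoseconds, plus 1 ms + 1 us + 1 ns, to
        # exercise sub-microsecond resolution in the constructor.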
idx = DatetimeIndex([t1])
self.assertEqual(idx.nanosecond[0], t1.nanosecond)
def test_constructor_coverage(self):
rng = date_range('1/1/2000', periods=10.5)
exp = date_range('1/1/2000', periods=10)
self.assertTrue(rng.equals(exp))
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
periods='foo', freq='D')
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
end='1/10/2000')
self.assertRaises(ValueError, DatetimeIndex, '1/1/2000')
# generator expression
gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10))
result = DatetimeIndex(gen)
expected = DatetimeIndex([datetime(2000, 1, 1) + timedelta(i)
for i in range(10)])
self.assertTrue(result.equals(expected))
# NumPy string array
strings = np.array(['2000-01-01', '2000-01-02', '2000-01-03'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
self.assertTrue(result.equals(expected))
from_ints = DatetimeIndex(expected.asi8)
self.assertTrue(from_ints.equals(expected))
# non-conforming
self.assertRaises(ValueError, DatetimeIndex,
['2000-01-01', '2000-01-02', '2000-01-04'],
freq='D')
self.assertRaises(ValueError, DatetimeIndex,
start='2011-01-01', freq='b')
self.assertRaises(ValueError, DatetimeIndex,
end='2011-01-01', freq='B')
self.assertRaises(ValueError, DatetimeIndex, periods=10, freq='D')
def test_constructor_name(self):
idx = DatetimeIndex(start='2000-01-01', periods=1, freq='A',
name='TEST')
self.assertEqual(idx.name, 'TEST')
def test_comparisons_coverage(self):
rng = date_range('1/1/2000', periods=10)
# raise TypeError for now
self.assertRaises(TypeError, rng.__lt__, rng[3].value)
result = rng == list(rng)
exp = rng == rng
self.assert_numpy_array_equal(result, exp)
def test_map(self):
rng = date_range('1/1/2000', periods=10)
f = lambda x: x.strftime('%Y%m%d')
result = rng.map(f)
exp = [f(x) for x in rng]
self.assert_numpy_array_equal(result, exp)
def test_add_union(self):
rng = date_range('1/1/2000', periods=5)
rng2 = date_range('1/6/2000', periods=5)
result = rng + rng2
expected = rng.union(rng2)
self.assertTrue(result.equals(expected))
def test_misc_coverage(self):
rng = date_range('1/1/2000', periods=5)
result = rng.groupby(rng.day)
tm.assert_isinstance(list(result.values())[0][0], Timestamp)
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
self.assertTrue(idx.equals(list(idx)))
non_datetime = Index(list('abc'))
self.assertFalse(idx.equals(list(non_datetime)))
def test_union_coverage(self):
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
ordered = DatetimeIndex(idx.order(), freq='infer')
result = ordered.union(idx)
self.assertTrue(result.equals(ordered))
result = ordered[:0].union(ordered)
self.assertTrue(result.equals(ordered))
self.assertEqual(result.freq, ordered.freq)
def test_union_bug_1730(self):
rng_a = date_range('1/1/2012', periods=4, freq='3H')
rng_b = date_range('1/1/2012', periods=4, freq='4H')
result = rng_a.union(rng_b)
exp = DatetimeIndex(sorted(set(list(rng_a)) | set(list(rng_b))))
self.assertTrue(result.equals(exp))
def test_union_bug_1745(self):
left = DatetimeIndex(['2012-05-11 15:19:49.695000'])
right = DatetimeIndex(['2012-05-29 13:04:21.322000',
'2012-05-11 15:27:24.873000',
'2012-05-11 15:31:05.350000'])
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_union_bug_4564(self):
from pandas import DateOffset
left = date_range("2013-01-01", "2013-02-01")
right = left + DateOffset(minutes=15)
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_intersection_bug_1708(self):
from pandas import DateOffset
index_1 = date_range('1/1/2012', periods=4, freq='12H')
index_2 = index_1 + DateOffset(hours=1)
result = index_1 & index_2
self.assertEqual(len(result), 0)
# def test_add_timedelta64(self):
# rng = date_range('1/1/2000', periods=5)
# delta = rng.values[3] - rng.values[1]
# result = rng + delta
# expected = rng + timedelta(2)
# self.assertTrue(result.equals(expected))
def test_get_duplicates(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-02',
'2000-01-03', '2000-01-03', '2000-01-04'])
result = idx.get_duplicates()
ex = DatetimeIndex(['2000-01-02', '2000-01-03'])
self.assertTrue(result.equals(ex))
def test_argmin_argmax(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
self.assertEqual(idx.argmin(), 1)
self.assertEqual(idx.argmax(), 0)
def test_order(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
ordered = idx.order()
self.assertTrue(ordered.is_monotonic)
ordered = idx.order(ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
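        # ascending sort of ['2000-01-04', '2000-01-01', '2000-01-02'] picks
        # positions [1, 2, 0]; the descending sort picks [0, 2, 1].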
ordered, dexer = idx.order(return_indexer=True)
self.assertTrue(ordered.is_monotonic)
self.assert_numpy_array_equal(dexer, [1, 2, 0])
ordered, dexer = idx.order(return_indexer=True, ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
self.assert_numpy_array_equal(dexer, [0, 2, 1])
def test_insert(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
result = idx.insert(2, datetime(2000, 1, 5))
exp = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-05',
'2000-01-02'])
self.assertTrue(result.equals(exp))
# insertion of non-datetime should coerce to object index
result = idx.insert(1, 'inserted')
expected = Index([datetime(2000, 1, 4), 'inserted', datetime(2000, 1, 1),
datetime(2000, 1, 2)])
self.assertNotIsInstance(result, DatetimeIndex)
tm.assert_index_equal(result, expected)
idx = date_range('1/1/2000', periods=3, freq='M')
result = idx.insert(3, datetime(2000, 4, 30))
self.assertEqual(result.freqstr, 'M')
def test_map_bug_1677(self):
index = DatetimeIndex(['2012-04-25 09:30:00.393000'])
f = index.asof
result = index.map(f)
expected = np.array([f(index[0])])
self.assert_numpy_array_equal(result, expected)
def test_groupby_function_tuple_1677(self):
df = DataFrame(np.random.rand(100),
index=date_range("1/1/2000", periods=100))
monthly_group = df.groupby(lambda x: (x.year, x.month))
result = monthly_group.mean()
tm.assert_isinstance(result.index[0], tuple)
def test_append_numpy_bug_1681(self):
# another datetime64 bug
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
a = DataFrame()
c = DataFrame({'A': 'foo', 'B': dr}, index=dr)
result = a.append(c)
self.assertTrue((result['B'] == dr).all())
def test_isin(self):
index = tm.makeDateIndex(4)
result = index.isin(index)
self.assertTrue(result.all())
result = index.isin(list(index))
self.assertTrue(result.all())
assert_almost_equal(index.isin([index[2], 5]),
[False, False, True, False])
def test_union(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = Int64Index(np.arange(10, 30, 2))
result = i1.union(i2)
expected = Int64Index(np.arange(0, 30, 2))
self.assert_numpy_array_equal(result, expected)
def test_union_with_DatetimeIndex(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = DatetimeIndex(start='2012-01-03 00:00:00', periods=10, freq='D')
i1.union(i2) # Works
i2.union(i1) # Fails with "AttributeError: can't set attribute"
def test_time(self):
rng = pd.date_range('1/1/2000', freq='12min', periods=10)
result = pd.Index(rng).time
expected = [t.time() for t in rng]
self.assertTrue((result == expected).all())
def test_date(self):
rng = pd.date_range('1/1/2000', freq='12H', periods=10)
result = pd.Index(rng).date
expected = [t.date() for t in rng]
self.assertTrue((result == expected).all())
def test_does_not_convert_mixed_integer(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args, **kwargs:
randn(), r_idx_type='i', c_idx_type='dt')
cols = df.columns.join(df.index, how='outer')
joined = cols.join(df.columns)
self.assertEqual(cols.dtype, np.dtype('O'))
self.assertEqual(cols.dtype, joined.dtype)
assert_array_equal(cols.values, joined.values)
def test_slice_keeps_name(self):
# GH4226
st = pd.Timestamp('2013-07-01 00:00:00', tz='America/Los_Angeles')
et = pd.Timestamp('2013-07-02 00:00:00', tz='America/Los_Angeles')
dr = pd.date_range(st, et, freq='H', name='timebucket')
self.assertEqual(dr[1:].name, dr.name)
def test_join_self(self):
index = date_range('1/1/2000', periods=10)
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = index.join(index, how=kind)
self.assertIs(index, joined)
def assert_index_parameters(self, index):
assert index.freq == '40960N'
assert index.inferred_freq == '40960N'
def test_ns_index(self):
if _np_version_under1p7:
raise nose.SkipTest
nsamples = 400
ns = int(1e9 / 24414)
dtstart = np.datetime64('2012-09-20T00:00:00')
dt = dtstart + np.arange(nsamples) * np.timedelta64(ns, 'ns')
freq = ns * pd.datetools.Nano()
index = pd.DatetimeIndex(dt, freq=freq, name='time')
self.assert_index_parameters(index)
new_index = pd.DatetimeIndex(start=index[0], end=index[-1], freq=index.freq)
self.assert_index_parameters(new_index)
def test_join_with_period_index(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args:
np.random.randint(2), c_idx_type='p',
r_idx_type='dt')
s = df.iloc[:5, 0]
joins = 'left', 'right', 'inner', 'outer'
for join in joins:
with tm.assertRaisesRegexp(ValueError, 'can only call with other '
'PeriodIndex-ed objects'):
df.columns.join(s.index, how=join)
def test_factorize(self):
idx1 = DatetimeIndex(['2014-01', '2014-01', '2014-02',
'2014-02', '2014-03', '2014-03'])
exp_arr = np.array([0, 0, 1, 1, 2, 2])
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
arr, idx = idx1.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# tz must be preserved
idx1 = idx1.tz_localize('Asia/Tokyo')
exp_idx = exp_idx.tz_localize('Asia/Tokyo')
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
idx2 = pd.DatetimeIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'])
exp_arr = np.array([2, 2, 1, 0, 2, 0])
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx2.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
exp_arr = np.array([0, 0, 1, 2, 0, 2])
exp_idx = DatetimeIndex(['2014-03', '2014-02', '2014-01'])
arr, idx = idx2.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# freq must be preserved
idx3 = date_range('2000-01', periods=4, freq='M', tz='Asia/Tokyo')
exp_arr = np.array([0, 1, 2, 3])
arr, idx = idx3.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(idx3))
class TestDatetime64(tm.TestCase):
"""
Also test support for datetime64[ns] in Series / DataFrame
"""
def setUp(self):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='Min')
self.series = Series(rand(len(dti)), dti)
def test_datetimeindex_accessors(self):
dti = DatetimeIndex(
freq='D', start=datetime(1998, 1, 1), periods=365)
self.assertEqual(dti.year[0], 1998)
self.assertEqual(dti.month[0], 1)
self.assertEqual(dti.day[0], 1)
self.assertEqual(dti.hour[0], 0)
self.assertEqual(dti.minute[0], 0)
self.assertEqual(dti.second[0], 0)
self.assertEqual(dti.microsecond[0], 0)
self.assertEqual(dti.dayofweek[0], 3)
self.assertEqual(dti.dayofyear[0], 1)
self.assertEqual(dti.dayofyear[120], 121)
self.assertEqual(dti.weekofyear[0], 1)
self.assertEqual(dti.weekofyear[120], 18)
self.assertEqual(dti.quarter[0], 1)
self.assertEqual(dti.quarter[120], 2)
self.assertEqual(dti.is_month_start[0], True)
self.assertEqual(dti.is_month_start[1], False)
self.assertEqual(dti.is_month_start[31], True)
self.assertEqual(dti.is_quarter_start[0], True)
self.assertEqual(dti.is_quarter_start[90], True)
self.assertEqual(dti.is_year_start[0], True)
self.assertEqual(dti.is_year_start[364], False)
self.assertEqual(dti.is_month_end[0], False)
self.assertEqual(dti.is_month_end[30], True)
self.assertEqual(dti.is_month_end[31], False)
self.assertEqual(dti.is_month_end[364], True)
self.assertEqual(dti.is_quarter_end[0], False)
self.assertEqual(dti.is_quarter_end[30], False)
self.assertEqual(dti.is_quarter_end[89], True)
self.assertEqual(dti.is_quarter_end[364], True)
self.assertEqual(dti.is_year_end[0], False)
self.assertEqual(dti.is_year_end[364], True)
self.assertEqual(len(dti.year), 365)
self.assertEqual(len(dti.month), 365)
self.assertEqual(len(dti.day), 365)
self.assertEqual(len(dti.hour), 365)
self.assertEqual(len(dti.minute), 365)
self.assertEqual(len(dti.second), 365)
self.assertEqual(len(dti.microsecond), 365)
self.assertEqual(len(dti.dayofweek), 365)
self.assertEqual(len(dti.dayofyear), 365)
self.assertEqual(len(dti.weekofyear), 365)
self.assertEqual(len(dti.quarter), 365)
self.assertEqual(len(dti.is_month_start), 365)
self.assertEqual(len(dti.is_month_end), 365)
self.assertEqual(len(dti.is_quarter_start), 365)
self.assertEqual(len(dti.is_quarter_end), 365)
self.assertEqual(len(dti.is_year_start), 365)
self.assertEqual(len(dti.is_year_end), 365)
dti = DatetimeIndex(
freq='BQ-FEB', start=datetime(1998, 1, 1), periods=4)
self.assertEqual(sum(dti.is_quarter_start), 0)
self.assertEqual(sum(dti.is_quarter_end), 4)
self.assertEqual(sum(dti.is_year_start), 0)
self.assertEqual(sum(dti.is_year_end), 1)
# Ensure is_start/end accessors throw ValueError for CustomBusinessDay, CBD requires np >= 1.7
if not _np_version_under1p7:
bday_egypt = offsets.CustomBusinessDay(weekmask='Sun Mon Tue Wed Thu')
dti = date_range(datetime(2013, 4, 30), periods=5, freq=bday_egypt)
self.assertRaises(ValueError, lambda: dti.is_month_start)
dti = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'])
self.assertEqual(dti.is_month_start[0], 1)
tests = [
(Timestamp('2013-06-01', offset='M').is_month_start, 1),
(Timestamp('2013-06-01', offset='BM').is_month_start, 0),
(Timestamp('2013-06-03', offset='M').is_month_start, 0),
(Timestamp('2013-06-03', offset='BM').is_month_start, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_month_end, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_quarter_end, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_year_end, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_month_start, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_quarter_start, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_year_start, 1),
(Timestamp('2013-03-31', offset='QS-FEB').is_month_end, 1),
(Timestamp('2013-03-31', offset='QS-FEB').is_quarter_end, 0),
(Timestamp('2013-03-31', offset='QS-FEB').is_year_end, 0),
(Timestamp('2013-02-01', offset='QS-FEB').is_month_start, 1),
(Timestamp('2013-02-01', offset='QS-FEB').is_quarter_start, 1),
(Timestamp('2013-02-01', offset='QS-FEB').is_year_start, 1),
(Timestamp('2013-06-30', offset='BQ').is_month_end, 0),
(Timestamp('2013-06-30', offset='BQ').is_quarter_end, 0),
(Timestamp('2013-06-30', offset='BQ').is_year_end, 0),
(Timestamp('2013-06-28', offset='BQ').is_month_end, 1),
(Timestamp('2013-06-28', offset='BQ').is_quarter_end, 1),
(Timestamp('2013-06-28', offset='BQ').is_year_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_month_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_quarter_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_year_end, 0),
(Timestamp('2013-06-28', offset='BQS-APR').is_month_end, 1),
(Timestamp('2013-06-28', offset='BQS-APR').is_quarter_end, 1),
(Timestamp('2013-03-29', offset='BQS-APR').is_year_end, 1),
(Timestamp('2013-11-01', offset='AS-NOV').is_year_start, 1),
(Timestamp('2013-10-31', offset='AS-NOV').is_year_end, 1)]
for ts, value in tests:
self.assertEqual(ts, value)
def test_nanosecond_field(self):
dti = DatetimeIndex(np.arange(10))
self.assert_numpy_array_equal(dti.nanosecond, np.arange(10))
def test_datetimeindex_diff(self):
dti1 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31),
periods=100)
dti2 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31),
periods=98)
self.assertEqual(len(dti1.diff(dti2)), 2)
def test_fancy_getitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
self.assertEqual(s[48], 48)
self.assertEqual(s['1/2/2009'], 48)
self.assertEqual(s['2009-1-2'], 48)
self.assertEqual(s[datetime(2009, 1, 2)], 48)
self.assertEqual(s[lib.Timestamp(datetime(2009, 1, 2))], 48)
self.assertRaises(KeyError, s.__getitem__, '2009-1-3')
assert_series_equal(s['3/6/2009':'2009-06-05'],
s[datetime(2009, 3, 6):datetime(2009, 6, 5)])
def test_fancy_setitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
s[48] = -1
self.assertEqual(s[48], -1)
s['1/2/2009'] = -2
self.assertEqual(s[48], -2)
s['1/2/2009':'2009-06-05'] = -3
self.assertTrue((s[48:54] == -3).all())
def test_datetimeindex_constructor(self):
arr = ['1/1/2005', '1/2/2005', 'Jn 3, 2005', '2005-01-04']
self.assertRaises(Exception, DatetimeIndex, arr)
arr = ['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04']
idx1 = DatetimeIndex(arr)
arr = [datetime(2005, 1, 1), '1/2/2005', '1/3/2005', '2005-01-04']
idx2 = DatetimeIndex(arr)
arr = [lib.Timestamp(datetime(2005, 1, 1)), '1/2/2005', '1/3/2005',
'2005-01-04']
idx3 = DatetimeIndex(arr)
arr = np.array(['1/1/2005', '1/2/2005', '1/3/2005',
'2005-01-04'], dtype='O')
idx4 = DatetimeIndex(arr)
arr = to_datetime(['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04'])
idx5 = DatetimeIndex(arr)
arr = to_datetime(
['1/1/2005', '1/2/2005', 'Jan 3, 2005', '2005-01-04'])
idx6 = DatetimeIndex(arr)
idx7 = DatetimeIndex(['12/05/2007', '25/01/2008'], dayfirst=True)
idx8 = DatetimeIndex(['2007/05/12', '2008/01/25'], dayfirst=False,
yearfirst=True)
self.assertTrue(idx7.equals(idx8))
for other in [idx2, idx3, idx4, idx5, idx6]:
self.assertTrue((idx1.values == other.values).all())
sdate = datetime(1999, 12, 25)
edate = datetime(2000, 1, 1)
idx = DatetimeIndex(start=sdate, freq='1B', periods=20)
self.assertEqual(len(idx), 20)
self.assertEqual(idx[0], sdate + 0 * dt.bday)
self.assertEqual(idx.freq, 'B')
idx = DatetimeIndex(end=edate, freq=('D', 5), periods=20)
self.assertEqual(len(idx), 20)
self.assertEqual(idx[-1], edate)
self.assertEqual(idx.freq, '5D')
idx1 = DatetimeIndex(start=sdate, end=edate, freq='W-SUN')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.Week(weekday=6))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
idx1 = DatetimeIndex(start=sdate, end=edate, freq='QS')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.QuarterBegin(startingMonth=1))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
idx1 = DatetimeIndex(start=sdate, end=edate, freq='BQ')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.BQuarterEnd(startingMonth=12))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
def test_dayfirst(self):
# GH 5917
arr = ['10/02/2014', '11/02/2014', '12/02/2014']
expected = DatetimeIndex([datetime(2014, 2, 10),
datetime(2014, 2, 11),
datetime(2014, 2, 12)])
idx1 = DatetimeIndex(arr, dayfirst=True)
idx2 = DatetimeIndex(np.array(arr), dayfirst=True)
idx3 = to_datetime(arr, dayfirst=True)
idx4 = to_datetime(np.array(arr), dayfirst=True)
idx5 = DatetimeIndex(Index(arr), dayfirst=True)
idx6 = DatetimeIndex(Series(arr), dayfirst=True)
self.assertTrue(expected.equals(idx1))
self.assertTrue(expected.equals(idx2))
self.assertTrue(expected.equals(idx3))
self.assertTrue(expected.equals(idx4))
self.assertTrue(expected.equals(idx5))
self.assertTrue(expected.equals(idx6))
def test_dti_snap(self):
dti = DatetimeIndex(['1/1/2002', '1/2/2002', '1/3/2002', '1/4/2002',
'1/5/2002', '1/6/2002', '1/7/2002'], freq='D')
res = dti.snap(freq='W-MON')
exp = date_range('12/31/2001', '1/7/2002', freq='w-mon')
exp = exp.repeat([3, 4])
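        # Tue 1/1-Thu 1/3 snap back to Mon 12/31/2001 and Fri 1/4-Mon 1/7 snap
        # forward to Mon 1/7/2002, hence the [3, 4] repeat counts.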
self.assertTrue((res == exp).all())
res = dti.snap(freq='B')
exp = date_range('1/1/2002', '1/7/2002', freq='b')
exp = exp.repeat([1, 1, 1, 2, 2])
self.assertTrue((res == exp).all())
def test_dti_reset_index_round_trip(self):
dti = DatetimeIndex(start='1/1/2001', end='6/1/2001', freq='D')
d1 = DataFrame({'v': np.random.rand(len(dti))}, index=dti)
d2 = d1.reset_index()
self.assertEqual(d2.dtypes[0], np.dtype('M8[ns]'))
d3 = d2.set_index('index')
assert_frame_equal(d1, d3, check_names=False)
# #2329
stamp = datetime(2012, 11, 22)
df = DataFrame([[stamp, 12.1]], columns=['Date', 'Value'])
df = df.set_index('Date')
self.assertEqual(df.index[0], stamp)
self.assertEqual(df.reset_index()['Date'][0], stamp)
def test_dti_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = date_range('2011/01/01', periods=6, freq='M', tz='US/Eastern')
idx2 = date_range('2013', periods=6, freq='A', tz='Asia/Tokyo')
df = df.set_index(idx1)
self.assertTrue(df.index.equals(idx1))
df = df.reindex(idx2)
self.assertTrue(df.index.equals(idx2))
def test_datetimeindex_union_join_empty(self):
dti = DatetimeIndex(start='1/1/2001', end='2/1/2001', freq='D')
empty = Index([])
result = dti.union(empty)
tm.assert_isinstance(result, DatetimeIndex)
self.assertIs(result, result)
result = dti.join(empty)
tm.assert_isinstance(result, DatetimeIndex)
def test_series_set_value(self):
# #1561
dates = [datetime(2001, 1, 1), datetime(2001, 1, 2)]
index = DatetimeIndex(dates)
s = Series().set_value(dates[0], 1.)
s2 = s.set_value(dates[1], np.nan)
exp = Series([1., np.nan], index=index)
assert_series_equal(s2, exp)
# s = Series(index[:1], index[:1])
# s2 = s.set_value(dates[1], index[1])
# self.assertEqual(s2.values.dtype, 'M8[ns]')
@slow
def test_slice_locs_indexerror(self):
times = [datetime(2000, 1, 1) + timedelta(minutes=i * 10)
for i in range(100000)]
s = Series(lrange(100000), times)
s.ix[datetime(1900, 1, 1):datetime(2100, 1, 1)]
class TestSeriesDatetime64(tm.TestCase):
def setUp(self):
self.series = Series(date_range('1/1/2000', periods=10))
def test_auto_conversion(self):
series = Series(list(date_range('1/1/2000', periods=10)))
self.assertEqual(series.dtype, 'M8[ns]')
def test_constructor_cant_cast_datetime64(self):
self.assertRaises(TypeError, Series,
date_range('1/1/2000', periods=10), dtype=float)
def test_series_comparison_scalars(self):
val = datetime(2000, 1, 4)
result = self.series > val
expected = np.array([x > val for x in self.series])
self.assert_numpy_array_equal(result, expected)
val = self.series[5]
result = self.series > val
expected = np.array([x > val for x in self.series])
self.assert_numpy_array_equal(result, expected)
def test_between(self):
left, right = self.series[[2, 7]]
result = self.series.between(left, right)
expected = (self.series >= left) & (self.series <= right)
assert_series_equal(result, expected)
#----------------------------------------------------------------------
# NaT support
def test_NaT_scalar(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
val = series[3]
self.assertTrue(com.isnull(val))
series[2] = val
self.assertTrue(com.isnull(series[2]))
def test_set_none_nan(self):
self.series[3] = None
self.assertIs(self.series[3], NaT)
self.series[3:5] = None
self.assertIs(self.series[4], NaT)
self.series[5] = np.nan
self.assertIs(self.series[5], NaT)
self.series[5:7] = np.nan
self.assertIs(self.series[6], NaT)
def test_intercept_astype_object(self):
# this test no longer makes sense as series is by default already M8[ns]
expected = self.series.astype('object')
df = DataFrame({'a': self.series,
'b': np.random.randn(len(self.series))})
result = df.values.squeeze()
self.assertTrue((result[:, 0] == expected.values).all())
df = DataFrame({'a': self.series,
'b': ['foo'] * len(self.series)})
result = df.values.squeeze()
self.assertTrue((result[:, 0] == expected.values).all())
def test_union(self):
rng1 = date_range('1/1/1999', '1/1/2012', freq='MS')
s1 = Series(np.random.randn(len(rng1)), rng1)
rng2 = date_range('1/1/1980', '12/1/2001', freq='MS')
s2 = Series(np.random.randn(len(rng2)), rng2)
df = DataFrame({'s1': s1, 's2': s2})
self.assertEqual(df.index.values.dtype, np.dtype('M8[ns]'))
def test_intersection(self):
rng = date_range('6/1/2000', '6/15/2000', freq='D')
rng = rng.delete(5)
rng2 = date_range('5/15/2000', '6/20/2000', freq='D')
rng2 = DatetimeIndex(rng2.values)
result = rng.intersection(rng2)
self.assertTrue(result.equals(rng))
# empty same freq GH2129
rng = date_range('6/1/2000', '6/15/2000', freq='T')
result = rng[0:0].intersection(rng)
self.assertEqual(len(result), 0)
result = rng.intersection(rng[0:0])
self.assertEqual(len(result), 0)
def test_date_range_bms_bug(self):
# #1645
rng = date_range('1/1/2000', periods=10, freq='BMS')
ex_first = Timestamp('2000-01-03')
self.assertEqual(rng[0], ex_first)
def test_string_index_series_name_converted(self):
# #1644
df = DataFrame(np.random.randn(10, 4),
index=date_range('1/1/2000', periods=10))
result = df.ix['1/3/2000']
self.assertEqual(result.name, df.index[2])
result = df.T['1/3/2000']
self.assertEqual(result.name, df.index[2])
class TestTimestamp(tm.TestCase):
def test_class_ops(self):
_skip_if_no_pytz()
import pytz
def compare(x,y):
self.assertEqual(int(Timestamp(x).value/1e9), int(Timestamp(y).value/1e9))
compare(Timestamp.now(),datetime.now())
compare(Timestamp.now('UTC'),datetime.now(pytz.timezone('UTC')))
compare(Timestamp.utcnow(),datetime.utcnow())
compare(Timestamp.today(),datetime.today())
def test_basics_nanos(self):
val = np.int64(946684800000000000).view('M8[ns]')
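        # 946684800000000000 ns since the epoch is 2000-01-01 00:00:00; the
        # +500 below therefore shows up only in the nanosecond field.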
stamp = Timestamp(val.view('i8') + 500)
self.assertEqual(stamp.year, 2000)
self.assertEqual(stamp.month, 1)
self.assertEqual(stamp.microsecond, 0)
self.assertEqual(stamp.nanosecond, 500)
def test_unit(self):
def check(val,unit=None,h=1,s=1,us=0):
stamp = Timestamp(val, unit=unit)
self.assertEqual(stamp.year, 2000)
self.assertEqual(stamp.month, 1)
self.assertEqual(stamp.day, 1)
self.assertEqual(stamp.hour, h)
if unit != 'D':
self.assertEqual(stamp.minute, 1)
self.assertEqual(stamp.second, s)
self.assertEqual(stamp.microsecond, us)
else:
self.assertEqual(stamp.minute, 0)
self.assertEqual(stamp.second, 0)
self.assertEqual(stamp.microsecond, 0)
self.assertEqual(stamp.nanosecond, 0)
ts = Timestamp('20000101 01:01:01')
val = ts.value
days = (ts - Timestamp('1970-01-01')).days
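        # ts.value is nanoseconds since the epoch, so dividing by 1e3/1e6/1e9
        # expresses the same instant in us/ms/s for the unit= checks below.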
check(val)
check(val/long(1000),unit='us')
check(val/long(1000000),unit='ms')
check(val/long(1000000000),unit='s')
check(days,unit='D',h=0)
# using truediv, so these are like floats
if compat.PY3:
check((val+500000)/long(1000000000),unit='s',us=500)
check((val+500000000)/long(1000000000),unit='s',us=500000)
check((val+500000)/long(1000000),unit='ms',us=500)
# get chopped in py2
else:
check((val+500000)/long(1000000000),unit='s')
check((val+500000000)/long(1000000000),unit='s')
check((val+500000)/long(1000000),unit='ms')
# ok
check((val+500000)/long(1000),unit='us',us=500)
check((val+500000000)/long(1000000),unit='ms',us=500000)
# floats
check(val/1000.0 + 5,unit='us',us=5)
check(val/1000.0 + 5000,unit='us',us=5000)
check(val/1000000.0 + 0.5,unit='ms',us=500)
check(val/1000000.0 + 0.005,unit='ms',us=5)
check(val/1000000000.0 + 0.5,unit='s',us=500000)
check(days + 0.5,unit='D',h=12)
# nan
result = Timestamp(np.nan)
self.assertIs(result, NaT)
result = Timestamp(None)
self.assertIs(result, NaT)
result = Timestamp(iNaT)
self.assertIs(result, NaT)
result = Timestamp(NaT)
self.assertIs(result, NaT)
def test_comparison(self):
# 5-18-2012 00:00:00.000
stamp = long(1337299200000000000)
val = Timestamp(stamp)
self.assertEqual(val, val)
self.assertFalse(val != val)
self.assertFalse(val < val)
self.assertTrue(val <= val)
self.assertFalse(val > val)
self.assertTrue(val >= val)
other = datetime(2012, 5, 18)
self.assertEqual(val, other)
self.assertFalse(val != other)
self.assertFalse(val < other)
self.assertTrue(val <= other)
self.assertFalse(val > other)
self.assertTrue(val >= other)
other = Timestamp(stamp + 100)
self.assertNotEqual(val, other)
self.assertNotEqual(val, other)
self.assertTrue(val < other)
self.assertTrue(val <= other)
self.assertTrue(other > val)
self.assertTrue(other >= val)
def test_cant_compare_tz_naive_w_aware(self):
_skip_if_no_pytz()
# #1404
a = Timestamp('3/12/2012')
b = Timestamp('3/12/2012', tz='utc')
self.assertRaises(Exception, a.__eq__, b)
self.assertRaises(Exception, a.__ne__, b)
self.assertRaises(Exception, a.__lt__, b)
self.assertRaises(Exception, a.__gt__, b)
self.assertRaises(Exception, b.__eq__, a)
self.assertRaises(Exception, b.__ne__, a)
self.assertRaises(Exception, b.__lt__, a)
self.assertRaises(Exception, b.__gt__, a)
if sys.version_info < (3, 3):
self.assertRaises(Exception, a.__eq__, b.to_pydatetime())
self.assertRaises(Exception, a.to_pydatetime().__eq__, b)
else:
self.assertFalse(a == b.to_pydatetime())
self.assertFalse(a.to_pydatetime() == b)
def test_delta_preserve_nanos(self):
val = Timestamp(long(1337299200000000123))
result = val + timedelta(1)
self.assertEqual(result.nanosecond, val.nanosecond)
def test_frequency_misc(self):
self.assertEqual(fmod.get_freq_group('T'),
fmod.FreqGroup.FR_MIN)
code, stride = fmod.get_freq_code(offsets.Hour())
self.assertEqual(code, fmod.FreqGroup.FR_HR)
code, stride = fmod.get_freq_code((5, 'T'))
self.assertEqual(code, fmod.FreqGroup.FR_MIN)
self.assertEqual(stride, 5)
offset = offsets.Hour()
result = fmod.to_offset(offset)
self.assertEqual(result, offset)
result = fmod.to_offset((5, 'T'))
expected = offsets.Minute(5)
self.assertEqual(result, expected)
self.assertRaises(ValueError, fmod.get_freq_code, (5, 'baz'))
self.assertRaises(ValueError, fmod.to_offset, '100foo')
self.assertRaises(ValueError, fmod.to_offset, ('', ''))
result = fmod.get_standard_freq(offsets.Hour())
self.assertEqual(result, 'H')
def test_hash_equivalent(self):
d = {datetime(2011, 1, 1): 5}
stamp = Timestamp(datetime(2011, 1, 1))
self.assertEqual(d[stamp], 5)
def test_timestamp_compare_scalars(self):
# case where ndim == 0
lhs = np.datetime64(datetime(2013, 12, 6))
rhs = Timestamp('now')
nat = Timestamp('nat')
ops = {'gt': 'lt', 'lt': 'gt', 'ge': 'le', 'le': 'ge', 'eq': 'eq',
'ne': 'ne'}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
if pd._np_version_under1p7:
# you have to convert to timestamp for this to work with numpy
# scalars
expected = left_f(Timestamp(lhs), rhs)
# otherwise a TypeError is thrown
if left not in ('eq', 'ne'):
with tm.assertRaises(TypeError):
left_f(lhs, rhs)
else:
expected = left_f(lhs, rhs)
result = right_f(rhs, lhs)
self.assertEqual(result, expected)
expected = left_f(rhs, nat)
result = right_f(nat, rhs)
self.assertEqual(result, expected)
def test_timestamp_compare_series(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH4982
s = Series(date_range('20010101', periods=10), name='dates')
s_nat = s.copy(deep=True)
s[0] = pd.Timestamp('nat')
s[3] = pd.Timestamp('nat')
ops = {'lt': 'gt', 'le': 'ge', 'eq': 'eq', 'ne': 'ne'}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
expected = left_f(s, Timestamp('20010109'))
result = right_f(Timestamp('20010109'), s)
tm.assert_series_equal(result, expected)
# nats
expected = left_f(s, Timestamp('nat'))
result = right_f(Timestamp('nat'), s)
tm.assert_series_equal(result, expected)
# compare to timestamp with series containing nats
expected = left_f(s_nat, Timestamp('20010109'))
result = right_f(Timestamp('20010109'), s_nat)
tm.assert_series_equal(result, expected)
# compare to nat with series containing nats
expected = left_f(s_nat, Timestamp('nat'))
result = right_f(Timestamp('nat'), s_nat)
tm.assert_series_equal(result, expected)
class TestSlicing(tm.TestCase):
def test_slice_year(self):
dti = DatetimeIndex(freq='B', start=datetime(2005, 1, 1), periods=500)
s = Series(np.arange(len(dti)), index=dti)
result = s['2005']
expected = s[s.index.year == 2005]
assert_series_equal(result, expected)
df = DataFrame(np.random.rand(len(dti), 5), index=dti)
result = df.ix['2005']
expected = df[df.index.year == 2005]
assert_frame_equal(result, expected)
rng = date_range('1/1/2000', '1/1/2010')
result = rng.get_loc('2009')
expected = slice(3288, 3653)
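        # 2000-2008 contain three leap years, so '2009' starts at position
        # 9 * 365 + 3 = 3288 and spans 365 positions, ending at 3653.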
self.assertEqual(result, expected)
def test_slice_quarter(self):
dti = DatetimeIndex(freq='D', start=datetime(2000, 6, 1), periods=500)
s = Series(np.arange(len(dti)), index=dti)
self.assertEqual(len(s['2001Q1']), 90)
df = DataFrame(np.random.rand(len(dti), 5), index=dti)
self.assertEqual(len(df.ix['1Q01']), 90)
def test_slice_month(self):
dti = DatetimeIndex(freq='D', start=datetime(2005, 1, 1), periods=500)
s = Series(np.arange(len(dti)), index=dti)
self.assertEqual(len(s['2005-11']), 30)
df = DataFrame(np.random.rand(len(dti), 5), index=dti)
self.assertEqual(len(df.ix['2005-11']), 30)
assert_series_equal(s['2005-11'], s['11-2005'])
def test_partial_slice(self):
rng = DatetimeIndex(freq='D', start=datetime(2005, 1, 1), periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-05':'2006-02']
expected = s['20050501':'20060228']
assert_series_equal(result, expected)
result = s['2005-05':]
expected = s['20050501':]
assert_series_equal(result, expected)
result = s[:'2006-02']
expected = s[:'20060228']
assert_series_equal(result, expected)
result = s['2005-1-1']
self.assertEqual(result, s.irow(0))
self.assertRaises(Exception, s.__getitem__, '2004-12-31')
def test_partial_slice_daily(self):
rng = DatetimeIndex(freq='H', start=datetime(2005, 1, 31), periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-1-31']
assert_series_equal(result, s.ix[:24])
self.assertRaises(Exception, s.__getitem__, '2004-12-31 00')
def test_partial_slice_hourly(self):
rng = DatetimeIndex(freq='T', start=datetime(2005, 1, 1, 20, 0, 0),
periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-1-1']
assert_series_equal(result, s.ix[:60 * 4])
result = s['2005-1-1 20']
assert_series_equal(result, s.ix[:60])
self.assertEqual(s['2005-1-1 20:00'], s.ix[0])
self.assertRaises(Exception, s.__getitem__, '2004-12-31 00:15')
def test_partial_slice_minutely(self):
rng = DatetimeIndex(freq='S', start=datetime(2005, 1, 1, 23, 59, 0),
periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-1-1 23:59']
assert_series_equal(result, s.ix[:60])
result = s['2005-1-1']
assert_series_equal(result, s.ix[:60])
self.assertEqual(s[Timestamp('2005-1-1 23:59:00')], s.ix[0])
self.assertRaises(Exception, s.__getitem__, '2004-12-31 00:00:00')
def test_partial_slicing_with_multiindex(self):
# GH 4758
# partial string indexing with a multi-index buggy
df = DataFrame({'ACCOUNT':["ACCT1", "ACCT1", "ACCT1", "ACCT2"],
'TICKER':["ABC", "MNP", "XYZ", "XYZ"],
'val':[1,2,3,4]},
index=date_range("2013-06-19 09:30:00", periods=4, freq='5T'))
df_multi = df.set_index(['ACCOUNT', 'TICKER'], append=True)
expected = DataFrame([[1]],index=Index(['ABC'],name='TICKER'),columns=['val'])
result = df_multi.loc[('2013-06-19 09:30:00', 'ACCT1')]
assert_frame_equal(result, expected)
expected = df_multi.loc[(pd.Timestamp('2013-06-19 09:30:00', tz=None), 'ACCT1', 'ABC')]
result = df_multi.loc[('2013-06-19 09:30:00', 'ACCT1', 'ABC')]
assert_series_equal(result, expected)
# this is a KeyError as we don't do partial string selection on multi-levels
def f():
df_multi.loc[('2013-06-19', 'ACCT1', 'ABC')]
self.assertRaises(KeyError, f)
# GH 4294
# partial slice on a series mi
s = pd.DataFrame(randn(1000, 1000), index=pd.date_range('2000-1-1', periods=1000)).stack()
s2 = s[:-1].copy()
expected = s2['2000-1-4']
result = s2[pd.Timestamp('2000-1-4')]
assert_series_equal(result, expected)
result = s[pd.Timestamp('2000-1-4')]
expected = s['2000-1-4']
assert_series_equal(result, expected)
df2 = pd.DataFrame(s)
expected = df2.ix['2000-1-4']
result = df2.ix[pd.Timestamp('2000-1-4')]
assert_frame_equal(result, expected)
def test_date_range_normalize(self):
snap = datetime.today()
n = 50
rng = date_range(snap, periods=n, normalize=False, freq='2D')
offset = timedelta(2)
values = np.array([snap + i * offset for i in range(n)],
dtype='M8[ns]')
self.assert_numpy_array_equal(rng, values)
rng = date_range(
'1/1/2000 08:15', periods=n, normalize=False, freq='B')
the_time = time(8, 15)
for val in rng:
self.assertEqual(val.time(), the_time)
def test_timedelta(self):
# this is valid too
index = date_range('1/1/2000', periods=50, freq='B')
shifted = index + timedelta(1)
back = shifted + timedelta(-1)
self.assertTrue(tm.equalContents(index, back))
self.assertEqual(shifted.freq, index.freq)
self.assertEqual(shifted.freq, back.freq)
result = index - timedelta(1)
expected = index + timedelta(-1)
self.assertTrue(result.equals(expected))
# GH4134, buggy with timedeltas
rng = date_range('2013', '2014')
s = Series(rng)
        result1 = rng - pd.offsets.Hour(1)
import os, sys
import numpy as np
from pdb import set_trace as st
import pandas as pd
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from matplotlib import cm
from matplotlib import axes
import matplotlib.ticker as ticker
dir = f"models/prune/f1_score/allconv_channel"
modes = [
"posneg_small",
"posonly_small",
]
fig_dir = os.path.join(dir, "fig")
os.makedirs(fig_dir, exist_ok=True)
exp_configs = {
# "conv12": ["conv2d_12"],
"block4": ["conv2d_10", "conv2d_11", "conv2d_12"],
# "block4main": ["conv2d_11", "conv2d_12"],
}
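# NOTE: only the keys of exp_configs are used below (they name the result CSV
# files); the layer lists are presumably the conv layers each experiment covers
# and are not read by the plotting loop.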
def draw_heatmap_expconfig():
for mode in modes:
for exp_name in exp_configs.keys():
for ratio in np.arange(0.5, 0.9, 0.1):
print(f"{mode} {exp_name} ratio={ratio}")
interclass_matrix = []
for class_id in list(range(10))+["all"]:
filename = f"allconv_class_{exp_name}_{class_id}.csv"
path = os.path.join(dir, filename)
df = pd.read_csv(path, index_col=0)
df = df[df['ratio'] == round(ratio, 2)]
df = df.sort_values(by="test_class", ascending=True)
df = df[mode]
interclass_matrix.append(df.to_numpy())
interclass_matrix = np.stack(interclass_matrix)
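                # rows follow the class_id loop (0-9, then "all"); columns follow
                # test_class sorted ascending, so entry [i, j] is the selected
                # mode column from the class-i file evaluated on test class j.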
print(interclass_matrix)
print()
# plot
fig_name = f"{mode}_{exp_name}_ratio={round(ratio, 2)}.pdf"
fig_path = os.path.join(fig_dir, fig_name)
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(
interclass_matrix,
interpolation='nearest', cmap='winter',
vmin=0.5, vmax=0.95
)
fig.colorbar(cax)
tick_spacing = 1
ax.xaxis.set_major_locator(ticker.MultipleLocator(tick_spacing))
ax.yaxis.set_major_locator(ticker.MultipleLocator(tick_spacing))
ax.set_xticklabels([''] + list(range(10)))
ax.set_yticklabels([''] + list(range(10)) + ["All"])
for i in range(11):
for j in range(10):
text = ax.text(j, i, interclass_matrix[i, j],
ha="center", va="center", color="w")
plt.savefig(fig_path)
plt.clf()
# st()
def draw_heatmap_allconv():
modes = [
"posnegweight_small",
# "posonlyweight_small",
]
for mode in modes:
for ratio in np.arange(0.1, 1.0, 0.1):
print(f"{mode} ratio={ratio}")
interclass_matrix = []
for class_id in list(range(10))+["all"]:
filename = f"allconv_class_{class_id}.csv"
path = os.path.join(dir, filename)
                df = pd.read_csv(path, index_col=0)
import os
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from .. import read_sql
@pytest.fixture(scope="module") # type: ignore
def mssql_url() -> str:
conn = os.environ["MSSQL_URL"]
return conn
@pytest.mark.xfail
def test_on_non_select(mssql_url: str) -> None:
query = "CREATE TABLE non_select(id INTEGER NOT NULL)"
df = read_sql(mssql_url, query)
def test_aggregation(mssql_url: str) -> None:
query = (
"SELECT test_bool, SUM(test_float) as sum FROM test_table GROUP BY test_bool"
)
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"sum": pd.Series([10.9, 5.2, -10.0], dtype="float64"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_partition_on_aggregation(mssql_url: str) -> None:
query = (
"SELECT test_bool, SUM(test_int) AS test_int FROM test_table GROUP BY test_bool"
)
df = read_sql(mssql_url, query, partition_on="test_int", partition_num=2)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"test_int": pd.Series([4, 5, 1315], dtype="Int64"),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_aggregation2(mssql_url: str) -> None:
query = "select DISTINCT(test_bool) from test_table"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_partition_on_aggregation2(mssql_url: str) -> None:
query = "select MAX(test_int) as max, MIN(test_int) as min from test_table"
df = read_sql(mssql_url, query, partition_on="max", partition_num=2)
expected = pd.DataFrame(
index=range(1),
data={
"max": pd.Series([1314], dtype="Int64"),
"min": pd.Series([0], dtype="Int64"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_udf(mssql_url: str) -> None:
query = (
"SELECT dbo.increment(test_int) AS test_int FROM test_table ORDER BY test_int"
)
df = read_sql(mssql_url, query, partition_on="test_int", partition_num=2)
expected = pd.DataFrame(
index=range(6),
        data={
            "test_int": pd.Series([1, 2, 3, 4, 5, 1315], dtype="Int64"),
        },
    )
    df.sort_values(by="test_int", inplace=True, ignore_index=True)
    assert_frame_equal(df, expected, check_names=True)
# Project: fuelmeter-tools
# Created on: 5/7/2020
from pandas.tseries.offsets import MonthEnd
from puma.Report import Report
import pandas as pd
import numpy as np
import puma.plot as pplot
import puma.tex as ptex
import datetime
import os
class MultiMonthReport(Report):
def __init__(self,start,end,title,nc,houses,monthly_fuel_price):
super(MultiMonthReport, self).__init__(start,end,title,nc,houses,monthly_fuel_price)
def getAveCostPerDay(self):
        '''calculates the average cost of fuel per day. If the attribute gpd_hdd
        is available it is used to calculate costs; otherwise the attribute
        fuel_by_day is used.'''
if 'gpd_hdd' not in self.__dict__:
self.cost_per_day = self.getCostPerDay(self.fuel_by_day)
else:
self.cost_per_day = self.getCostPerDay(self.gpd_hdd)
return self.cost_per_day.mean()
def getCostPerDay(self,fuel_by_day):
'''calculate cost for each day based on a fuel price for each day and fuel consumption for each day'''
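        # Hypothetical example: daily fuel totals of [1.2, 0.8] gal at a mean
        # daily price of $3.50/gal give a cost series of [4.20, 2.80]; days with
        # no price record reuse the previous day's price via the ffill below.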
self.fuel_price.name = 'fuel_price'
df = pd.concat([fuel_by_day, self.fuel_price.groupby(pd.Grouper(freq='D')).mean()], axis=1)
df.fuel_price = df.fuel_price.ffill() # filled for days that did not match
return df.fuel_consumption * df.fuel_price
# def getEstimatedTotalGallons(self):
# '''calculates the total gallons used each month and sets the attribute gallons_by_month
# :return float total gallons for the entire report period'''
# self.estimated_gallons_by_month = self.calculateTotalGallonsByMonth()
# return self.gallons_by_month.sum()
def getCostPerMonth(self):
'''calculates the total cost of consumed fuel per month by summing cost per day for every day within a month'''
        if self.cost_per_day is None:
if 'gpd_hdd' in self.__dict__:
self.cost_per_day = self.getCostPerDay(self.gpd_hdd)
else:
self.cost_per_day = self.getCostPerDay(self.fuel_by_day)
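        # pd.Grouper(freq="M") buckets the daily costs into calendar months
        # (month-end labels) before summing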
self.cost_per_month = self.cost_per_day.groupby(pd.Grouper(freq="M")).sum()
return
def getTotalCost(self):
'''uses hdd corrected estimate of fuel consumption to estimate cost per day and aggregate to the entire report period.'''
costPerDay = self.getCostPerDay(self.gpd_hdd)
return costPerDay.sum()
def calculateMeanDailyGallonsPerMonth(self):
'''Calculates the total gallons consumed by month based on an average daily consumption rate for each month'''
        #actual measured total by day; we use a count of 5 records as our cutoff for producing a legitimate average
groupedDaily = self.filtered_df['fuel_consumption'].groupby(pd.Grouper(freq="D")).sum(min_count=5) #total gallons each day
#total days needing estimates
self.meanDailyByMonth = groupedDaily.groupby(pd.Grouper(freq='M')).agg(['mean','count']) #total daily gallons averaged over month
        self.meanDailyByMonth = self.meanDailyByMonth.loc[self.meanDailyByMonth['count'] >=15,'mean'] #drop months with fewer than 15 days of data
#estimatedTotalByMonth = self.meanDailyByMonth * self.meanDailyByMonth.index.days_in_month #use the average to calculate a total amount for the month
return
def calculateMeanGallonsPerMonth(self):
'''get the average gallons consumed for all months in the reporting period'''
tgpm = self.calculateTotalGallonsByMonth()
return tgpm.mean()
def getGallonsPerFt(self):
'''get the total gallons used in the report period per house area (square feet).
sets the aveGPFByYear attribute which is the totalGPF for each year averaged over all years.
:return float total gallons per house square footage for the report period'''
totalGPF = super().getGallonsPerFt()
        AveDailyByYear = self.filtered_df['fuel_consumption'].groupby(pd.Grouper(freq='A'))
# -*- coding: utf-8 -*-
"""Color Dataset Creator.ipynb
Automatically generated by Colaboratory.
# Importing Libraries & Initialization
"""
#Importing Libraries
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import random
import pandas as pd
import numpy as np
#Initialisation at First Time Execution
data = []
red, green, blue, label = [], [], [], []
counter = 0 #color_index in df
first_time = 0 #flag for first time execution
df = pd.DataFrame()
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120
from cudf.core.column import column
from cudf.tests import utils
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def _dataframe_na_data():
return [
pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
},
index=["q", "w", "e", "r", "t", "y", "u"],
),
pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]}),
pd.DataFrame(
{
"a": [None, None, np.nan, None],
"b": [np.nan, None, np.nan, None],
}
),
pd.DataFrame({"a": []}),
pd.DataFrame({"a": [np.nan], "b": [None]}),
pd.DataFrame({"a": ["a", "b", "c", None, "e"]}),
pd.DataFrame({"a": ["a", "b", "c", "d", "e"]}),
]
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(
pdf,
gdf,
check_like=True,
check_column_type=False,
check_index_type=False,
)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"b": pd.Series([1.0, 2.0, 4.0], index=["c", "d", "e"]),
},
{
"a": pd.Series(
["a", "b", "c"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
"b": pd.Series(
["a", " b", "d"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
},
],
)
def test_init_from_series_align(dict_of_series):
pdf = pd.DataFrame(dict_of_series)
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
for key in dict_of_series:
if isinstance(dict_of_series[key], pd.Series):
dict_of_series[key] = cudf.Series(dict_of_series[key])
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
("dict_of_series", "expectation"),
[
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 5, 6]),
},
pytest.raises(
ValueError, match="Cannot align indices with non-unique values"
),
),
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
},
does_not_raise(),
),
],
)
def test_init_from_series_align_nonunique(dict_of_series, expectation):
with expectation:
gdf = cudf.DataFrame(dict_of_series)
if expectation == does_not_raise():
pdf = pd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
def test_init_unaligned_with_index():
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
gdf = cudf.DataFrame(
{
"a": cudf.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": cudf.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
assert_eq(pdf, gdf, check_dtype=False)
def test_series_basic():
# Make series from buffer
a1 = np.arange(10, dtype=np.float64)
series = cudf.Series(a1)
assert len(series) == 10
np.testing.assert_equal(series.to_array(), np.hstack([a1]))
def test_series_from_cupy_scalars():
data = [0.1, 0.2, 0.3]
data_np = np.array(data)
data_cp = cupy.array(data)
s_np = cudf.Series([data_np[0], data_np[2]])
s_cp = cudf.Series([data_cp[0], data_cp[2]])
assert_eq(s_np, s_cp)
@pytest.mark.parametrize("a", [[1, 2, 3], [1, 10, 30]])
@pytest.mark.parametrize("b", [[4, 5, 6], [-11, -100, 30]])
def test_append_index(a, b):
df = pd.DataFrame()
df["a"] = a
df["b"] = b
gdf = cudf.DataFrame()
gdf["a"] = a
gdf["b"] = b
# Check the default index after appending two columns(Series)
expected = df.a.append(df.b)
actual = gdf.a.append(gdf.b)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
expected = df.a.append(df.b, ignore_index=True)
actual = gdf.a.append(gdf.b, ignore_index=True)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
def test_series_init_none():
# test for creating empty series
# 1: without initializing
sr1 = cudf.Series()
got = sr1.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
# 2: Using `None` as an initializer
sr2 = cudf.Series(None)
got = sr2.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_basic():
np.random.seed(0)
df = cudf.DataFrame()
# Populate with cuda memory
df["keys"] = np.arange(10, dtype=np.float64)
np.testing.assert_equal(df["keys"].to_array(), np.arange(10))
assert len(df) == 10
# Populate with numpy array
rnd_vals = np.random.random(10)
df["vals"] = rnd_vals
np.testing.assert_equal(df["vals"].to_array(), rnd_vals)
assert len(df) == 10
assert tuple(df.columns) == ("keys", "vals")
# Make another dataframe
df2 = cudf.DataFrame()
df2["keys"] = np.array([123], dtype=np.float64)
df2["vals"] = np.array([321], dtype=np.float64)
# Concat
df = cudf.concat([df, df2])
assert len(df) == 11
hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])
hvals = np.asarray(rnd_vals.tolist() + [321])
np.testing.assert_equal(df["keys"].to_array(), hkeys)
np.testing.assert_equal(df["vals"].to_array(), hvals)
# As matrix
mat = df.as_matrix()
expect = np.vstack([hkeys, hvals]).T
np.testing.assert_equal(mat, expect)
# test dataframe with tuple name
df_tup = cudf.DataFrame()
data = np.arange(10)
df_tup[(1, "foobar")] = data
np.testing.assert_equal(data, df_tup[(1, "foobar")].to_array())
df = cudf.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
pdf = pd.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
assert_eq(df, pdf)
gdf = cudf.DataFrame({"id": [0, 1], "val": [None, None]})
gdf["val"] = gdf["val"].astype("int")
assert gdf["val"].isnull().all()
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"columns", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_columns(pdf, columns, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(columns=columns, inplace=inplace)
actual = gdf.drop(columns=columns, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_0(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=0, inplace=inplace)
actual = gdf.drop(labels=labels, axis=0, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"index",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_index(pdf, index, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace)
actual = gdf.drop(index=index, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5},
index=pd.MultiIndex(
levels=[
["lama", "cow", "falcon"],
["speed", "weight", "length"],
],
codes=[
[0, 0, 0, 1, 1, 1, 2, 2, 2, 1],
[0, 1, 2, 0, 1, 2, 0, 1, 2, 1],
],
),
)
],
)
@pytest.mark.parametrize(
"index,level",
[
("cow", 0),
("lama", 0),
("falcon", 0),
("speed", 1),
("weight", 1),
("length", 1),
pytest.param(
"cow",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"lama",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"falcon",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_multiindex(pdf, index, level, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace, level=level)
actual = gdf.drop(index=index, inplace=inplace, level=level)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_1(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=1, inplace=inplace)
actual = gdf.drop(labels=labels, axis=1, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
def test_dataframe_drop_error():
df = cudf.DataFrame({"a": [1], "b": [2], "c": [3]})
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "d"}),
rfunc_args_and_kwargs=([], {"columns": "d"}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
rfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
rfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
expected_error_message="Cannot specify both",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"axis": 1}),
rfunc_args_and_kwargs=([], {"axis": 1}),
expected_error_message="Need to specify at least",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([[2, 0]],),
rfunc_args_and_kwargs=([[2, 0]],),
expected_error_message="One or more values not found in axis",
)
def test_dataframe_drop_raises():
df = cudf.DataFrame(
{"a": [1, 2, 3], "c": [10, 20, 30]}, index=["x", "y", "z"]
)
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["p"],),
rfunc_args_and_kwargs=(["p"],),
expected_error_message="One or more values not found in axis",
)
# label dtype mismatch
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([3],),
rfunc_args_and_kwargs=([3],),
expected_error_message="One or more values not found in axis",
)
expect = pdf.drop("p", errors="ignore")
actual = df.drop("p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "p"}),
rfunc_args_and_kwargs=([], {"columns": "p"}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(columns="p", errors="ignore")
actual = df.drop(columns="p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
rfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(labels="p", axis=1, errors="ignore")
actual = df.drop(labels="p", axis=1, errors="ignore")
assert_eq(actual, expect)
def test_dataframe_column_add_drop_via_setitem():
df = cudf.DataFrame()
data = np.asarray(range(10))
df["a"] = data
df["b"] = data
assert tuple(df.columns) == ("a", "b")
del df["a"]
assert tuple(df.columns) == ("b",)
df["c"] = data
assert tuple(df.columns) == ("b", "c")
df["a"] = data
assert tuple(df.columns) == ("b", "c", "a")
def test_dataframe_column_set_via_attr():
data_0 = np.asarray([0, 2, 4, 5])
data_1 = np.asarray([1, 4, 2, 3])
data_2 = np.asarray([2, 0, 3, 0])
df = cudf.DataFrame({"a": data_0, "b": data_1, "c": data_2})
for i in range(10):
df.c = df.a
assert assert_eq(df.c, df.a, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
df.c = df.b
assert assert_eq(df.c, df.b, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
def test_dataframe_column_drop_via_attr():
df = cudf.DataFrame({"a": []})
with pytest.raises(AttributeError):
del df.a
assert tuple(df.columns) == tuple("a")
@pytest.mark.parametrize("axis", [0, "index"])
def test_dataframe_index_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper={1: 5, 2: 6}, axis=axis)
got = gdf.rename(mapper={1: 5, 2: 6}, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(index={1: 5, 2: 6})
got = gdf.rename(index={1: 5, 2: 6})
assert_eq(expect, got)
expect = pdf.rename({1: 5, 2: 6})
got = gdf.rename({1: 5, 2: 6})
assert_eq(expect, got)
# `pandas` can support indexes with mixed values. We throw a
# `NotImplementedError`.
with pytest.raises(NotImplementedError):
gdf.rename(mapper={1: "x", 2: "y"}, axis=axis)
def test_dataframe_MI_rename():
gdf = cudf.DataFrame(
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
)
gdg = gdf.groupby(["a", "b"]).count()
pdg = gdg.to_pandas()
expect = pdg.rename(mapper={1: 5, 2: 6}, axis=0)
got = gdg.rename(mapper={1: 5, 2: 6}, axis=0)
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [1, "columns"])
def test_dataframe_column_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper=lambda name: 2 * name, axis=axis)
got = gdf.rename(mapper=lambda name: 2 * name, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(columns=lambda name: 2 * name)
got = gdf.rename(columns=lambda name: 2 * name)
assert_eq(expect, got)
rename_mapper = {"a": "z", "b": "y", "c": "x"}
expect = pdf.rename(columns=rename_mapper)
got = gdf.rename(columns=rename_mapper)
assert_eq(expect, got)
def test_dataframe_pop():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": ["x", "y", "z"], "c": [7.0, 8.0, 9.0]}
)
gdf = cudf.DataFrame.from_pandas(pdf)
# Test non-existing column error
with pytest.raises(KeyError) as raises:
gdf.pop("fake_colname")
raises.match("fake_colname")
# check pop numeric column
pdf_pop = pdf.pop("a")
gdf_pop = gdf.pop("a")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check string column
pdf_pop = pdf.pop("b")
gdf_pop = gdf.pop("b")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check float column and empty dataframe
pdf_pop = pdf.pop("c")
gdf_pop = gdf.pop("c")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check empty dataframe edge case
empty_pdf = pd.DataFrame(columns=["a", "b"])
empty_gdf = cudf.DataFrame(columns=["a", "b"])
pb = empty_pdf.pop("b")
gb = empty_gdf.pop("b")
assert len(pb) == len(gb)
assert empty_pdf.empty and empty_gdf.empty
@pytest.mark.parametrize("nelem", [0, 3, 100, 1000])
def test_dataframe_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df["a"].dtype is np.dtype(np.int32)
df["b"] = df["a"].astype(np.float32)
assert df["b"].dtype is np.dtype(np.float32)
np.testing.assert_equal(df["a"].to_array(), df["b"].to_array())
@pytest.mark.parametrize("nelem", [0, 100])
def test_index_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df.index.dtype is np.dtype(np.int64)
df.index = df.index.astype(np.float32)
assert df.index.dtype is np.dtype(np.float32)
df["a"] = df["a"].astype(np.float32)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
df["b"] = df["a"]
df = df.set_index("b")
df["a"] = df["a"].astype(np.int16)
df.index = df.index.astype(np.int16)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
def test_dataframe_to_string():
pd.options.display.max_rows = 5
pd.options.display.max_columns = 8
# Test basic
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
string = str(df)
assert string.splitlines()[-1] == "[6 rows x 2 columns]"
# Test skipped columns
df = cudf.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [11, 12, 13, 14, 15, 16],
"c": [11, 12, 13, 14, 15, 16],
"d": [11, 12, 13, 14, 15, 16],
}
)
string = df.to_string()
assert string.splitlines()[-1] == "[6 rows x 4 columns]"
# Test masked
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
data = np.arange(6)
mask = np.zeros(1, dtype=cudf.utils.utils.mask_dtype)
mask[0] = 0b00101101
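    # 0b00101101 sets bits 0, 2, 3 and 5, so rows 1 and 4 are null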
masked = cudf.Series.from_masked_array(data, mask)
assert masked.null_count == 2
df["c"] = masked
# check data
values = masked.copy()
validids = [0, 2, 3, 5]
densearray = masked.to_array()
np.testing.assert_equal(data[validids], densearray)
    # valid position is correct
for i in validids:
assert data[i] == values[i]
# null position is correct
for i in range(len(values)):
if i not in validids:
assert values[i] is cudf.NA
pd.options.display.max_rows = 10
got = df.to_string()
expect = """
a b c
0 1 11 0
1 2 12 <NA>
2 3 13 2
3 4 14 3
4 5 15 <NA>
5 6 16 5
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_to_string_wide(monkeypatch):
monkeypatch.setenv("COLUMNS", "79")
# Test basic
df = cudf.DataFrame()
for i in range(100):
df["a{}".format(i)] = list(range(3))
pd.options.display.max_columns = 0
got = df.to_string()
expect = """
a0 a1 a2 a3 a4 a5 a6 a7 ... a92 a93 a94 a95 a96 a97 a98 a99
0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0
1 1 1 1 1 1 1 1 1 ... 1 1 1 1 1 1 1 1
2 2 2 2 2 2 2 2 2 ... 2 2 2 2 2 2 2 2
[3 rows x 100 columns]
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_empty_to_string():
# Test for printing empty dataframe
df = cudf.DataFrame()
got = df.to_string()
expect = "Empty DataFrame\nColumns: []\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_emptycolumns_to_string():
# Test for printing dataframe having empty columns
df = cudf.DataFrame()
df["a"] = []
df["b"] = []
got = df.to_string()
expect = "Empty DataFrame\nColumns: [a, b]\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy():
# Test for copying the dataframe using python copy pkg
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = copy(df)
df2["b"] = [4, 5, 6]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy_shallow():
# Test for copy dataframe using class method
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = df.copy()
df2["b"] = [4, 2, 3]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_dtypes():
dtypes = pd.Series(
[np.int32, np.float32, np.float64], index=["c", "a", "b"]
)
df = cudf.DataFrame(
{k: np.ones(10, dtype=v) for k, v in dtypes.iteritems()}
)
assert df.dtypes.equals(dtypes)
def test_dataframe_add_col_to_object_dataframe():
# Test for adding column to an empty object dataframe
cols = ["a", "b", "c"]
df = pd.DataFrame(columns=cols, dtype="str")
data = {k: v for (k, v) in zip(cols, [["a"] for _ in cols])}
gdf = cudf.DataFrame(data)
gdf = gdf[:0]
assert gdf.dtypes.equals(df.dtypes)
gdf["a"] = [1]
df["a"] = [10]
assert gdf.dtypes.equals(df.dtypes)
gdf["b"] = [1.0]
df["b"] = [10.0]
assert gdf.dtypes.equals(df.dtypes)
def test_dataframe_dir_and_getattr():
df = cudf.DataFrame(
{
"a": np.ones(10),
"b": np.ones(10),
"not an id": np.ones(10),
"oop$": np.ones(10),
}
)
o = dir(df)
assert {"a", "b"}.issubset(o)
assert "not an id" not in o
assert "oop$" not in o
# Getattr works
assert df.a.equals(df["a"])
assert df.b.equals(df["b"])
with pytest.raises(AttributeError):
df.not_a_column
@pytest.mark.parametrize("order", ["C", "F"])
def test_empty_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
# Check fully empty dataframe.
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 0)
df = cudf.DataFrame()
nelem = 123
for k in "abc":
df[k] = np.random.random(nelem)
# Check all columns in empty dataframe.
mat = df.head(0).as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 3)
@pytest.mark.parametrize("order", ["C", "F"])
def test_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
nelem = 123
for k in "abcd":
df[k] = np.random.random(nelem)
# Check all columns
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (nelem, 4)
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
# Check column subset
mat = df.as_gpu_matrix(order=order, columns=["a", "c"]).copy_to_host()
assert mat.shape == (nelem, 2)
for i, k in enumerate("ac"):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
def test_dataframe_as_gpu_matrix_null_values():
df = cudf.DataFrame()
nelem = 123
na = -10000
refvalues = {}
for k in "abcd":
df[k] = data = np.random.random(nelem)
bitmask = utils.random_bitmask(nelem)
df[k] = df[k].set_mask(bitmask)
boolmask = np.asarray(
utils.expand_bits_to_bytes(bitmask)[:nelem], dtype=np.bool_
)
data[~boolmask] = na
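        # keep a host-side copy with the sentinel in place of nulls; it is
        # compared against the matrix produced after fillna(na) below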
refvalues[k] = data
# Check null value causes error
with pytest.raises(ValueError) as raises:
df.as_gpu_matrix()
raises.match("column 'a' has null values")
for k in df.columns:
df[k] = df[k].fillna(na)
mat = df.as_gpu_matrix().copy_to_host()
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(refvalues[k], mat[:, i])
def test_dataframe_append_empty():
pdf = pd.DataFrame(
{
"key": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
"value": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
gdf["newcol"] = 100
pdf["newcol"] = 100
assert len(gdf["newcol"]) == len(pdf)
assert len(pdf["newcol"]) == len(pdf)
assert_eq(gdf, pdf)
def test_dataframe_setitem_from_masked_object():
ary = np.random.randn(100)
mask = np.zeros(100, dtype=bool)
mask[:20] = True
np.random.shuffle(mask)
ary[mask] = np.nan
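    # exactly 20 of the 100 values are NaN, matching the null_count checks below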
test1_null = cudf.Series(ary, nan_as_null=True)
assert test1_null.nullable
assert test1_null.null_count == 20
test1_nan = cudf.Series(ary, nan_as_null=False)
assert test1_nan.null_count == 0
test2_null = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=True
)
assert test2_null["a"].nullable
assert test2_null["a"].null_count == 20
test2_nan = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=False
)
assert test2_nan["a"].null_count == 0
gpu_ary = cupy.asarray(ary)
test3_null = cudf.Series(gpu_ary, nan_as_null=True)
assert test3_null.nullable
assert test3_null.null_count == 20
test3_nan = cudf.Series(gpu_ary, nan_as_null=False)
assert test3_nan.null_count == 0
test4 = cudf.DataFrame()
lst = [1, 2, None, 4, 5, 6, None, 8, 9]
test4["lst"] = lst
assert test4["lst"].nullable
assert test4["lst"].null_count == 2
def test_dataframe_append_to_empty():
pdf = pd.DataFrame()
pdf["a"] = []
pdf["b"] = [1, 2, 3]
gdf = cudf.DataFrame()
gdf["a"] = []
gdf["b"] = [1, 2, 3]
assert_eq(gdf, pdf)
def test_dataframe_setitem_index_len1():
gdf = cudf.DataFrame()
gdf["a"] = [1]
gdf["b"] = gdf.index._values
np.testing.assert_equal(gdf.b.to_array(), [0])
def test_empty_dataframe_setitem_df():
gdf1 = cudf.DataFrame()
gdf2 = cudf.DataFrame({"a": [1, 2, 3, 4, 5]})
gdf1["a"] = gdf2["a"]
assert_eq(gdf1, gdf2)
def test_assign():
gdf = cudf.DataFrame({"x": [1, 2, 3]})
gdf2 = gdf.assign(y=gdf.x + 1)
assert list(gdf.columns) == ["x"]
assert list(gdf2.columns) == ["x", "y"]
np.testing.assert_equal(gdf2.y.to_array(), [2, 3, 4])
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000])
def test_dataframe_hash_columns(nrows):
gdf = cudf.DataFrame()
data = np.asarray(range(nrows))
data[0] = data[-1] # make first and last the same
gdf["a"] = data
gdf["b"] = gdf.a + 100
out = gdf.hash_columns(["a", "b"])
assert isinstance(out, cupy.ndarray)
assert len(out) == nrows
assert out.dtype == np.int32
# Check default
out_all = gdf.hash_columns()
np.testing.assert_array_equal(cupy.asnumpy(out), cupy.asnumpy(out_all))
# Check single column
out_one = cupy.asnumpy(gdf.hash_columns(["a"]))
# First matches last
assert out_one[0] == out_one[-1]
# Equivalent to the cudf.Series.hash_values()
np.testing.assert_array_equal(cupy.asnumpy(gdf.a.hash_values()), out_one)
@pytest.mark.parametrize("nrows", [3, 10, 100, 1000])
@pytest.mark.parametrize("nparts", [1, 2, 8, 13])
@pytest.mark.parametrize("nkeys", [1, 2])
def test_dataframe_hash_partition(nrows, nparts, nkeys):
np.random.seed(123)
gdf = cudf.DataFrame()
keycols = []
for i in range(nkeys):
keyname = "key{}".format(i)
gdf[keyname] = np.random.randint(0, 7 - i, nrows)
keycols.append(keyname)
gdf["val1"] = np.random.randint(0, nrows * 2, nrows)
got = gdf.partition_by_hash(keycols, nparts=nparts)
# Must return a list
assert isinstance(got, list)
# Must have correct number of partitions
assert len(got) == nparts
# All partitions must be DataFrame type
assert all(isinstance(p, cudf.DataFrame) for p in got)
# Check that all partitions have unique keys
part_unique_keys = set()
for p in got:
if len(p):
# Take rows of the keycolumns and build a set of the key-values
unique_keys = set(map(tuple, p.as_matrix(columns=keycols)))
# Ensure that none of the key-values have occurred in other groups
assert not (unique_keys & part_unique_keys)
part_unique_keys |= unique_keys
assert len(part_unique_keys)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_value(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
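    # bytemask holds one 0/1 entry per row; it is used below to decide which
    # values are expected to be null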
gdf["val"] = gdf["val"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.key])
expected_value = row.key + 100 if valid else np.nan
got_value = row.val
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_keys(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["key"] = gdf["key"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3, keep_index=False)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.val - 100])
# val is key + 100
expected_value = row.val - 100 if valid else np.nan
got_value = row.key
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("keep_index", [True, False])
def test_dataframe_hash_partition_keep_index(keep_index):
gdf = cudf.DataFrame(
{"val": [1, 2, 3, 4], "key": [3, 2, 1, 4]}, index=[4, 3, 2, 1]
)
expected_df1 = cudf.DataFrame(
{"val": [1], "key": [3]}, index=[4] if keep_index else None
)
expected_df2 = cudf.DataFrame(
{"val": [2, 3, 4], "key": [2, 1, 4]},
index=[3, 2, 1] if keep_index else range(1, 4),
)
expected = [expected_df1, expected_df2]
parts = gdf.partition_by_hash(["key"], nparts=2, keep_index=keep_index)
for exp, got in zip(expected, parts):
assert_eq(exp, got)
def test_dataframe_hash_partition_empty():
gdf = cudf.DataFrame({"val": [1, 2], "key": [3, 2]}, index=["a", "b"])
parts = gdf.iloc[:0].partition_by_hash(["key"], nparts=3)
assert len(parts) == 3
for part in parts:
assert_eq(gdf.iloc[:0], part)
@pytest.mark.parametrize("dtype1", utils.supported_numpy_dtypes)
@pytest.mark.parametrize("dtype2", utils.supported_numpy_dtypes)
def test_dataframe_concat_different_numerical_columns(dtype1, dtype2):
df1 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype1)))
df2 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype2)))
if dtype1 != dtype2 and "datetime" in dtype1 or "datetime" in dtype2:
with pytest.raises(TypeError):
cudf.concat([df1, df2])
else:
pres = pd.concat([df1, df2])
gres = cudf.concat([cudf.from_pandas(df1), cudf.from_pandas(df2)])
assert_eq(cudf.from_pandas(pres), gres)
def test_dataframe_concat_different_column_types():
df1 = cudf.Series([42], dtype=np.float64)
df2 = cudf.Series(["a"], dtype="category")
with pytest.raises(ValueError):
cudf.concat([df1, df2])
df2 = cudf.Series(["a string"])
with pytest.raises(TypeError):
cudf.concat([df1, df2])
@pytest.mark.parametrize(
"df_1", [cudf.DataFrame({"a": [1, 2], "b": [1, 3]}), cudf.DataFrame({})]
)
@pytest.mark.parametrize(
"df_2", [cudf.DataFrame({"a": [], "b": []}), cudf.DataFrame({})]
)
def test_concat_empty_dataframe(df_1, df_2):
got = cudf.concat([df_1, df_2])
expect = pd.concat([df_1.to_pandas(), df_2.to_pandas()], sort=False)
# ignoring dtypes as pandas upcasts int to float
# on concatenation with empty dataframes
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"df1_d",
[
{"a": [1, 2], "b": [1, 2], "c": ["s1", "s2"], "d": [1.0, 2.0]},
{"b": [1.9, 10.9], "c": ["s1", "s2"]},
{"c": ["s1"], "b": [None], "a": [False]},
],
)
@pytest.mark.parametrize(
"df2_d",
[
{"a": [1, 2, 3]},
{"a": [1, None, 3], "b": [True, True, False], "c": ["s3", None, "s4"]},
{"a": [], "b": []},
{},
],
)
def test_concat_different_column_dataframe(df1_d, df2_d):
got = cudf.concat(
[cudf.DataFrame(df1_d), cudf.DataFrame(df2_d), cudf.DataFrame(df1_d)],
sort=False,
)
expect = pd.concat(
[pd.DataFrame(df1_d), pd.DataFrame(df2_d), pd.DataFrame(df1_d)],
sort=False,
)
# numerical columns are upcasted to float in cudf.DataFrame.to_pandas()
# casts nan to 0 in non-float numerical columns
numeric_cols = got.dtypes[got.dtypes != "object"].index
for col in numeric_cols:
got[col] = got[col].astype(np.float64).fillna(np.nan)
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"ser_1", [pd.Series([1, 2, 3]), pd.Series([], dtype="float64")]
)
@pytest.mark.parametrize("ser_2", [pd.Series([], dtype="float64")])
def test_concat_empty_series(ser_1, ser_2):
got = cudf.concat([cudf.Series(ser_1), cudf.Series(ser_2)])
expect = pd.concat([ser_1, ser_2])
assert_eq(got, expect)
def test_concat_with_axis():
df1 = pd.DataFrame(dict(x=np.arange(5), y=np.arange(5)))
df2 = pd.DataFrame(dict(a=np.arange(5), b=np.arange(5)))
concat_df = pd.concat([df1, df2], axis=1)
cdf1 = cudf.from_pandas(df1)
cdf2 = cudf.from_pandas(df2)
# concat only dataframes
concat_cdf = cudf.concat([cdf1, cdf2], axis=1)
assert_eq(concat_cdf, concat_df)
# concat only series
concat_s = pd.concat([df1.x, df1.y], axis=1)
cs1 = cudf.Series.from_pandas(df1.x)
cs2 = cudf.Series.from_pandas(df1.y)
concat_cdf_s = cudf.concat([cs1, cs2], axis=1)
assert_eq(concat_cdf_s, concat_s)
# concat series and dataframes
s3 = pd.Series(np.random.random(5))
cs3 = cudf.Series.from_pandas(s3)
concat_cdf_all = cudf.concat([cdf1, cs3, cdf2], axis=1)
concat_df_all = pd.concat([df1, s3, df2], axis=1)
assert_eq(concat_cdf_all, concat_df_all)
# concat manual multi index
midf1 = cudf.from_pandas(df1)
midf1.index = cudf.MultiIndex(
levels=[[0, 1, 2, 3], [0, 1]], codes=[[0, 1, 2, 3, 2], [0, 1, 0, 1, 0]]
)
midf2 = midf1[2:]
midf2.index = cudf.MultiIndex(
levels=[[3, 4, 5], [2, 0]], codes=[[0, 1, 2], [1, 0, 1]]
)
mipdf1 = midf1.to_pandas()
mipdf2 = midf2.to_pandas()
assert_eq(cudf.concat([midf1, midf2]), pd.concat([mipdf1, mipdf2]))
assert_eq(cudf.concat([midf2, midf1]), pd.concat([mipdf2, mipdf1]))
assert_eq(
cudf.concat([midf1, midf2, midf1]), pd.concat([mipdf1, mipdf2, mipdf1])
)
# concat groupby multi index
gdf1 = cudf.DataFrame(
{
"x": np.random.randint(0, 10, 10),
"y": np.random.randint(0, 10, 10),
"z": np.random.randint(0, 10, 10),
"v": np.random.randint(0, 10, 10),
}
)
gdf2 = gdf1[5:]
gdg1 = gdf1.groupby(["x", "y"]).min()
gdg2 = gdf2.groupby(["x", "y"]).min()
pdg1 = gdg1.to_pandas()
pdg2 = gdg2.to_pandas()
assert_eq(cudf.concat([gdg1, gdg2]), pd.concat([pdg1, pdg2]))
assert_eq(cudf.concat([gdg2, gdg1]), pd.concat([pdg2, pdg1]))
# series multi index concat
gdgz1 = gdg1.z
gdgz2 = gdg2.z
pdgz1 = gdgz1.to_pandas()
pdgz2 = gdgz2.to_pandas()
assert_eq(cudf.concat([gdgz1, gdgz2]), pd.concat([pdgz1, pdgz2]))
assert_eq(cudf.concat([gdgz2, gdgz1]), pd.concat([pdgz2, pdgz1]))
@pytest.mark.parametrize("nrows", [0, 3, 10, 100, 1000])
def test_nonmatching_index_setitem(nrows):
np.random.seed(0)
gdf = cudf.DataFrame()
gdf["a"] = np.random.randint(2147483647, size=nrows)
gdf["b"] = np.random.randint(2147483647, size=nrows)
gdf = gdf.set_index("b")
test_values = np.random.randint(2147483647, size=nrows)
gdf["c"] = test_values
assert len(test_values) == len(gdf["c"])
assert (
gdf["c"]
.to_pandas()
.equals(cudf.Series(test_values).set_index(gdf._index).to_pandas())
)
def test_from_pandas():
df = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
gdf = cudf.DataFrame.from_pandas(df)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = df.x
gs = cudf.Series.from_pandas(s)
assert isinstance(gs, cudf.Series)
assert_eq(s, gs)
@pytest.mark.parametrize("dtypes", [int, float])
def test_from_records(dtypes):
h_ary = np.ndarray(shape=(10, 4), dtype=dtypes)
rec_ary = h_ary.view(np.recarray)
gdf = cudf.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
df = pd.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame.from_records(rec_ary)
df = pd.DataFrame.from_records(rec_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
@pytest.mark.parametrize("columns", [None, ["first", "second", "third"]])
@pytest.mark.parametrize(
"index",
[
None,
["first", "second"],
"name",
"age",
"weight",
[10, 11],
["abc", "xyz"],
],
)
def test_from_records_index(columns, index):
rec_ary = np.array(
[("Rex", 9, 81.0), ("Fido", 3, 27.0)],
dtype=[("name", "U10"), ("age", "i4"), ("weight", "f4")],
)
gdf = cudf.DataFrame.from_records(rec_ary, columns=columns, index=index)
df = pd.DataFrame.from_records(rec_ary, columns=columns, index=index)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_construction_from_cupy_arrays():
h_ary = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
d_ary = cupy.asarray(h_ary)
gdf = cudf.DataFrame(d_ary, columns=["a", "b", "c"])
df = pd.DataFrame(h_ary, columns=["a", "b", "c"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
df = pd.DataFrame(h_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary, index=["a", "b"])
df = pd.DataFrame(h_ary, index=["a", "b"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=0, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=0, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=1, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=1, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_cupy_wrong_dimensions():
d_ary = cupy.empty((2, 3, 4), dtype=np.int32)
with pytest.raises(
ValueError, match="records dimension expected 1 or 2 but found: 3"
):
cudf.DataFrame(d_ary)
def test_dataframe_cupy_array_wrong_index():
d_ary = cupy.empty((2, 3), dtype=np.int32)
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index=["a"])
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index="a")
def test_index_in_dataframe_constructor():
a = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
b = cudf.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
assert_eq(a, b)
assert_eq(a.loc[4:], b.loc[4:])
dtypes = NUMERIC_TYPES + DATETIME_TYPES + ["bool"]
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
padf = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
gdf = cudf.DataFrame.from_arrow(padf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = pa.Array.from_pandas(df.a)
gs = cudf.Series.from_arrow(s)
assert isinstance(gs, cudf.Series)
# For some reason PyArrow to_pandas() converts to numpy array and has
# better type compatibility
np.testing.assert_array_equal(s.to_pandas(), gs.to_array())
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_to_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
pa_i = pa.Array.from_pandas(df.index)
pa_gi = gdf.index.to_arrow()
assert isinstance(pa_gi, pa.Array)
assert pa.Array.equals(pa_i, pa_gi)
@pytest.mark.parametrize("data_type", dtypes)
def test_to_from_arrow_nulls(data_type):
if data_type == "longlong":
data_type = "int64"
if data_type == "bool":
s1 = pa.array([True, None, False, None, True], type=data_type)
else:
dtype = np.dtype(data_type)
if dtype.type == np.datetime64:
time_unit, _ = np.datetime_data(dtype)
data_type = pa.timestamp(unit=time_unit)
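            # carry the numpy datetime64 resolution (e.g. "ms") over to the
            # arrow timestamp type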
s1 = pa.array([1, None, 3, None, 5], type=data_type)
gs1 = cudf.Series.from_arrow(s1)
assert isinstance(gs1, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s1.buffers()[0]).view("u1")[0],
gs1._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s1, gs1.to_arrow())
s2 = pa.array([None, None, None, None, None], type=data_type)
gs2 = cudf.Series.from_arrow(s2)
assert isinstance(gs2, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s2.buffers()[0]).view("u1")[0],
gs2._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s2, gs2.to_arrow())
def test_to_arrow_categorical():
df = pd.DataFrame()
df["a"] = pd.Series(["a", "b", "c"], dtype="category")
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
def test_from_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert_eq(
pd.Series(pa_cat.to_pandas()), # PyArrow returns a pd.Categorical
gd_cat.to_pandas(),
)
def test_to_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert pa.Array.equals(pa_cat, gd_cat.to_arrow())
@pytest.mark.parametrize("data_type", dtypes)
def test_from_scalar_typing(data_type):
if data_type == "datetime64[ms]":
scalar = (
np.dtype("int64")
.type(np.random.randint(0, 5))
.astype("datetime64[ms]")
)
elif data_type.startswith("datetime64"):
scalar = np.datetime64(datetime.date.today()).astype("datetime64[ms]")
data_type = "datetime64[ms]"
else:
scalar = np.dtype(data_type).type(np.random.randint(0, 5))
gdf = cudf.DataFrame()
gdf["a"] = [1, 2, 3, 4, 5]
gdf["b"] = scalar
assert gdf["b"].dtype == np.dtype(data_type)
assert len(gdf["b"]) == len(gdf["a"])
@pytest.mark.parametrize("data_type", NUMERIC_TYPES)
def test_from_python_array(data_type):
np_arr = np.random.randint(0, 100, 10).astype(data_type)
data = memoryview(np_arr)
data = arr.array(data.format, data)
gs = cudf.Series(data)
np.testing.assert_equal(gs.to_array(), np_arr)
def test_series_shape():
ps = pd.Series([1, 2, 3, 4])
cs = cudf.Series([1, 2, 3, 4])
assert ps.shape == cs.shape
def test_series_shape_empty():
ps = pd.Series(dtype="float64")
cs = cudf.Series([])
assert ps.shape == cs.shape
def test_dataframe_shape():
pdf = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0.1, 0.2, None, 0.3]})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.shape == gdf.shape
def test_dataframe_shape_empty():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
assert pdf.shape == gdf.shape
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("nulls", ["none", "some", "all"])
def test_dataframe_transpose(nulls, num_cols, num_rows, dtype):
pdf = pd.DataFrame()
null_rep = np.nan if dtype in ["float32", "float64"] else None
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(np.random.randint(0, 26, num_rows).astype(dtype))
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = null_rep
elif nulls == "all":
data[:] = null_rep
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function)
assert_eq(expect, got_property)
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
def test_dataframe_transpose_category(num_cols, num_rows):
pdf = pd.DataFrame()
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(list(string.ascii_lowercase), dtype="category")
data = data.sample(num_rows, replace=True).reset_index(drop=True)
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function.to_pandas())
assert_eq(expect, got_property.to_pandas())
def test_generated_column():
gdf = cudf.DataFrame({"a": (i for i in range(5))})
assert len(gdf) == 5
@pytest.fixture
def pdf():
return pd.DataFrame({"x": range(10), "y": range(10)})
@pytest.fixture
def gdf(pdf):
return cudf.DataFrame.from_pandas(pdf)
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize(
"func",
[
lambda df, **kwargs: df.min(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.product(**kwargs),
lambda df, **kwargs: df.cummin(**kwargs),
lambda df, **kwargs: df.cummax(**kwargs),
lambda df, **kwargs: df.cumsum(**kwargs),
lambda df, **kwargs: df.cumprod(**kwargs),
lambda df, **kwargs: df.mean(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.std(ddof=1, **kwargs),
lambda df, **kwargs: df.var(ddof=1, **kwargs),
lambda df, **kwargs: df.std(ddof=2, **kwargs),
lambda df, **kwargs: df.var(ddof=2, **kwargs),
lambda df, **kwargs: df.kurt(**kwargs),
lambda df, **kwargs: df.skew(**kwargs),
lambda df, **kwargs: df.all(**kwargs),
lambda df, **kwargs: df.any(**kwargs),
],
)
@pytest.mark.parametrize("skipna", [True, False, None])
def test_dataframe_reductions(data, func, skipna):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf, skipna=skipna), func(gdf, skipna=skipna))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("func", [lambda df: df.count()])
def test_dataframe_count_reduction(data, func):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf), func(gdf))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("ops", ["sum", "product", "prod"])
@pytest.mark.parametrize("skipna", [True, False, None])
@pytest.mark.parametrize("min_count", [-10, -1, 0, 1, 2, 3, 10])
def test_dataframe_min_count_ops(data, ops, skipna, min_count):
psr = pd.DataFrame(data)
gsr = cudf.DataFrame(data)
if PANDAS_GE_120 and psr.shape[0] * psr.shape[1] < min_count:
pytest.xfail("https://github.com/pandas-dev/pandas/issues/39738")
assert_eq(
getattr(psr, ops)(skipna=skipna, min_count=min_count),
getattr(gsr, ops)(skipna=skipna, min_count=min_count),
check_dtype=False,
)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_df(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf, pdf)
g = binop(gdf, gdf)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_df(pdf, gdf, binop):
d = binop(pdf, pdf + 1)
g = binop(gdf, gdf + 1)
assert_eq(d, g)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_series(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf.x, pdf.y)
g = binop(gdf.x, gdf.y)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_series(pdf, gdf, binop):
d = binop(pdf.x, pdf.y + 1)
g = binop(gdf.x, gdf.y + 1)
assert_eq(d, g)
@pytest.mark.parametrize("unaryop", [operator.neg, operator.inv, operator.abs])
def test_unaryops_df(pdf, gdf, unaryop):
d = unaryop(pdf - 5)
g = unaryop(gdf - 5)
assert_eq(d, g)
@pytest.mark.parametrize(
"func",
[
lambda df: df.empty,
lambda df: df.x.empty,
lambda df: df.x.fillna(123, limit=None, method=None, axis=None),
lambda df: df.drop("x", axis=1, errors="raise"),
],
)
def test_unary_operators(func, pdf, gdf):
p = func(pdf)
g = func(gdf)
assert_eq(p, g)
def test_is_monotonic(gdf):
pdf = pd.DataFrame({"x": [1, 2, 3]}, index=[3, 1, 2])
gdf = cudf.DataFrame.from_pandas(pdf)
assert not gdf.index.is_monotonic
assert not gdf.index.is_monotonic_increasing
assert not gdf.index.is_monotonic_decreasing
def test_iter(pdf, gdf):
assert list(pdf) == list(gdf)
def test_iteritems(gdf):
for k, v in gdf.iteritems():
assert k in gdf.columns
assert isinstance(v, cudf.Series)
assert_eq(v, gdf[k])
@pytest.mark.parametrize("q", [0.5, 1, 0.001, [0.5], [], [0.005, 0.5, 1]])
@pytest.mark.parametrize("numeric_only", [True, False])
def test_quantile(q, numeric_only):
ts = pd.date_range("2018-08-24", periods=5, freq="D")
td = pd.to_timedelta(np.arange(5), unit="h")
pdf = pd.DataFrame(
{"date": ts, "delta": td, "val": np.random.randn(len(ts))}
)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf["date"].quantile(q), gdf["date"].quantile(q))
assert_eq(pdf["delta"].quantile(q), gdf["delta"].quantile(q))
assert_eq(pdf["val"].quantile(q), gdf["val"].quantile(q))
if numeric_only:
assert_eq(pdf.quantile(q), gdf.quantile(q))
else:
q = q if isinstance(q, list) else [q]
assert_eq(
pdf.quantile(
q if isinstance(q, list) else [q], numeric_only=False
),
gdf.quantile(q, numeric_only=False),
)
def test_empty_quantile():
pdf = pd.DataFrame({"x": []})
df = cudf.DataFrame({"x": []})
actual = df.quantile()
expected = pdf.quantile()
assert_eq(actual, expected)
def test_from_pandas_function(pdf):
gdf = cudf.from_pandas(pdf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(pdf, gdf)
gdf = cudf.from_pandas(pdf.x)
assert isinstance(gdf, cudf.Series)
assert_eq(pdf.x, gdf)
with pytest.raises(TypeError):
cudf.from_pandas(123)
@pytest.mark.parametrize("preserve_index", [True, False])
def test_arrow_pandas_compat(pdf, gdf, preserve_index):
pdf["z"] = range(10)
pdf = pdf.set_index("z")
gdf["z"] = range(10)
gdf = gdf.set_index("z")
pdf_arrow_table = pa.Table.from_pandas(pdf, preserve_index=preserve_index)
gdf_arrow_table = gdf.to_arrow(preserve_index=preserve_index)
assert pa.Table.equals(pdf_arrow_table, gdf_arrow_table)
gdf2 = cudf.DataFrame.from_arrow(pdf_arrow_table)
pdf2 = pdf_arrow_table.to_pandas()
assert_eq(pdf2, gdf2)
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000, 100000])
def test_series_hash_encode(nrows):
data = np.asarray(range(nrows))
    # Python's hash of a string can differ between runs, which sometimes
    # makes enc_with_name_arr and enc_arr come out the same, and there is
    # no better way to force hash() to return a fixed value. An integer
    # name is used so hash() gives back a constant value.
s = cudf.Series(data, name=1)
num_features = 1000
encoded_series = s.hash_encode(num_features)
assert isinstance(encoded_series, cudf.Series)
enc_arr = encoded_series.to_array()
assert np.all(enc_arr >= 0)
assert np.max(enc_arr) < num_features
enc_with_name_arr = s.hash_encode(num_features, use_name=True).to_array()
assert enc_with_name_arr[0] != enc_arr[0]
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
def test_cuda_array_interface(dtype):
np_data = np.arange(10).astype(dtype)
cupy_data = cupy.array(np_data)
pd_data = pd.Series(np_data)
cudf_data = cudf.Series(cupy_data)
assert_eq(pd_data, cudf_data)
gdf = cudf.DataFrame()
gdf["test"] = cupy_data
pd_data.name = "test"
assert_eq(pd_data, gdf["test"])
@pytest.mark.parametrize("nelem", [0, 2, 3, 100])
@pytest.mark.parametrize("nchunks", [1, 2, 5, 10])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow_chunked_arrays(nelem, nchunks, data_type):
np_list_data = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array = pa.chunked_array(np_list_data)
expect = pd.Series(pa_chunk_array.to_pandas())
got = cudf.Series(pa_chunk_array)
assert_eq(expect, got)
np_list_data2 = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array2 = pa.chunked_array(np_list_data2)
pa_table = pa.Table.from_arrays(
[pa_chunk_array, pa_chunk_array2], names=["a", "b"]
)
expect = pa_table.to_pandas()
got = cudf.DataFrame.from_arrow(pa_table)
assert_eq(expect, got)
@pytest.mark.skip(reason="Test was designed to be run in isolation")
def test_gpu_memory_usage_with_boolmask():
ctx = cuda.current_context()
def query_GPU_memory(note=""):
memInfo = ctx.get_memory_info()
usedMemoryGB = (memInfo.total - memInfo.free) / 1e9
return usedMemoryGB
cuda.current_context().deallocations.clear()
nRows = int(1e8)
nCols = 2
dataNumpy = np.asfortranarray(np.random.rand(nRows, nCols))
colNames = ["col" + str(iCol) for iCol in range(nCols)]
pandasDF = pd.DataFrame(data=dataNumpy, columns=colNames, dtype=np.float32)
cudaDF = cudf.core.DataFrame.from_pandas(pandasDF)
boolmask = cudf.Series(np.random.randint(1, 2, len(cudaDF)).astype("bool"))
memory_used = query_GPU_memory()
cudaDF = cudaDF[boolmask]
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col0"].index._values.data_array_view.device_ctypes_pointer
)
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col1"].index._values.data_array_view.device_ctypes_pointer
)
assert memory_used == query_GPU_memory()
def test_boolmask(pdf, gdf):
boolmask = np.random.randint(0, 2, len(pdf)) > 0
gdf = gdf[boolmask]
pdf = pdf[boolmask]
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"mask_shape",
[
(2, "ab"),
(2, "abc"),
(3, "ab"),
(3, "abc"),
(3, "abcd"),
(4, "abc"),
(4, "abcd"),
],
)
def test_dataframe_boolmask(mask_shape):
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.random.randint(0, 10, 3)
pdf_mask = pd.DataFrame()
for col in mask_shape[1]:
pdf_mask[col] = np.random.randint(0, 2, mask_shape[0]) > 0
gdf = cudf.DataFrame.from_pandas(pdf)
gdf_mask = cudf.DataFrame.from_pandas(pdf_mask)
gdf = gdf[gdf_mask]
pdf = pdf[pdf_mask]
assert np.array_equal(gdf.columns, pdf.columns)
for col in gdf.columns:
assert np.array_equal(
gdf[col].fillna(-1).to_pandas().values, pdf[col].fillna(-1).values
)
@pytest.mark.parametrize(
"mask",
[
[True, False, True],
pytest.param(
cudf.Series([True, False, True]),
marks=pytest.mark.xfail(
reason="Pandas can't index a multiindex with a Series"
),
),
],
)
def test_dataframe_multiindex_boolmask(mask):
gdf = cudf.DataFrame(
{"w": [3, 2, 1], "x": [1, 2, 3], "y": [0, 1, 0], "z": [1, 1, 1]}
)
gdg = gdf.groupby(["w", "x"]).count()
pdg = gdg.to_pandas()
assert_eq(gdg[mask], pdg[mask])
def test_dataframe_assignment():
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.array([0, 1, 1, -2, 10])
gdf = cudf.DataFrame.from_pandas(pdf)
gdf[gdf < 0] = 999
pdf[pdf < 0] = 999
assert_eq(gdf, pdf)
def test_1row_arrow_table():
data = [pa.array([0]), pa.array([1])]
batch = pa.RecordBatch.from_arrays(data, ["f0", "f1"])
table = pa.Table.from_batches([batch])
expect = table.to_pandas()
got = cudf.DataFrame.from_arrow(table)
assert_eq(expect, got)
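# An unnamed default index should survive the Arrow round trip and match the
# table pandas builds for the same frame.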
def test_arrow_handle_no_index_name(pdf, gdf):
gdf_arrow = gdf.to_arrow()
pdf_arrow = pa.Table.from_pandas(pdf)
assert pa.Table.equals(pdf_arrow, gdf_arrow)
got = cudf.DataFrame.from_arrow(gdf_arrow)
expect = pdf_arrow.to_pandas()
assert_eq(expect, got)
@pytest.mark.parametrize("num_rows", [1, 3, 10, 100])
@pytest.mark.parametrize("num_bins", [1, 2, 4, 20])
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
@pytest.mark.parametrize("series_bins", [True, False])
def test_series_digitize(num_rows, num_bins, right, dtype, series_bins):
data = np.random.randint(0, 100, num_rows).astype(dtype)
bins = np.unique(np.sort(np.random.randint(2, 95, num_bins).astype(dtype)))
s = cudf.Series(data)
if series_bins:
s_bins = cudf.Series(bins)
indices = s.digitize(s_bins, right)
else:
indices = s.digitize(bins, right)
np.testing.assert_array_equal(
np.digitize(data, bins, right), indices.to_array()
)
def test_series_digitize_invalid_bins():
s = cudf.Series(np.random.randint(0, 30, 80), dtype="int32")
bins = cudf.Series([2, None, None, 50, 90], dtype="int32")
with pytest.raises(
ValueError, match="`bins` cannot contain null entries."
):
_ = s.digitize(bins)
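# Column slices of a C-contiguous 2D array are not themselves C-contiguous;
# from_pandas should still copy such columns correctly.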
def test_pandas_non_contiguous():
arr1 = np.random.sample([5000, 10])
assert arr1.flags["C_CONTIGUOUS"] is True
df = pd.DataFrame(arr1)
for col in df.columns:
assert df[col].values.flags["C_CONTIGUOUS"] is False
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.to_pandas(), df)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
@pytest.mark.parametrize("null_type", [np.nan, None, "mixed"])
def test_series_all_null(num_elements, null_type):
if null_type == "mixed":
data = []
data1 = [np.nan] * int(num_elements / 2)
data2 = [None] * int(num_elements / 2)
for idx in range(len(data1)):
data.append(data1[idx])
data.append(data2[idx])
else:
data = [null_type] * num_elements
    # Explicitly cast the pandas Series, because a list of `None` would
    # otherwise produce an `object` dtype.
expect = pd.Series(data, dtype="float64")
got = cudf.Series(data)
assert_eq(expect, got)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
def test_series_all_valid_nan(num_elements):
data = [np.nan] * num_elements
sr = cudf.Series(data, nan_as_null=False)
np.testing.assert_equal(sr.null_count, 0)
def test_series_rename():
pds = pd.Series([1, 2, 3], name="asdf")
gds = cudf.Series([1, 2, 3], name="asdf")
expect = pds.rename("new_name")
got = gds.rename("new_name")
assert_eq(expect, got)
pds = pd.Series(expect)
gds = cudf.Series(got)
assert_eq(pds, gds)
pds = pd.Series(expect, name="name name")
gds = cudf.Series(got, name="name name")
assert_eq(pds, gds)
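# head()/tail() should slice exactly like the equivalent [:n] / [-n:] indexing
# for both DataFrames and Series, including negative and zero counts.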
@pytest.mark.parametrize("data_type", dtypes)
@pytest.mark.parametrize("nelem", [0, 100])
def test_head_tail(nelem, data_type):
def check_index_equality(left, right):
assert left.index.equals(right.index)
def check_values_equality(left, right):
if len(left) == 0 and len(right) == 0:
return None
np.testing.assert_array_equal(left.to_pandas(), right.to_pandas())
def check_frame_series_equality(left, right):
check_index_equality(left, right)
check_values_equality(left, right)
gdf = cudf.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
check_frame_series_equality(gdf.head(), gdf[:5])
check_frame_series_equality(gdf.head(3), gdf[:3])
check_frame_series_equality(gdf.head(-2), gdf[:-2])
check_frame_series_equality(gdf.head(0), gdf[0:0])
check_frame_series_equality(gdf["a"].head(), gdf["a"][:5])
check_frame_series_equality(gdf["a"].head(3), gdf["a"][:3])
check_frame_series_equality(gdf["a"].head(-2), gdf["a"][:-2])
check_frame_series_equality(gdf.tail(), gdf[-5:])
check_frame_series_equality(gdf.tail(3), gdf[-3:])
check_frame_series_equality(gdf.tail(-2), gdf[2:])
check_frame_series_equality(gdf.tail(0), gdf[0:0])
check_frame_series_equality(gdf["a"].tail(), gdf["a"][-5:])
check_frame_series_equality(gdf["a"].tail(3), gdf["a"][-3:])
check_frame_series_equality(gdf["a"].tail(-2), gdf["a"][2:])
def test_tail_for_string():
gdf = cudf.DataFrame()
gdf["id"] = cudf.Series(["a", "b"], dtype=np.object_)
gdf["v"] = cudf.Series([1, 2])
assert_eq(gdf.tail(3), gdf.to_pandas().tail(3))
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index(pdf, gdf, drop):
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_named_index(pdf, gdf, drop):
pdf.index.name = "cudf"
gdf.index.name = "cudf"
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index_inplace(pdf, gdf, drop):
pdf.reset_index(drop=drop, inplace=True)
gdf.reset_index(drop=drop, inplace=True)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 2, 3, 4, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize(
"index",
[
"a",
["a", "b"],
pd.CategoricalIndex(["I", "II", "III", "IV", "V"]),
pd.Series(["h", "i", "k", "l", "m"]),
["b", pd.Index(["I", "II", "III", "IV", "V"])],
["c", [11, 12, 13, 14, 15]],
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5), # corner case
[pd.Series(["h", "i", "k", "l", "m"]), pd.RangeIndex(0, 5)],
[
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5),
],
],
)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
def test_set_index(data, index, drop, append, inplace):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
expected = pdf.set_index(index, inplace=inplace, drop=drop, append=append)
actual = gdf.set_index(index, inplace=inplace, drop=drop, append=append)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 1, 2, 2, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize("index", ["a", pd.Index([1, 1, 2, 2, 3])])
@pytest.mark.parametrize("verify_integrity", [True])
@pytest.mark.xfail
def test_set_index_verify_integrity(data, index, verify_integrity):
gdf = cudf.DataFrame(data)
gdf.set_index(index, verify_integrity=verify_integrity)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("nelem", [10, 200, 1333])
def test_set_index_multi(drop, nelem):
np.random.seed(0)
a = np.arange(nelem)
np.random.shuffle(a)
df = pd.DataFrame(
{
"a": a,
"b": np.random.randint(0, 4, size=nelem),
"c": np.random.uniform(low=0, high=4, size=nelem),
"d": np.random.choice(["green", "black", "white"], nelem),
}
)
df["e"] = df["d"].astype("category")
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.set_index("a", drop=drop), gdf.set_index(["a"], drop=drop))
assert_eq(
df.set_index(["b", "c"], drop=drop),
gdf.set_index(["b", "c"], drop=drop),
)
assert_eq(
df.set_index(["d", "b"], drop=drop),
gdf.set_index(["d", "b"], drop=drop),
)
assert_eq(
df.set_index(["b", "d", "e"], drop=drop),
gdf.set_index(["b", "d", "e"], drop=drop),
)
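# The reindex tests below cover the index/columns/labels/axis keyword
# combinations; cudf's `copy` argument is varied while pandas is always called
# with copy=True.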
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_0(copy):
# TODO (ptaylor): pandas changes `int` dtype to `float64`
# when reindexing and filling new label indices with NaN
gdf = cudf.datasets.randomdata(
nrows=6,
dtypes={
"a": "category",
# 'b': int,
"c": float,
"d": str,
},
)
pdf = gdf.to_pandas()
# Validate reindex returns a copy unmodified
assert_eq(pdf.reindex(copy=True), gdf.reindex(copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_1(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis defaults to 0
assert_eq(pdf.reindex(index, copy=True), gdf.reindex(index, copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_2(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(index, axis=0, copy=True),
gdf.reindex(index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_3(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
    # Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(columns, axis=1, copy=True),
gdf.reindex(columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_4(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(labels=index, axis=0, copy=True),
gdf.reindex(labels=index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_5(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(labels=columns, axis=1, copy=True),
gdf.reindex(labels=columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_6(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis='index'
assert_eq(
pdf.reindex(labels=index, axis="index", copy=True),
gdf.reindex(labels=index, axis="index", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_7(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis='columns'
assert_eq(
pdf.reindex(labels=columns, axis="columns", copy=True),
gdf.reindex(labels=columns, axis="columns", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_8(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes labels when index=labels
assert_eq(
pdf.reindex(index=index, copy=True),
gdf.reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_9(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes column names when columns=labels
assert_eq(
pdf.reindex(columns=columns, copy=True),
gdf.reindex(columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_10(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_change_dtype(copy):
if PANDAS_GE_110:
kwargs = {"check_freq": False}
else:
kwargs = {}
index = pd.date_range("12/29/2009", periods=10, freq="D")
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
**kwargs,
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_categorical_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"a": "category"})
pdf = gdf.to_pandas()
assert_eq(pdf["a"].reindex(copy=True), gdf["a"].reindex(copy=copy))
assert_eq(
pdf["a"].reindex(index, copy=True), gdf["a"].reindex(index, copy=copy)
)
assert_eq(
pdf["a"].reindex(index=index, copy=True),
gdf["a"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_float_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"c": float})
pdf = gdf.to_pandas()
assert_eq(pdf["c"].reindex(copy=True), gdf["c"].reindex(copy=copy))
assert_eq(
pdf["c"].reindex(index, copy=True), gdf["c"].reindex(index, copy=copy)
)
assert_eq(
pdf["c"].reindex(index=index, copy=True),
gdf["c"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_string_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"d": str})
pdf = gdf.to_pandas()
assert_eq(pdf["d"].reindex(copy=True), gdf["d"].reindex(copy=copy))
assert_eq(
pdf["d"].reindex(index, copy=True), gdf["d"].reindex(index, copy=copy)
)
assert_eq(
pdf["d"].reindex(index=index, copy=True),
gdf["d"].reindex(index=index, copy=copy),
)
def test_to_frame(pdf, gdf):
assert_eq(pdf.x.to_frame(), gdf.x.to_frame())
name = "foo"
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
    assert_eq(gdf_new_name, pdf_new_name)
name = False
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
assert_eq(gdf_new_name, pdf_new_name)
assert gdf_new_name.columns[0] is name
def test_dataframe_empty_sort_index():
pdf = pd.DataFrame({"x": []})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.sort_index()
got = gdf.sort_index()
assert_eq(expect, got)
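# sort_index should honor axis, ascending, ignore_index, inplace and
# na_position the same way pandas does, including NaN index labels.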
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_sort_index(
axis, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{"b": [1, 3, 2], "a": [1, 4, 3], "c": [4, 1, 5]},
index=[3.0, 1.0, np.nan],
)
gdf = cudf.DataFrame.from_pandas(pdf)
expected = pdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
assert_eq(pdf, gdf)
else:
assert_eq(expected, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize(
"level",
[
0,
"b",
1,
["b"],
"a",
["a", "b"],
["b", "a"],
[0, 1],
[1, 0],
[0, 2],
None,
],
)
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_multiindex_sort_index(
axis, level, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{
"b": [1.0, 3.0, np.nan],
"a": [1, 4, 3],
1: ["a", "b", "c"],
"e": [3, 1, 4],
"d": [1, 2, 8],
}
).set_index(["b", "a", 1])
gdf = cudf.DataFrame.from_pandas(pdf)
    # `ignore_index` is only supported by pandas from v1.0, so it is emulated
    # below by resetting the index on the pandas result.
expected = pdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
if ignore_index is True:
pdf = pdf.reset_index(drop=True)
assert_eq(pdf, gdf)
else:
if ignore_index is True:
expected = expected.reset_index(drop=True)
assert_eq(expected, got)
@pytest.mark.parametrize("dtype", dtypes + ["category"])
def test_dataframe_0_row_dtype(dtype):
if dtype == "category":
data = pd.Series(["a", "b", "c", "d", "e"], dtype="category")
else:
data = np.array([1, 2, 3, 4, 5], dtype=dtype)
expect = cudf.DataFrame()
expect["x"] = data
expect["y"] = data
got = expect.head(0)
for col_name in got.columns:
assert expect[col_name].dtype == got[col_name].dtype
expect = cudf.Series(data)
got = expect.head(0)
assert expect.dtype == got.dtype
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_series_list_nanasnull(nan_as_null):
data = [1.0, 2.0, 3.0, np.nan, None]
expect = pa.array(data, from_pandas=nan_as_null)
got = cudf.Series(data, nan_as_null=nan_as_null).to_arrow()
# Bug in Arrow 0.14.1 where NaNs aren't handled
expect = expect.cast("int64", safe=False)
got = got.cast("int64", safe=False)
assert pa.Array.equals(expect, got)
def test_column_assignment():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float}
)
new_cols = ["q", "r", "s"]
gdf.columns = new_cols
assert list(gdf.columns) == new_cols
def test_select_dtype():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float, "d": str}
)
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("float64"), gdf.select_dtypes("float64"))
assert_eq(pdf.select_dtypes(np.float64), gdf.select_dtypes(np.float64))
assert_eq(
pdf.select_dtypes(include=["float64"]),
gdf.select_dtypes(include=["float64"]),
)
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["int64", "float64"]),
gdf.select_dtypes(include=["int64", "float64"]),
)
assert_eq(
pdf.select_dtypes(include=np.number),
gdf.select_dtypes(include=np.number),
)
assert_eq(
pdf.select_dtypes(include=[np.int64, np.float64]),
gdf.select_dtypes(include=[np.int64, np.float64]),
)
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(exclude=np.number),
gdf.select_dtypes(exclude=np.number),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
rfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
rfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
)
gdf = cudf.DataFrame(
{"A": [3, 4, 5], "C": [1, 2, 3], "D": ["a", "b", "c"]}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["object"], exclude=["category"]),
gdf.select_dtypes(include=["object"], exclude=["category"]),
)
gdf = cudf.DataFrame({"a": range(10), "b": range(10, 20)})
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(include=["float"]),
gdf.select_dtypes(include=["float"]),
)
assert_eq(
pdf.select_dtypes(include=["object"]),
gdf.select_dtypes(include=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"]), gdf.select_dtypes(include=["int"])
)
assert_eq(
pdf.select_dtypes(exclude=["float"]),
gdf.select_dtypes(exclude=["float"]),
)
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes, rfunc=gdf.select_dtypes,
)
gdf = cudf.DataFrame(
{"a": cudf.Series([], dtype="int"), "b": cudf.Series([], dtype="str")}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
def test_select_dtype_datetime():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("datetime64"), gdf.select_dtypes("datetime64"))
assert_eq(
pdf.select_dtypes(np.dtype("datetime64")),
gdf.select_dtypes(np.dtype("datetime64")),
)
assert_eq(
pdf.select_dtypes(include="datetime64"),
gdf.select_dtypes(include="datetime64"),
)
def test_select_dtype_datetime_with_frequency():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_exceptions_equal(
pdf.select_dtypes,
gdf.select_dtypes,
(["datetime64[ms]"],),
(["datetime64[ms]"],),
)
def test_array_ufunc():
gdf = cudf.DataFrame({"x": [2, 3, 4.0], "y": [9.0, 2.5, 1.1]})
pdf = gdf.to_pandas()
assert_eq(np.sqrt(gdf), np.sqrt(pdf))
assert_eq(np.sqrt(gdf.x), np.sqrt(pdf.x))
@pytest.mark.parametrize("nan_value", [-5, -5.0, 0, 5, 5.0, None, "pandas"])
def test_series_to_gpu_array(nan_value):
s = cudf.Series([0, 1, None, 3])
np.testing.assert_array_equal(
s.to_array(nan_value), s.to_gpu_array(nan_value).copy_to_host()
)
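# describe(exclude=...) should drop the excluded dtypes from the summary,
# matching pandas.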
def test_dataframe_describe_exclude():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(exclude=["float"])
pdf_results = pdf.describe(exclude=["float"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_include():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include=["int"])
pdf_results = pdf.describe(include=["int"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_default():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe()
pdf_results = pdf.describe()
assert_eq(pdf_results, gdf_results)
def test_series_describe_include_all():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
df["animal"] = np.random.choice(["dog", "cat", "bird"], data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include="all")
pdf_results = pdf.describe(include="all")
assert_eq(gdf_results[["x", "y"]], pdf_results[["x", "y"]])
assert_eq(gdf_results.index, pdf_results.index)
assert_eq(gdf_results.columns, pdf_results.columns)
assert_eq(
gdf_results[["animal"]].fillna(-1).astype("str"),
pdf_results[["animal"]].fillna(-1).astype("str"),
)
def test_dataframe_describe_percentiles():
np.random.seed(12)
data_length = 10000
sample_percentiles = [0.0, 0.1, 0.33, 0.84, 0.4, 0.99]
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(percentiles=sample_percentiles)
pdf_results = pdf.describe(percentiles=sample_percentiles)
assert_eq(pdf_results, gdf_results)
def test_get_numeric_data():
pdf = pd.DataFrame(
{"x": [1, 2, 3], "y": [1.0, 2.0, 3.0], "z": ["a", "b", "c"]}
)
gdf = cudf.from_pandas(pdf)
assert_eq(pdf._get_numeric_data(), gdf._get_numeric_data())
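# shift() should behave like pandas for positive, negative and zero periods;
# the fillna(0)/astype round trip normalizes the nullable result for comparison.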
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_shift(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
shifted_outcome = gdf.a.shift(period).fillna(0)
expected_outcome = pdf.a.shift(period).fillna(0).astype(dtype)
if data_empty:
assert_eq(shifted_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(shifted_outcome, expected_outcome)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_diff(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
expected_outcome = pdf.a.diff(period)
diffed_outcome = gdf.a.diff(period).astype(expected_outcome.dtype)
if data_empty:
assert_eq(diffed_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(diffed_outcome, expected_outcome)
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_isnull_isna(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.isnull(), gdf.isnull())
assert_eq(df.isna(), gdf.isna())
# Test individual columns
for col in df:
assert_eq(df[col].isnull(), gdf[col].isnull())
assert_eq(df[col].isna(), gdf[col].isna())
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_notna_notnull(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.notnull(), gdf.notnull())
assert_eq(df.notna(), gdf.notna())
# Test individual columns
for col in df:
assert_eq(df[col].notnull(), gdf[col].notnull())
assert_eq(df[col].notna(), gdf[col].notna())
def test_ndim():
pdf = pd.DataFrame({"x": range(5), "y": range(5, 10)})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.ndim == gdf.ndim
assert pdf.x.ndim == gdf.x.ndim
s = pd.Series(dtype="float64")
gs = cudf.Series()
assert s.ndim == gs.ndim
@pytest.mark.parametrize(
"decimals",
[
-3,
0,
5,
pd.Series([1, 4, 3, -6], index=["w", "x", "y", "z"]),
cudf.Series([-4, -2, 12], index=["x", "y", "z"]),
{"w": -1, "x": 15, "y": 2},
],
)
def test_dataframe_round(decimals):
pdf = pd.DataFrame(
{
"w": np.arange(0.5, 10.5, 1),
"x": np.random.normal(-100, 100, 10),
"y": np.array(
[
14.123,
2.343,
np.nan,
0.0,
-8.302,
np.nan,
94.313,
-112.236,
-8.029,
np.nan,
]
),
"z": np.repeat([-0.6459412758761901], 10),
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
if isinstance(decimals, cudf.Series):
pdecimals = decimals.to_pandas()
else:
pdecimals = decimals
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
# with nulls, maintaining existing null mask
for c in pdf.columns:
arr = pdf[c].to_numpy().astype("float64") # for pandas nulls
arr.ravel()[np.random.choice(10, 5, replace=False)] = np.nan
pdf[c] = gdf[c] = arr
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
    for c in gdf.columns:
        # the existing null positions must be maintained after rounding
        assert np.array_equal(
            gdf[c].isnull().to_array(), result[c].isnull().to_array()
        )
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: all does not "
"support columns of object dtype."
)
],
),
],
)
def test_all(data):
    # Pandas treats `None` in object-dtype columns as True for some reason,
    # so replace it with `False` before comparing.
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data).replace(
[None], False
)
gdata = cudf.Series.from_pandas(pdata)
else:
pdata = pd.DataFrame(data, columns=["a", "b"]).replace([None], False)
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.all(bool_only=True)
expected = pdata.all(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.all(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.all(level="a")
got = gdata.all()
expected = pdata.all()
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[0, 0, 0, 0, 0],
[0, 0, None, 0],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: any does not "
"support columns of object dtype."
)
],
),
],
)
@pytest.mark.parametrize("axis", [0, 1])
def test_any(data, axis):
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data)
gdata = cudf.Series.from_pandas(pdata)
if axis == 1:
with pytest.raises(NotImplementedError):
gdata.any(axis=axis)
else:
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
else:
pdata = pd.DataFrame(data, columns=["a", "b"])
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.any(bool_only=True)
expected = pdata.any(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.any(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.any(level="a")
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
@pytest.mark.parametrize("axis", [0, 1])
def test_empty_dataframe_any(axis):
pdf = pd.DataFrame({}, columns=["a", "b"])
gdf = cudf.DataFrame.from_pandas(pdf)
got = gdf.any(axis=axis)
expected = pdf.any(axis=axis)
assert_eq(got, expected, check_index_type=False)
@pytest.mark.parametrize("indexed", [False, True])
def test_dataframe_sizeof(indexed):
rows = int(1e6)
    index = list(range(rows)) if indexed else None
gdf = cudf.DataFrame({"A": [8] * rows, "B": [32] * rows}, index=index)
for c in gdf._data.columns:
assert gdf._index.__sizeof__() == gdf._index.__sizeof__()
cols_sizeof = sum(c.__sizeof__() for c in gdf._data.columns)
assert gdf.__sizeof__() == (gdf._index.__sizeof__() + cols_sizeof)
@pytest.mark.parametrize("a", [[], ["123"]])
@pytest.mark.parametrize("b", ["123", ["123"]])
@pytest.mark.parametrize(
"misc_data",
["123", ["123"] * 20, 123, [1, 2, 0.8, 0.9] * 50, 0.9, 0.00001],
)
@pytest.mark.parametrize("non_list_data", [123, "abc", "zyx", "rapids", 0.8])
def test_create_dataframe_cols_empty_data(a, b, misc_data, non_list_data):
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = b
actual["b"] = b
assert_eq(actual, expected)
expected = pd.DataFrame({"a": []})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = misc_data
actual["b"] = misc_data
assert_eq(actual, expected)
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = non_list_data
actual["b"] = non_list_data
assert_eq(actual, expected)
def test_empty_dataframe_describe():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
expected = pdf.describe()
actual = gdf.describe()
assert_eq(expected, actual)
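# column.as_column should honor explicit dtypes (including for empty inputs)
# and round-trip into Series that match the equivalent pandas construction.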
def test_as_column_types():
col = column.as_column(cudf.Series([]))
assert_eq(col.dtype, np.dtype("float64"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float64"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="float32")
assert_eq(col.dtype, np.dtype("float32"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float32"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="str")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="str"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="object")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="object"))
assert_eq(pds, gds)
pds = pd.Series(np.array([1, 2, 3]), dtype="float32")
gds = cudf.Series(column.as_column(np.array([1, 2, 3]), dtype="float32"))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 3], dtype="float32")
gds = cudf.Series([1, 2, 3], dtype="float32")
assert_eq(pds, gds)
pds = pd.Series([], dtype="float64")
gds = cudf.Series(column.as_column(pds))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 4], dtype="int64")
gds = cudf.Series(column.as_column(cudf.Series([1, 2, 4]), dtype="int64"))
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="float32")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="float32")
)
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="str")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="str")
)
assert_eq(pds, gds)
pds = pd.Series(pd.Index(["1", "18", "9"]), dtype="int")
gds = cudf.Series(
cudf.core.index.StringIndex(["1", "18", "9"]), dtype="int"
)
assert_eq(pds, gds)
def test_one_row_head():
gdf = cudf.DataFrame({"name": ["carl"], "score": [100]}, index=[123])
pdf = gdf.to_pandas()
head_gdf = gdf.head()
head_pdf = pdf.head()
assert_eq(head_pdf, head_gdf)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric(dtype, as_dtype):
psr = pd.Series([1, 2, 4, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric_nulls(dtype, as_dtype):
data = [1, 2, None, 3]
sr = cudf.Series(data, dtype=dtype)
got = sr.astype(as_dtype)
expect = cudf.Series([1, 2, None, 3], dtype=as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_numeric_to_other(dtype, as_dtype):
psr = pd.Series([1, 2, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05"]
else:
data = ["1", "2", "3"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_datetime_to_other(as_dtype):
data = ["2001-01-01", "2002-02-02", "2001-01-05"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"inp",
[
("datetime64[ns]", "2011-01-01 00:00:00.000000000"),
("datetime64[us]", "2011-01-01 00:00:00.000000"),
("datetime64[ms]", "2011-01-01 00:00:00.000"),
("datetime64[s]", "2011-01-01 00:00:00"),
],
)
def test_series_astype_datetime_to_string(inp):
dtype, expect = inp
base_date = "2011-01-01"
sr = cudf.Series([base_date], dtype=dtype)
got = sr.astype(str)[0]
assert expect == got
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_series_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
gsr = cudf.from_pandas(psr)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
psr.astype("int32").astype(ordered_dtype_pd).astype("int32"),
gsr.astype("int32").astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_cat_ordered_to_unordered(ordered):
pd_dtype = pd.CategoricalDtype(categories=[1, 2, 3], ordered=ordered)
pd_to_dtype = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=not ordered
)
gd_dtype = cudf.CategoricalDtype.from_pandas(pd_dtype)
gd_to_dtype = cudf.CategoricalDtype.from_pandas(pd_to_dtype)
psr = pd.Series([1, 2, 3], dtype=pd_dtype)
gsr = cudf.Series([1, 2, 3], dtype=gd_dtype)
expect = psr.astype(pd_to_dtype)
got = gsr.astype(gd_to_dtype)
assert_eq(expect, got)
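# Nulls must survive astype across numeric, categorical, string and datetime
# targets; each pair below compares a direct construction against a cast.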
def test_series_astype_null_cases():
data = [1, 2, None, 3]
# numerical to other
assert_eq(cudf.Series(data, dtype="str"), cudf.Series(data).astype("str"))
assert_eq(
cudf.Series(data, dtype="category"),
cudf.Series(data).astype("category"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="int32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="uint32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data).astype("datetime64[ms]"),
)
# categorical to other
assert_eq(
cudf.Series(data, dtype="str"),
cudf.Series(data, dtype="category").astype("str"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="category").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data, dtype="category").astype("datetime64[ms]"),
)
# string to other
assert_eq(
cudf.Series([1, 2, None, 3], dtype="int32"),
cudf.Series(["1", "2", None, "3"]).astype("int32"),
)
assert_eq(
cudf.Series(
["2001-01-01", "2001-02-01", None, "2001-03-01"],
dtype="datetime64[ms]",
),
cudf.Series(["2001-01-01", "2001-02-01", None, "2001-03-01"]).astype(
"datetime64[ms]"
),
)
assert_eq(
cudf.Series(["a", "b", "c", None], dtype="category").to_pandas(),
cudf.Series(["a", "b", "c", None]).astype("category").to_pandas(),
)
# datetime to other
data = [
"2001-01-01 00:00:00.000000",
"2001-02-01 00:00:00.000000",
None,
"2001-03-01 00:00:00.000000",
]
assert_eq(
cudf.Series(data),
cudf.Series(data, dtype="datetime64[us]").astype("str"),
)
assert_eq(
pd.Series(data, dtype="datetime64[ns]").astype("category"),
cudf.from_pandas(pd.Series(data, dtype="datetime64[ns]")).astype(
"category"
),
)
def test_series_astype_null_categorical():
sr = cudf.Series([None, None, None], dtype="category")
expect = cudf.Series([None, None, None], dtype="int32")
got = sr.astype("int32")
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
(
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
),
[
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
],
],
)
def test_create_dataframe_from_list_like(data):
pdf = pd.DataFrame(data, index=["count", "mean", "std", "min"])
gdf = cudf.DataFrame(data, index=["count", "mean", "std", "min"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def test_create_dataframe_column():
pdf = pd.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
gdf = cudf.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
gdf = cudf.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pd.Categorical(["a", "b", "c"]),
["m", "a", "d", "v"],
],
)
def test_series_values_host_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
np.testing.assert_array_equal(pds.values, gds.values_host)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pytest.param(
pd.Categorical(["a", "b", "c"]),
marks=pytest.mark.xfail(raises=NotImplementedError),
),
pytest.param(
["m", "a", "d", "v"],
marks=pytest.mark.xfail(raises=NotImplementedError),
),
],
)
def test_series_values_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
gds_vals = gds.values
assert isinstance(gds_vals, cupy.ndarray)
np.testing.assert_array_equal(gds_vals.get(), pds.values)
@pytest.mark.parametrize(
"data",
[
{"A": [1, 2, 3], "B": [4, 5, 6]},
{"A": [1.0, 2.0, 3.0], "B": [4.0, 5.0, 6.0]},
{"A": [1, 2, 3], "B": [1.0, 2.0, 3.0]},
{"A": np.float32(np.arange(3)), "B": np.float64(np.arange(3))},
pytest.param(
{"A": [1, None, 3], "B": [1, 2, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [None, None, None], "B": [None, None, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [], "B": []},
marks=pytest.mark.xfail(reason="Requires at least 1 row"),
),
pytest.param(
{"A": [1, 2, 3], "B": ["a", "b", "c"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": pd.Categorical(["a", "b", "c"]), "B": ["d", "e", "f"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
],
)
def test_df_values_property(data):
pdf = pd.DataFrame.from_dict(data)
gdf = cudf.DataFrame.from_pandas(pdf)
pmtr = pdf.values
gmtr = gdf.values.get()
np.testing.assert_array_equal(pmtr, gmtr)
def test_value_counts():
pdf = pd.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
gdf = cudf.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
assert_eq(
pdf.numeric.value_counts().sort_index(),
gdf.numeric.value_counts().sort_index(),
check_dtype=False,
)
assert_eq(
pdf.alpha.value_counts().sort_index(),
gdf.alpha.value_counts().sort_index(),
check_dtype=False,
)
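# The isin tests below compare cudf against pandas for numeric, datetime,
# string, categorical, Index and MultiIndex inputs.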
@pytest.mark.parametrize(
"data",
[
[],
[0, 12, 14],
[0, 14, 12, 12, 3, 10, 12, 14],
np.random.randint(-100, 100, 200),
pd.Series([0.0, 1.0, None, 10.0]),
[None, None, None, None],
[np.nan, None, -1, 2, 3],
],
)
@pytest.mark.parametrize(
"values",
[
np.random.randint(-100, 100, 10),
[],
[np.nan, None, -1, 2, 3],
[1.0, 12.0, None, None, 120],
[0, 14, 12, 12, 3, 10, 12, 14, None],
[None, None, None],
["0", "12", "14"],
["0", "12", "14", "a"],
],
)
def test_isin_numeric(data, values):
index = np.random.randint(0, 100, len(data))
psr = cudf.utils.utils._create_pandas_series(data=data, index=index)
gsr = cudf.Series.from_pandas(psr, nan_as_null=False)
expected = psr.isin(values)
got = gsr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["2018-01-01", "2019-04-03", None, "2019-12-30"],
dtype="datetime64[ns]",
),
pd.Series(
[
"2018-01-01",
"2019-04-03",
None,
"2019-12-30",
"2018-01-01",
"2018-01-01",
],
dtype="datetime64[ns]",
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
[1514764800000000000, 1577664000000000000],
[
1514764800000000000,
1577664000000000000,
1577664000000000000,
1577664000000000000,
1514764800000000000,
],
["2019-04-03", "2019-12-30", "2012-01-01"],
[
"2012-01-01",
"2012-01-01",
"2012-01-01",
"2019-04-03",
"2019-12-30",
"2012-01-01",
],
],
)
def test_isin_datetime(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["this", "is", None, "a", "test"]),
pd.Series(["test", "this", "test", "is", None, "test", "a", "test"]),
pd.Series(["0", "12", "14"]),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[None, None, None],
["12", "14", "19"],
pytest.param(
[12, 14, 19],
marks=pytest.mark.xfail(
not PANDAS_GE_120,
reason="pandas's failure here seems like a bug(in < 1.2) "
"given the reverse succeeds",
),
),
["is", "this", "is", "this", "is"],
],
)
def test_isin_string(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["a", "b", "c", "c", "c", "d", "e"], dtype="category"),
pd.Series(["a", "b", None, "c", "d", "e"], dtype="category"),
pd.Series([0, 3, 10, 12], dtype="category"),
pd.Series([0, 3, 10, 12, 0, 10, 3, 0, 0, 3, 3], dtype="category"),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["a", "b", None, "f", "words"],
["0", "12", None, "14"],
[0, 10, 12, None, 39, 40, 1000],
[0, 0, 0, 0, 3, 3, 3, None, 1, 2, 3],
],
)
def test_isin_categorical(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["this", "is", None, "a", "test"], index=["a", "b", "c", "d", "e"]
),
pd.Series([0, 15, 10], index=[0, None, 9]),
pd.Series(
range(25),
index=pd.date_range(
start="2019-01-01", end="2019-01-02", freq="H"
),
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[0, 19, 13],
["2019-01-01 04:00:00", "2019-01-01 06:00:00", "2018-03-02"],
],
)
def test_isin_index(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.index.isin(values)
expected = psr.index.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]], names=("number", "color")
),
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
pd.MultiIndex.from_arrays(
[[1, 2, 3, 10, 100], ["red", "blue", "green", "pink", "white"]],
names=("number", "color"),
),
],
)
@pytest.mark.parametrize(
"values,level,err",
[
(["red", "orange", "yellow"], "color", None),
(["red", "white", "yellow"], "color", None),
([0, 1, 2, 10, 11, 15], "number", None),
([0, 1, 2, 10, 11, 15], None, TypeError),
(pd.Series([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 8, 11, 15]), "number", None),
(pd.Index(["red", "white", "yellow"]), "color", None),
([(1, "red"), (3, "red")], None, None),
(((1, "red"), (3, "red")), None, None),
(
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]],
names=("number", "color"),
),
None,
None,
),
(
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
None,
None,
),
(
pd.MultiIndex.from_arrays(
[
[1, 2, 3, 10, 100],
["red", "blue", "green", "pink", "white"],
],
names=("number", "color"),
),
None,
None,
),
],
)
def test_isin_multiindex(data, values, level, err):
pmdx = data
gmdx = cudf.from_pandas(data)
if err is None:
expected = pmdx.isin(values, level=level)
if isinstance(values, pd.MultiIndex):
values = cudf.from_pandas(values)
got = gmdx.isin(values, level=level)
assert_eq(got, expected)
else:
assert_exceptions_equal(
lfunc=pmdx.isin,
rfunc=gmdx.isin,
lfunc_args_and_kwargs=([values], {"level": level}),
rfunc_args_and_kwargs=([values], {"level": level}),
check_exception_type=False,
expected_error_message=re.escape(
"values need to be a Multi-Index or set/list-like tuple "
"squences when `level=None`."
),
)
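# DataFrame.isin with a scalar should raise the same error as pandas; list,
# dict, Series and DataFrame inputs are compared directly.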
@pytest.mark.parametrize(
"data",
[
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [8, 2, 1, 0, 2, 4, 5],
"num_wings": [2, 0, 2, 1, 2, 4, -1],
}
),
],
)
@pytest.mark.parametrize(
"values",
[
[0, 2],
{"num_wings": [0, 3]},
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
["sparrow", "pigeon"],
pd.Series(["sparrow", "pigeon"], dtype="category"),
pd.Series([1, 2, 3, 4, 5]),
"abc",
123,
],
)
def test_isin_dataframe(data, values):
pdf = data
gdf = cudf.from_pandas(pdf)
if cudf.utils.dtypes.is_scalar(values):
assert_exceptions_equal(
lfunc=pdf.isin,
rfunc=gdf.isin,
lfunc_args_and_kwargs=([values],),
rfunc_args_and_kwargs=([values],),
)
else:
try:
expected = pdf.isin(values)
except ValueError as e:
if str(e) == "Lengths must match.":
                if not PANDAS_GE_110:
                    pytest.xfail(
                        "https://github.com/pandas-dev/pandas/issues/34256"
                    )
if isinstance(values, (pd.DataFrame, pd.Series)):
values = cudf.from_pandas(values)
got = gdf.isin(values)
assert_eq(got, expected)
def test_constructor_properties():
df = cudf.DataFrame()
key1 = "a"
key2 = "b"
val1 = np.array([123], dtype=np.float64)
val2 = np.array([321], dtype=np.float64)
df[key1] = val1
df[key2] = val2
# Correct use of _constructor (for DataFrame)
assert_eq(df, df._constructor({key1: val1, key2: val2}))
# Correct use of _constructor (for cudf.Series)
assert_eq(df[key1], df[key2]._constructor(val1, name=key1))
# Correct use of _constructor_sliced (for DataFrame)
assert_eq(df[key1], df._constructor_sliced(val1, name=key1))
# Correct use of _constructor_expanddim (for cudf.Series)
assert_eq(df, df[key2]._constructor_expanddim({key1: val1, key2: val2}))
# Incorrect use of _constructor_sliced (Raises for cudf.Series)
with pytest.raises(NotImplementedError):
df[key1]._constructor_sliced
# Incorrect use of _constructor_expanddim (Raises for DataFrame)
with pytest.raises(NotImplementedError):
df._constructor_expanddim
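# Casting a numeric DataFrame to every supported dtype should equal building
# the expected frame by casting each column's Series individually.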
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", ALL_TYPES)
def test_df_astype_numeric_to_all(dtype, as_dtype):
if "uint" in dtype:
data = [1, 2, None, 4, 7]
elif "int" in dtype or "longlong" in dtype:
data = [1, 2, None, 4, -7]
elif "float" in dtype:
data = [1.0, 2.0, None, 4.0, np.nan, -7.0]
gdf = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype=dtype)
gdf["bar"] = cudf.Series(data, dtype=dtype)
insert_data = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = insert_data.astype(as_dtype)
expect["bar"] = insert_data.astype(as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_df_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
# change None to "NaT" after this issue is fixed:
# https://github.com/rapidsai/cudf/issues/5117
data = ["2001-01-01", "2002-02-02", "2000-01-05", None]
elif as_dtype == "int32":
data = [1, 2, 3]
elif as_dtype == "category":
data = ["1", "2", "3", None]
elif "float" in as_dtype:
data = [1.0, 2.0, 3.0, np.nan]
insert_data = cudf.Series.from_pandas(pd.Series(data, dtype="str"))
expect_data = cudf.Series(data, dtype=as_dtype)
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = insert_data
gdf["bar"] = insert_data
expect["foo"] = expect_data
expect["bar"] = expect_data
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int64",
"datetime64[s]",
"datetime64[us]",
"datetime64[ns]",
"str",
"category",
],
)
def test_df_astype_datetime_to_other(as_dtype):
data = [
"1991-11-20 00:00:00.000",
"2004-12-04 00:00:00.000",
"2016-09-13 00:00:00.000",
None,
]
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype="datetime64[ms]")
gdf["bar"] = cudf.Series(data, dtype="datetime64[ms]")
if as_dtype == "int64":
expect["foo"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
expect["bar"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
elif as_dtype == "str":
expect["foo"] = cudf.Series(data, dtype="str")
expect["bar"] = cudf.Series(data, dtype="str")
elif as_dtype == "category":
expect["foo"] = cudf.Series(gdf["foo"], dtype="category")
expect["bar"] = cudf.Series(gdf["bar"], dtype="category")
else:
expect["foo"] = cudf.Series(data, dtype=as_dtype)
expect["bar"] = cudf.Series(data, dtype=as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_df_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf.astype(as_dtype), gdf.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_df_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
pdf.astype(ordered_dtype_pd).astype("int32"),
gdf.astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize(
"dtype,args",
[(dtype, {}) for dtype in ALL_TYPES]
+ [("category", {"ordered": True}), ("category", {"ordered": False})],
)
def test_empty_df_astype(dtype, args):
df = cudf.DataFrame()
kwargs = {}
kwargs.update(args)
assert_eq(df, df.astype(dtype=dtype, **kwargs))
@pytest.mark.parametrize(
"errors",
[
pytest.param(
"raise", marks=pytest.mark.xfail(reason="should raise error here")
),
pytest.param("other", marks=pytest.mark.xfail(raises=ValueError)),
"ignore",
pytest.param(
"warn", marks=pytest.mark.filterwarnings("ignore:Traceback")
),
],
)
def test_series_astype_error_handling(errors):
sr = cudf.Series(["random", "words"])
got = sr.astype("datetime64", errors=errors)
assert_eq(sr, got)
@pytest.mark.parametrize("dtype", ALL_TYPES)
def test_df_constructor_dtype(dtype):
if "datetime" in dtype:
data = ["1991-11-20", "2004-12-04", "2016-09-13", None]
elif dtype == "str":
data = ["a", "b", "c", None]
elif "float" in dtype:
data = [1.0, 0.5, -1.1, np.nan, None]
elif "bool" in dtype:
data = [True, False, None]
else:
data = [1, 2, 3, None]
sr = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = sr
expect["bar"] = sr
got = cudf.DataFrame({"foo": data, "bar": data}, dtype=dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": int}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": str}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": bool, "b": int, "c": float, "d": str}
),
cudf.DataFrame(),
cudf.DataFrame({"a": [0, 1, 2], "b": [1, None, 3]}),
cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
),
cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False
),
}
),
],
)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops(data, op, skipna):
gdf = data
pdf = gdf.to_pandas()
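# For var/std, pass ddof=0 explicitly on both sides so pandas and cudf use
# the same (population) formula.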
if op in ("var", "std"):
expected = getattr(pdf, op)(axis=1, ddof=0, skipna=skipna)
got = getattr(gdf, op)(axis=1, ddof=0, skipna=skipna)
else:
expected = getattr(pdf, op)(axis=1, skipna=skipna)
got = getattr(gdf, op)(axis=1, skipna=skipna)
assert_eq(expected, got, check_exact=False)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
def test_rowwise_ops_nullable_dtypes_all_null(op):
gdf = cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
)
expected = cudf.Series([None, None, None, None], dtype="float64")
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series(
[10.0, None, np.NaN, 2234.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"min",
cudf.Series(
[10.0, None, np.NaN, 13.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"sum",
cudf.Series(
[20.0, None, np.NaN, 2247.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"product",
cudf.Series(
[100.0, None, np.NaN, 29042.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"mean",
cudf.Series(
[10.0, None, np.NaN, 1123.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"var",
cudf.Series(
[0.0, None, np.NaN, 1233210.25, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"std",
cudf.Series(
[0.0, None, np.NaN, 1110.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
],
)
def test_rowwise_ops_nullable_dtypes_partial_null(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False,
),
}
)
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series([10, None, None, 2234, None, 453], dtype="int64",),
),
("min", cudf.Series([10, None, None, 13, None, 15], dtype="int64",),),
(
"sum",
cudf.Series([20, None, None, 2247, None, 468], dtype="int64",),
),
(
"product",
cudf.Series([100, None, None, 29042, None, 6795], dtype="int64",),
),
(
"mean",
cudf.Series(
[10.0, None, None, 1123.5, None, 234.0], dtype="float32",
),
),
(
"var",
cudf.Series(
[0.0, None, None, 1233210.25, None, 47961.0], dtype="float32",
),
),
(
"std",
cudf.Series(
[0.0, None, None, 1110.5, None, 219.0], dtype="float32",
),
),
],
)
def test_rowwise_ops_nullable_int_dtypes(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, None, 13, None, 15],
"b": cudf.Series(
[10, None, 323, 2234, None, 453], nan_as_null=False,
),
}
)
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ns]"
),
"t3": cudf.Series(
["1960-08-31 06:00:00", "2030-08-02 10:00:00"], dtype="<M8[s]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[us]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(["1940-08-31 06:00:00", None], dtype="<M8[ms]"),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
"b1": cudf.Series([True, False], dtype="bool"),
},
],
)
@pytest.mark.parametrize("op", ["max", "min"])
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops_datetime_dtypes(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data,op,skipna",
[
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"max",
True,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
False,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
True,
),
],
)
def test_rowwise_ops_datetime_dtypes_2(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
(
{
"t1": pd.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ns]",
),
"t2": pd.Series(
["1940-08-31 06:00:00", pd.NaT], dtype="<M8[ns]"
),
}
)
],
)
def test_rowwise_ops_datetime_dtypes_pdbug(data):
pdf = pd.DataFrame(data)
gdf = cudf.from_pandas(pdf)
expected = pdf.max(axis=1, skipna=False)
got = gdf.max(axis=1, skipna=False)
if PANDAS_GE_120:
assert_eq(got, expected)
else:
# PANDAS BUG: https://github.com/pandas-dev/pandas/issues/36907
with pytest.raises(AssertionError, match="numpy array are different"):
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[5.0, 6.0, 7.0],
"single value",
np.array(1, dtype="int64"),
np.array(0.6273643, dtype="float64"),
],
)
def test_insert(data):
pdf = pd.DataFrame.from_dict({"A": [1, 2, 3], "B": ["a", "b", "c"]})
gdf = cudf.DataFrame.from_pandas(pdf)
# insertion by index
pdf.insert(0, "foo", data)
gdf.insert(0, "foo", data)
assert_eq(pdf, gdf)
pdf.insert(3, "bar", data)
gdf.insert(3, "bar", data)
assert_eq(pdf, gdf)
pdf.insert(1, "baz", data)
gdf.insert(1, "baz", data)
assert_eq(pdf, gdf)
# pandas insert doesn't support negative indexing
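# cudf does: the assertion below relies on cudf normalizing loc=-1 to
# "append after the last column", matching pandas' insert at len(columns).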
pdf.insert(len(pdf.columns), "qux", data)
gdf.insert(-1, "qux", data)
assert_eq(pdf, gdf)
def test_cov():
gdf = cudf.datasets.randomdata(10)
pdf = gdf.to_pandas()
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.xfail(reason="cupy-based cov does not support nulls")
def test_cov_nans():
pdf = pd.DataFrame()
pdf["a"] = [None, None, None, 2.00758632, None]
pdf["b"] = [0.36403686, None, None, None, None]
pdf["c"] = [None, None, None, 0.64882227, None]
pdf["d"] = [None, -1.46863125, None, 1.22477948, -0.06031689]
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.parametrize(
"gsr",
[
cudf.Series([4, 2, 3]),
cudf.Series([4, 2, 3], index=["a", "b", "c"]),
cudf.Series([4, 2, 3], index=["a", "b", "d"]),
cudf.Series([4, 2], index=["a", "b"]),
cudf.Series([4, 2, 3], index=cudf.core.index.RangeIndex(0, 3)),
pytest.param(
cudf.Series([4, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"]),
marks=pytest.mark.xfail,
),
],
)
@pytest.mark.parametrize("colnames", [["a", "b", "c"], [0, 1, 2]])
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_df_sr_binop(gsr, colnames, op):
data = [[3.0, 2.0, 5.0], [3.0, None, 5.0], [6.0, 7.0, np.nan]]
data = dict(zip(colnames, data))
gsr = gsr.astype("float64")
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas(nullable=True)
psr = gsr.to_pandas(nullable=True)
expect = op(pdf, psr)
got = op(gdf, gsr).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
expect = op(psr, pdf)
got = op(gsr, gdf).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
# comparison ops will temporarily XFAIL
# see PR https://github.com/rapidsai/cudf/pull/7491
pytest.param(operator.eq, marks=pytest.mark.xfail()),
pytest.param(operator.lt, marks=pytest.mark.xfail()),
pytest.param(operator.le, marks=pytest.mark.xfail()),
pytest.param(operator.gt, marks=pytest.mark.xfail()),
pytest.param(operator.ge, marks=pytest.mark.xfail()),
pytest.param(operator.ne, marks=pytest.mark.xfail()),
],
)
@pytest.mark.parametrize(
"gsr", [cudf.Series([1, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"])]
)
def test_df_sr_binop_col_order(gsr, op):
colnames = [0, 1, 2]
data = [[0, 2, 5], [3, None, 5], [6, 7, np.nan]]
data = dict(zip(colnames, data))
gdf = cudf.DataFrame(data)
pdf = pd.DataFrame.from_dict(data)
psr = gsr.to_pandas()
expect = op(pdf, psr).astype("float")
out = op(gdf, gsr).astype("float")
got = out[expect.columns]
assert_eq(expect, got)
@pytest.mark.parametrize("set_index", [None, "A", "C", "D"])
@pytest.mark.parametrize("index", [True, False])
@pytest.mark.parametrize("deep", [True, False])
def test_memory_usage(deep, index, set_index):
# Testing numerical/datetime by comparing with pandas
# (string and categorical columns will be different)
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int64"),
"B": np.arange(rows, dtype="int32"),
"C": np.arange(rows, dtype="float64"),
}
)
df["D"] = pd.to_datetime(df.A)
if set_index:
df = df.set_index(set_index)
gdf = cudf.from_pandas(df)
if index and set_index is None:
# Special Case: Assume RangeIndex size == 0
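# (a default RangeIndex is described only by start/stop/step, so it should
# report no backing memory regardless of `deep`)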
assert gdf.index.memory_usage(deep=deep) == 0
else:
# Check for Series only
assert df["B"].memory_usage(index=index, deep=deep) == gdf[
"B"
].memory_usage(index=index, deep=deep)
# Check for entire DataFrame
assert_eq(
df.memory_usage(index=index, deep=deep).sort_index(),
gdf.memory_usage(index=index, deep=deep).sort_index(),
)
@pytest.mark.xfail
def test_memory_usage_string():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
gdf = cudf.from_pandas(df)
# Check deep=False (should match pandas)
assert gdf.B.memory_usage(deep=False, index=False) == df.B.memory_usage(
deep=False, index=False
)
# Check string column
assert gdf.B.memory_usage(deep=True, index=False) == df.B.memory_usage(
deep=True, index=False
)
# Check string index
assert gdf.set_index("B").index.memory_usage(
deep=True
) == df.B.memory_usage(deep=True, index=False)
def test_memory_usage_cat():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
df["B"] = df.B.astype("category")
gdf = cudf.from_pandas(df)
expected = (
gdf.B._column.cat().categories.__sizeof__()
+ gdf.B._column.cat().codes.__sizeof__()
)
# Check cat column
assert gdf.B.memory_usage(deep=True, index=False) == expected
# Check cat index
assert gdf.set_index("B").index.memory_usage(deep=True) == expected
def test_memory_usage_list():
df = cudf.DataFrame({"A": [[0, 1, 2, 3], [4, 5, 6], [7, 8], [9]]})
expected = (
df.A._column.offsets._memory_usage()
+ df.A._column.elements._memory_usage()
)
assert expected == df.A.memory_usage()
@pytest.mark.xfail
def test_memory_usage_multi():
rows = int(100)
deep = True
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(np.arange(3, dtype="int64"), rows),
"C": np.random.choice(np.arange(3, dtype="float64"), rows),
}
).set_index(["B", "C"])
gdf = cudf.from_pandas(df)
# Assume MultiIndex memory footprint is just that
# of the underlying columns, levels, and codes
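# Byte math (assuming 8 bytes per element): 100 rows x 16 bytes for the two
# 8-byte source columns ("B" int64 + "C" float64), the same again for the
# per-level codes, plus 3 unique 8-byte values for each of the two levels.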
expect = rows * 16 # Source Columns
expect += rows * 16 # Codes
expect += 3 * 8 # Level 0
expect += 3 * 8 # Level 1
assert expect == gdf.index.memory_usage(deep=deep)
@pytest.mark.parametrize(
"list_input",
[
pytest.param([1, 2, 3, 4], id="smaller"),
pytest.param([1, 2, 3, 4, 5, 6], id="larger"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_list(list_input, key):
gdf = cudf.datasets.randomdata(5)
with pytest.raises(
ValueError, match=("All columns must be of equal length")
):
gdf[key] = list_input
@pytest.mark.parametrize(
"series_input",
[
pytest.param(cudf.Series([1, 2, 3, 4]), id="smaller_cudf"),
pytest.param(cudf.Series([1, 2, 3, 4, 5, 6]), id="larger_cudf"),
pytest.param(cudf.Series([1, 2, 3], index=[4, 5, 6]), id="index_cudf"),
pytest.param(pd.Series([1, 2, 3, 4]), id="smaller_pandas"),
pytest.param(pd.Series([1, 2, 3, 4, 5, 6]), id="larger_pandas"),
pytest.param(pd.Series([1, 2, 3], index=[4, 5, 6]), id="index_pandas"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_series(series_input, key):
gdf = cudf.datasets.randomdata(5)
pdf = gdf.to_pandas()
pandas_input = series_input
if isinstance(pandas_input, cudf.Series):
pandas_input = pandas_input.to_pandas()
expect = pdf
expect[key] = pandas_input
got = gdf
got[key] = series_input
# Pandas uses NaN and typecasts to float64 if there's missing values on
# alignment, so need to typecast to float64 for equality comparison
expect = expect.astype("float64")
got = got.astype("float64")
assert_eq(expect, got)
def test_tupleize_cols_False_set():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
pdf[("a", "b")] = [1]
gdf[("a", "b")] = [1]
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_init_multiindex_from_dict():
pdf = pd.DataFrame({("a", "b"): [1]})
gdf = cudf.DataFrame({("a", "b"): [1]})
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_change_column_dtype_in_empty():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
assert_eq(pdf, gdf)
pdf["b"] = pdf["b"].astype("int64")
gdf["b"] = gdf["b"].astype("int64")
assert_eq(pdf, gdf)
def test_dataframe_from_table_empty_index():
df = cudf.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
odict = df._data
tbl = cudf._lib.table.Table(odict)
result = cudf.DataFrame._from_table(tbl) # noqa: F841
@pytest.mark.parametrize("dtype", ["int64", "str"])
def test_dataframe_from_dictionary_series_same_name_index(dtype):
pd_idx1 = pd.Index([1, 2, 0], name="test_index").astype(dtype)
pd_idx2 = pd.Index([2, 0, 1], name="test_index").astype(dtype)
pd_series1 = pd.Series([1, 2, 3], index=pd_idx1)
pd_series2 = pd.Series([1, 2, 3], index=pd_idx2)
gd_idx1 = cudf.from_pandas(pd_idx1)
gd_idx2 = cudf.from_pandas(pd_idx2)
gd_series1 = cudf.Series([1, 2, 3], index=gd_idx1)
gd_series2 = cudf.Series([1, 2, 3], index=gd_idx2)
expect = pd.DataFrame({"a": pd_series1, "b": pd_series2})
got = cudf.DataFrame({"a": gd_series1, "b": gd_series2})
if dtype == "str":
# Pandas actually loses its index name erroneously here...
expect.index.name = "test_index"
assert_eq(expect, got)
assert expect.index.names == got.index.names
@pytest.mark.parametrize(
"arg", [slice(2, 8, 3), slice(1, 20, 4), slice(-2, -6, -2)]
)
def test_dataframe_strided_slice(arg):
mul = pd.DataFrame(
{
"Index": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"AlphaIndex": ["a", "b", "c", "d", "e", "f", "g", "h", "i"],
}
)
pdf = pd.DataFrame(
{"Val": [10, 9, 8, 7, 6, 5, 4, 3, 2]},
index=pd.MultiIndex.from_frame(mul),
)
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf[arg]
got = gdf[arg]
assert_eq(expect, got)
@pytest.mark.parametrize(
"data,condition,other,error",
[
(pd.Series(range(5)), pd.Series(range(5)) > 0, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, 10, None),
(
pd.Series(range(5)),
pd.Series(range(5)) > 1,
pd.Series(range(5, 10)),
None,
),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"])
% 3
)
== 0,
-pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) == 4,
None,
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) != 4,
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[True, True, True],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[True, True, True, False],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True, True, False], [True, True, True, False]],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True], [False, True], [True, False], [False, True]],
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
cuda.to_device(
np.array(
[[True, True], [False, True], [True, False], [False, True]]
)
),
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
cupy.array(
[[True, True], [False, True], [True, False], [False, True]]
),
17,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True], [False, True], [True, False], [False, True]],
17,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[
[True, True, False, True],
[True, True, False, True],
[True, True, False, True],
[True, True, False, True],
],
None,
ValueError,
),
(
pd.Series([1, 2, np.nan]),
pd.Series([1, 2, np.nan]) == 4,
None,
None,
),
(
pd.Series([1, 2, np.nan]),
pd.Series([1, 2, np.nan]) != 4,
None,
None,
),
(
pd.Series([4, np.nan, 6]),
pd.Series([4, np.nan, 6]) == 4,
None,
None,
),
(
pd.Series([4, np.nan, 6]),
pd.Series([4, np.nan, 6]) != 4,
None,
None,
),
(
pd.Series([4, np.nan, 6], dtype="category"),
pd.Series([4, np.nan, 6], dtype="category") != 4,
None,
None,
),
(
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category"),
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category") == "b",
None,
None,
),
(
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category"),
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category") == "b",
"s",
None,
),
(
pd.Series([1, 2, 3, 2, 5]),
pd.Series([1, 2, 3, 2, 5]) == 2,
pd.DataFrame(
{
"a": pd.Series([1, 2, 3, 2, 5]),
"b": pd.Series([1, 2, 3, 2, 5]),
}
),
NotImplementedError,
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_df_sr_mask_where(data, condition, other, error, inplace):
ps_where = data
gs_where = cudf.from_pandas(data)
ps_mask = ps_where.copy(deep=True)
gs_mask = gs_where.copy(deep=True)
if hasattr(condition, "__cuda_array_interface__"):
if type(condition).__module__.split(".")[0] == "cupy":
ps_condition = cupy.asnumpy(condition)
else:
ps_condition = np.array(condition).astype("bool")
else:
ps_condition = condition
if type(condition).__module__.split(".")[0] == "pandas":
gs_condition = cudf.from_pandas(condition)
else:
gs_condition = condition
ps_other = other
if type(other).__module__.split(".")[0] == "pandas":
gs_other = cudf.from_pandas(other)
else:
gs_other = other
if error is None:
expect_where = ps_where.where(
ps_condition, other=ps_other, inplace=inplace
)
got_where = gs_where.where(
gs_condition, other=gs_other, inplace=inplace
)
expect_mask = ps_mask.mask(
ps_condition, other=ps_other, inplace=inplace
)
got_mask = gs_mask.mask(gs_condition, other=gs_other, inplace=inplace)
if inplace:
expect_where = ps_where
got_where = gs_where
expect_mask = ps_mask
got_mask = gs_mask
if pd.api.types.is_categorical_dtype(expect_where):
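# Categorical results are compared through their integer codes; nulls in
# the cudf output are filled with -1 to line up with pandas' sentinel
# code for missing values.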
np.testing.assert_array_equal(
expect_where.cat.codes,
got_where.cat.codes.astype(expect_where.cat.codes.dtype)
.fillna(-1)
.to_array(),
)
assert_eq(expect_where.cat.categories, got_where.cat.categories)
np.testing.assert_array_equal(
expect_mask.cat.codes,
got_mask.cat.codes.astype(expect_mask.cat.codes.dtype)
.fillna(-1)
.to_array(),
)
assert_eq(expect_mask.cat.categories, got_mask.cat.categories)
else:
assert_eq(
expect_where.fillna(-1),
got_where.fillna(-1),
check_dtype=False,
)
assert_eq(
expect_mask.fillna(-1), got_mask.fillna(-1), check_dtype=False
)
else:
assert_exceptions_equal(
lfunc=ps_where.where,
rfunc=gs_where.where,
lfunc_args_and_kwargs=(
[ps_condition],
{"other": ps_other, "inplace": inplace},
),
rfunc_args_and_kwargs=(
[gs_condition],
{"other": gs_other, "inplace": inplace},
),
compare_error_message=False
if error is NotImplementedError
else True,
)
assert_exceptions_equal(
lfunc=ps_mask.mask,
rfunc=gs_mask.mask,
lfunc_args_and_kwargs=(
[ps_condition],
{"other": ps_other, "inplace": inplace},
),
rfunc_args_and_kwargs=(
[gs_condition],
{"other": gs_other, "inplace": inplace},
),
compare_error_message=False,
)
@pytest.mark.parametrize(
"data,condition,other,has_cat",
[
(
pd.DataFrame(
{
"a": pd.Series(["a", "a", "b", "c", "a", "d", "d", "a"]),
"b": pd.Series(["o", "p", "q", "e", "p", "p", "a", "a"]),
}
),
pd.DataFrame(
{
"a": pd.Series(["a", "a", "b", "c", "a", "d", "d", "a"]),
"b": pd.Series(["o", "p", "q", "e", "p", "p", "a", "a"]),
}
)
!= "a",
None,
None,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
!= "a",
None,
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
== "a",
None,
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
!= "a",
"a",
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
== "a",
"a",
True,
),
],
)
def test_df_string_cat_types_mask_where(data, condition, other, has_cat):
ps = data
gs = cudf.from_pandas(data)
ps_condition = condition
if type(condition).__module__.split(".")[0] == "pandas":
gs_condition = cudf.from_pandas(condition)
else:
gs_condition = condition
ps_other = other
if type(other).__module__.split(".")[0] == "pandas":
gs_other = cudf.from_pandas(other)
else:
gs_other = other
expect_where = ps.where(ps_condition, other=ps_other)
got_where = gs.where(gs_condition, other=gs_other)
expect_mask = ps.mask(ps_condition, other=ps_other)
got_mask = gs.mask(gs_condition, other=gs_other)
if has_cat is None:
assert_eq(
expect_where.fillna(-1).astype("str"),
got_where.fillna(-1),
check_dtype=False,
)
assert_eq(
expect_mask.fillna(-1).astype("str"),
got_mask.fillna(-1),
check_dtype=False,
)
else:
assert_eq(expect_where, got_where, check_dtype=False)
assert_eq(expect_mask, got_mask, check_dtype=False)
@pytest.mark.parametrize(
"data,expected_upcast_type,error",
[
(
pd.Series([random.random() for _ in range(10)], dtype="float32"),
np.dtype("float32"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float16"),
np.dtype("float32"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float64"),
np.dtype("float64"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float128"),
None,
NotImplementedError,
),
],
)
def test_from_pandas_unsupported_types(data, expected_upcast_type, error):
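# cudf has no float16 column type, so float16 input is expected to upcast
# to float32, while float128 is unsupported entirely and should raise
# NotImplementedError.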
pdf = pd.DataFrame({"one_col": data})
if error == NotImplementedError:
with pytest.raises(error):
cudf.from_pandas(data)
with pytest.raises(error):
cudf.Series(data)
with pytest.raises(error):
cudf.from_pandas(pdf)
with pytest.raises(error):
cudf.DataFrame(pdf)
else:
df = cudf.from_pandas(data)
assert_eq(data, df, check_dtype=False)
assert df.dtype == expected_upcast_type
df = cudf.Series(data)
assert_eq(data, df, check_dtype=False)
assert df.dtype == expected_upcast_type
df = cudf.from_pandas(pdf)
assert_eq(pdf, df, check_dtype=False)
assert df["one_col"].dtype == expected_upcast_type
df = cudf.DataFrame(pdf)
assert_eq(pdf, df, check_dtype=False)
assert df["one_col"].dtype == expected_upcast_type
@pytest.mark.parametrize("nan_as_null", [True, False])
@pytest.mark.parametrize("index", [None, "a", ["a", "b"]])
def test_from_pandas_nan_as_null(nan_as_null, index):
data = [np.nan, 2.0, 3.0]
if index is None:
pdf = pd.DataFrame({"a": data, "b": data})
expected = cudf.DataFrame(
{
"a": column.as_column(data, nan_as_null=nan_as_null),
"b": column.as_column(data, nan_as_null=nan_as_null),
}
)
else:
pdf = pd.DataFrame({"a": data, "b": data}).set_index(index)
expected = cudf.DataFrame(
{
"a": column.as_column(data, nan_as_null=nan_as_null),
"b": column.as_column(data, nan_as_null=nan_as_null),
}
)
expected = expected.set_index(index)
got = cudf.from_pandas(pdf, nan_as_null=nan_as_null)
assert_eq(expected, got)
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_from_pandas_for_series_nan_as_null(nan_as_null):
data = [np.nan, 2.0, 3.0]
psr = pd.Series(data)
expected = cudf.Series(column.as_column(data, nan_as_null=nan_as_null))
got = cudf.from_pandas(psr, nan_as_null=nan_as_null)
assert_eq(expected, got)
@pytest.mark.parametrize("copy", [True, False])
def test_df_series_dataframe_astype_copy(copy):
gdf = cudf.DataFrame({"col1": [1, 2], "col2": [3, 4]})
pdf = gdf.to_pandas()
assert_eq(
gdf.astype(dtype="float", copy=copy),
pdf.astype(dtype="float", copy=copy),
)
assert_eq(gdf, pdf)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
assert_eq(
gsr.astype(dtype="float", copy=copy),
psr.astype(dtype="float", copy=copy),
)
assert_eq(gsr, psr)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
actual = gsr.astype(dtype="int64", copy=copy)
expected = psr.astype(dtype="int64", copy=copy)
assert_eq(expected, actual)
assert_eq(gsr, psr)
actual[0] = 3
expected[0] = 3
assert_eq(gsr, psr)
@pytest.mark.parametrize("copy", [True, False])
def test_df_series_dataframe_astype_dtype_dict(copy):
gdf = cudf.DataFrame({"col1": [1, 2], "col2": [3, 4]})
pdf = gdf.to_pandas()
assert_eq(
gdf.astype(dtype={"col1": "float"}, copy=copy),
pdf.astype(dtype={"col1": "float"}, copy=copy),
)
assert_eq(gdf, pdf)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
assert_eq(
gsr.astype(dtype={None: "float"}, copy=copy),
psr.astype(dtype={None: "float"}, copy=copy),
)
assert_eq(gsr, psr)
assert_exceptions_equal(
lfunc=psr.astype,
rfunc=gsr.astype,
lfunc_args_and_kwargs=([], {"dtype": {"a": "float"}, "copy": copy}),
rfunc_args_and_kwargs=([], {"dtype": {"a": "float"}, "copy": copy}),
)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
actual = gsr.astype({None: "int64"}, copy=copy)
expected = psr.astype({None: "int64"}, copy=copy)
assert_eq(expected, actual)
assert_eq(gsr, psr)
actual[0] = 3
expected[0] = 3
assert_eq(gsr, psr)
@pytest.mark.parametrize(
"data,columns",
[
([1, 2, 3, 100, 112, 35464], ["a"]),
(range(100), None),
([], None),
((-10, 21, 32, 32, 1, 2, 3), ["p"]),
((), None),
([[1, 2, 3], [1, 2, 3]], ["col1", "col2", "col3"]),
([range(100), range(100)], ["range" + str(i) for i in range(100)]),
(((1, 2, 3), (1, 2, 3)), ["tuple0", "tuple1", "tuple2"]),
([[1, 2, 3]], ["list col1", "list col2", "list col3"]),
([range(100)], ["range" + str(i) for i in range(100)]),
(((1, 2, 3),), ["k1", "k2", "k3"]),
],
)
def test_dataframe_init_1d_list(data, columns):
expect = pd.DataFrame(data, columns=columns)
actual = cudf.DataFrame(data, columns=columns)
assert_eq(
expect, actual, check_index_type=False if len(data) == 0 else True
)
expect = pd.DataFrame(data, columns=None)
actual = cudf.DataFrame(data, columns=None)
assert_eq(
expect, actual, check_index_type=False if len(data) == 0 else True
)
@pytest.mark.parametrize(
"data,cols,index",
[
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
["a", "b", "c", "d"],
),
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
[0, 20, 30, 10],
),
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
[0, 1, 2, 3],
),
(np.array([11, 123, -2342, 232]), ["a"], [1, 2, 11, 12]),
(np.array([11, 123, -2342, 232]), ["a"], ["khsdjk", "a", "z", "kk"]),
(
cupy.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "z"],
["a", "z", "a", "z"],
),
(cupy.array([11, 123, -2342, 232]), ["z"], [0, 1, 1, 0]),
(cupy.array([11, 123, -2342, 232]), ["z"], [1, 2, 3, 4]),
(cupy.array([11, 123, -2342, 232]), ["z"], ["a", "z", "d", "e"]),
(np.random.randn(2, 4), ["a", "b", "c", "d"], ["a", "b"]),
(np.random.randn(2, 4), ["a", "b", "c", "d"], [1, 0]),
(cupy.random.randn(2, 4), ["a", "b", "c", "d"], ["a", "b"]),
(cupy.random.randn(2, 4), ["a", "b", "c", "d"], [1, 0]),
],
)
def test_dataframe_init_from_arrays_cols(data, cols, index):
gd_data = data
if isinstance(data, cupy.core.ndarray):
# pandas can't handle cupy arrays in general
pd_data = data.get()
# additional test for building DataFrame with gpu array whose
# cuda array interface has no `descr` attribute
numba_data = cuda.as_cuda_array(data)
else:
pd_data = data
numba_data = None
# verify with columns & index
pdf = pd.DataFrame(pd_data, columns=cols, index=index)
gdf = cudf.DataFrame(gd_data, columns=cols, index=index)
assert_eq(pdf, gdf, check_dtype=False)
# verify with columns
pdf = pd.DataFrame(pd_data, columns=cols)
gdf = cudf.DataFrame(gd_data, columns=cols)
assert_eq(pdf, gdf, check_dtype=False)
pdf = pd.DataFrame(pd_data)
gdf = cudf.DataFrame(gd_data)
assert_eq(pdf, gdf, check_dtype=False)
if numba_data is not None:
gdf = cudf.DataFrame(numba_data)
assert_eq(pdf, gdf, check_dtype=False)
@pytest.mark.parametrize(
"col_data",
[
range(5),
["a", "b", "x", "y", "z"],
[1.0, 0.213, 0.34332],
["a"],
[1],
[0.2323],
[],
],
)
@pytest.mark.parametrize(
"assign_val",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
def test_dataframe_assign_scalar(col_data, assign_val):
pdf = pd.DataFrame({"a": col_data})
gdf = cudf.DataFrame({"a": col_data})
pdf["b"] = (
cupy.asnumpy(assign_val)
if isinstance(assign_val, cupy.ndarray)
else assign_val
)
gdf["b"] = assign_val
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"col_data",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
@pytest.mark.parametrize(
"assign_val",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
def test_dataframe_assign_scalar_with_scalar_cols(col_data, assign_val):
pdf = pd.DataFrame(
{
"a": cupy.asnumpy(col_data)
if isinstance(col_data, cupy.ndarray)
else col_data
},
index=["dummy_mandatory_index"],
)
gdf = cudf.DataFrame({"a": col_data}, index=["dummy_mandatory_index"])
pdf["b"] = (
cupy.asnumpy(assign_val)
if isinstance(assign_val, cupy.ndarray)
else assign_val
)
gdf["b"] = assign_val
assert_eq(pdf, gdf)
def test_dataframe_info_basic():
buffer = io.StringIO()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
StringIndex: 10 entries, a to 1111
Data columns (total 10 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 0 10 non-null float64
1 1 10 non-null float64
2 2 10 non-null float64
3 3 10 non-null float64
4 4 10 non-null float64
5 5 10 non-null float64
6 6 10 non-null float64
7 7 10 non-null float64
8 8 10 non-null float64
9 9 10 non-null float64
dtypes: float64(10)
memory usage: 859.0+ bytes
"""
)
df = pd.DataFrame(
np.random.randn(10, 10),
index=["a", "2", "3", "4", "5", "6", "7", "8", "100", "1111"],
)
cudf.from_pandas(df).info(buf=buffer, verbose=True)
s = buffer.getvalue()
assert str_cmp == s
def test_dataframe_info_verbose_mem_usage():
buffer = io.StringIO()
df = pd.DataFrame({"a": [1, 2, 3], "b": ["safdas", "assa", "asdasd"]})
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 3 entries, 0 to 2
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 3 non-null int64
1 b 3 non-null object
dtypes: int64(1), object(1)
memory usage: 56.0+ bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=True)
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 3 entries, 0 to 2
Columns: 2 entries, a to b
dtypes: int64(1), object(1)
memory usage: 56.0+ bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=False)
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
df = pd.DataFrame(
{"a": [1, 2, 3], "b": ["safdas", "assa", "asdasd"]},
index=["sdfdsf", "sdfsdfds", "dsfdf"],
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
StringIndex: 3 entries, sdfdsf to dsfdf
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 3 non-null int64
1 b 3 non-null object
dtypes: int64(1), object(1)
memory usage: 91.0 bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=True, memory_usage="deep")
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
int_values = [1, 2, 3, 4, 5]
text_values = ["alpha", "beta", "gamma", "delta", "epsilon"]
float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
df = cudf.DataFrame(
{
"int_col": int_values,
"text_col": text_values,
"float_col": float_values,
}
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 int_col 5 non-null int64
1 text_col 5 non-null object
2 float_col 5 non-null float64
dtypes: float64(1), int64(1), object(1)
memory usage: 130.0 bytes
"""
)
df.info(buf=buffer, verbose=True, memory_usage="deep")
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
def test_dataframe_info_null_counts():
int_values = [1, 2, 3, 4, 5]
text_values = ["alpha", "beta", "gamma", "delta", "epsilon"]
float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
df = cudf.DataFrame(
{
"int_col": int_values,
"text_col": text_values,
"float_col": float_values,
}
)
buffer = io.StringIO()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Dtype
--- ------ -----
0 int_col int64
1 text_col object
2 float_col float64
dtypes: float64(1), int64(1), object(1)
memory usage: 130.0+ bytes
"""
)
df.info(buf=buffer, verbose=True, null_counts=False)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df.info(buf=buffer, verbose=True, max_cols=0)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df = cudf.DataFrame()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 0 entries
Empty DataFrame"""
)
df.info(buf=buffer, verbose=True)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df = cudf.DataFrame(
{
"a": [1, 2, 3, None, 10, 11, 12, None],
"b": ["a", "b", "c", "sd", "sdf", "sd", None, None],
}
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 8 entries, 0 to 7
Data columns (total 2 columns):
# Column Dtype
--- ------ -----
0 a int64
1 b object
dtypes: int64(1), object(1)
memory usage: 238.0+ bytes
"""
)
pd.options.display.max_info_rows = 2
df.info(buf=buffer, max_cols=2, null_counts=None)
pd.reset_option("display.max_info_rows")
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 8 entries, 0 to 7
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 6 non-null int64
1 b 6 non-null object
dtypes: int64(1), object(1)
memory usage: 238.0+ bytes
"""
)
df.info(buf=buffer, max_cols=2, null_counts=None)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df.info(buf=buffer, null_counts=True)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
@pytest.mark.parametrize(
"data1",
[
[1, 2, 3, 4, 5, 6, 7],
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[
1.9876543,
2.9876654,
3.9876543,
4.1234587,
5.23,
6.88918237,
7.00001,
],
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
-0.1221,
-2.1221,
-0.112121,
21.1212,
],
],
)
@pytest.mark.parametrize(
"data2",
[
[1, 2, 3, 4, 5, 6, 7],
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[
1.9876543,
2.9876654,
3.9876543,
4.1234587,
5.23,
6.88918237,
7.00001,
],
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
-0.1221,
-2.1221,
-0.112121,
21.1212,
],
],
)
@pytest.mark.parametrize("rtol", [0, 0.01, 1e-05, 1e-08, 5e-1, 50.12])
@pytest.mark.parametrize("atol", [0, 0.01, 1e-05, 1e-08, 50.12])
def test_cudf_isclose(data1, data2, rtol, atol):
array1 = cupy.array(data1)
array2 = cupy.array(data2)
expected = cudf.Series(cupy.isclose(array1, array2, rtol=rtol, atol=atol))
actual = cudf.isclose(
cudf.Series(data1), cudf.Series(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(data1, data2, rtol=rtol, atol=atol)
assert_eq(expected, actual)
actual = cudf.isclose(
cupy.array(data1), cupy.array(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(
np.array(data1), np.array(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(
pd.Series(data1), pd.Series(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data1",
[
[
-1.9876543,
-2.9876654,
np.nan,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
np.nan,
-21.1212,
],
],
)
@pytest.mark.parametrize(
"data2",
[
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
np.nan,
np.nan,
np.nan,
21.1212,
],
],
)
@pytest.mark.parametrize("equal_nan", [True, False])
def test_cudf_isclose_nulls(data1, data2, equal_nan):
array1 = cupy.array(data1)
array2 = cupy.array(data2)
expected = cudf.Series(cupy.isclose(array1, array2, equal_nan=equal_nan))
actual = cudf.isclose(
cudf.Series(data1), cudf.Series(data2), equal_nan=equal_nan
)
assert_eq(expected, actual, check_dtype=False)
actual = cudf.isclose(data1, data2, equal_nan=equal_nan)
assert_eq(expected, actual, check_dtype=False)
def test_cudf_isclose_different_index():
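# cudf.isclose aligns the inputs on their index labels before comparing, so
# reordered labels still match; labels present on only one side, or aligned
# values that differ, come back False.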
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[0, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 5, 3, 4, 2],
)
expected = cudf.Series([True] * 6, index=s1.index)
assert_eq(expected, cudf.isclose(s1, s2))
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[0, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 5, 10, 4, 2],
)
expected = cudf.Series(
[True, True, True, False, True, True], index=s1.index
)
assert_eq(expected, cudf.isclose(s1, s2))
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[100, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 100, 10, 4, 2],
)
expected = cudf.Series(
[False, True, True, False, True, False], index=s1.index
)
assert_eq(expected, cudf.isclose(s1, s2))
def test_dataframe_to_dict_error():
df = cudf.DataFrame({"a": [1, 2, 3], "b": [9, 5, 3]})
with pytest.raises(
TypeError,
match=re.escape(
r"cuDF does not support conversion to host memory "
r"via `to_dict()` method. Consider using "
r"`.to_pandas().to_dict()` to construct a Python dictionary."
),
):
df.to_dict()
with pytest.raises(
TypeError,
match=re.escape(
r"cuDF does not support conversion to host memory "
r"via `to_dict()` method. Consider using "
r"`.to_pandas().to_dict()` to construct a Python dictionary."
),
):
df["a"].to_dict()
@pytest.mark.parametrize(
"df",
[
pd.DataFrame({"a": [1, 2, 3, 4, 5, 10, 11, 12, 33, 55, 19]}),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
}
),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
},
index=[10, 20, 30, 40, 50, 60],
),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
},
index=["a", "b", "c", "d", "e", "f"],
),
pd.DataFrame(index=["a", "b", "c", "d", "e", "f"]),
pd.DataFrame(columns=["a", "b", "c", "d", "e", "f"]),
pd.DataFrame(index=[10, 11, 12]),
pd.DataFrame(columns=[10, 11, 12]),
pd.DataFrame(),
pd.DataFrame({"one": [], "two": []}),
pd.DataFrame({2: [], 1: []}),
pd.DataFrame(
{
0: [1, 2, 3, 4, 5, 10],
1: ["abc", "def", "ghi", "xyz", "pqr", "abc"],
100: ["a", "b", "b", "x", "z", "a"],
},
index=[10, 20, 30, 40, 50, 60],
),
],
)
def test_dataframe_keys(df):
gdf = cudf.from_pandas(df)
assert_eq(df.keys(), gdf.keys())
@pytest.mark.parametrize(
"ps",
[
pd.Series([1, 2, 3, 4, 5, 10, 11, 12, 33, 55, 19]),
pd.Series(["abc", "def", "ghi", "xyz", "pqr", "abc"]),
pd.Series(
[1, 2, 3, 4, 5, 10],
index=["abc", "def", "ghi", "xyz", "pqr", "abc"],
),
pd.Series(
["abc", "def", "ghi", "xyz", "pqr", "abc"],
index=[1, 2, 3, 4, 5, 10],
),
pd.Series(index=["a", "b", "c", "d", "e", "f"], dtype="float64"),
pd.Series(index=[10, 11, 12], dtype="float64"),
pd.Series(dtype="float64"),
pd.Series([], dtype="float64"),
],
)
def test_series_keys(ps):
gds = cudf.from_pandas(ps)
if len(ps) == 0 and not isinstance(ps.index, pd.RangeIndex):
assert_eq(ps.keys().astype("float64"), gds.keys())
else:
assert_eq(ps.keys(), gds.keys())
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[10, 20, 30]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB")),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[7, 8]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[100]),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame(
{"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]},
index=[100, 200, 300, 400, 500, 0],
),
],
)
@pytest.mark.parametrize(
"other",
[
pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("BD")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("DE")),
pd.DataFrame(),
pd.DataFrame(
{"c": [10, 11, 22, 33, 44, 100]}, index=[7, 8, 9, 10, 11, 20]
),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[200]),
pd.DataFrame([]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
pd.DataFrame([], index=[100]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
],
)
@pytest.mark.parametrize("sort", [False, True])
@pytest.mark.parametrize("ignore_index", [True, False])
def test_dataframe_append_dataframe(df, other, sort, ignore_index):
pdf = df
other_pd = other
gdf = cudf.from_pandas(df)
other_gd = cudf.from_pandas(other)
expected = pdf.append(other_pd, sort=sort, ignore_index=ignore_index)
actual = gdf.append(other_gd, sort=sort, ignore_index=ignore_index)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[10, 20, 30]),
pd.DataFrame({12: [], 22: []}),
pd.DataFrame([[1, 2], [3, 4]], columns=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=[0, 1], index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=[1, 0], index=[7, 8]),
pd.DataFrame(
{
23: [315.3324, 3243.32432, 3232.332, -100.32],
33: [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
0: [315.3324, 3243.32432, 3232.332, -100.32],
1: [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
],
)
@pytest.mark.parametrize(
"other",
[
pd.Series([10, 11, 23, 234, 13]),
pytest.param(
pd.Series([10, 11, 23, 234, 13], index=[11, 12, 13, 44, 33]),
marks=pytest.mark.xfail(
reason="pandas bug: "
"https://github.com/pandas-dev/pandas/issues/35092"
),
),
{1: 1},
{0: 10, 1: 100, 2: 102},
],
)
@pytest.mark.parametrize("sort", [False, True])
def test_dataframe_append_series_dict(df, other, sort):
pdf = df
other_pd = other
gdf = cudf.from_pandas(df)
if isinstance(other, pd.Series):
other_gd = cudf.from_pandas(other)
else:
other_gd = other
expected = pdf.append(other_pd, ignore_index=True, sort=sort)
actual = gdf.append(other_gd, ignore_index=True, sort=sort)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
def test_dataframe_append_series_mixed_index():
df = cudf.DataFrame({"first": [], "d": []})
sr = cudf.Series([1, 2, 3, 4])
with pytest.raises(
TypeError,
match=re.escape(
"cudf does not support mixed types, please type-cast "
"the column index of dataframe and index of series "
"to same dtypes."
),
):
df.append(sr, ignore_index=True)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[10, 20, 30]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB")),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[7, 8]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[100]),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame(
{"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]},
index=[100, 200, 300, 400, 500, 0],
),
],
)
@pytest.mark.parametrize(
"other",
[
[pd.DataFrame([[5, 6], [7, 8]], columns=list("AB"))],
[
pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("BD")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("DE")),
],
[ | pd.DataFrame() | pandas.DataFrame |
#%%
import os
import sys
try:
os.chdir('/Volumes/GoogleDrive/My Drive/python_code/connectome_tools/')
print(os.getcwd())
except:
pass
from pymaid_creds import url, name, password, token
import pymaid
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# allows text to be editable in Illustrator
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
rm = pymaid.CatmaidInstance(url, token, name, password)
adj = pd.read_csv('VNC_interaction/data/axon-dendrite.csv', header = 0, index_col = 0)
inputs = pd.read_csv('VNC_interaction/data/input_counts.csv', index_col = 0)
inputs = | pd.DataFrame(inputs.values, index = inputs.index, columns = ['axon_input', 'dendrite_input']) | pandas.DataFrame |
from pickle import load
from typing import Dict, List
import altair as alt
import streamlit as st
from matplotlib.pyplot import subplots
from pandas import DataFrame, read_csv
from seaborn import heatmap
from sklearn.metrics import confusion_matrix
from streamlit.delta_generator import DeltaGenerator
from .classes import Page
from .config import STREAMLIT_STATIC_PATH
class ModelsEvaluation(Page):
labels = ["bulerias", "alegrias", "sevillanas"]
def write(self):
title = "Models evaluation"
st.title(title)
st.header("Neural networks learning")
model_full_history: Dict = load(
open(STREAMLIT_STATIC_PATH / "data/history_conv_model_70_epochs.p", "rb")
)
cols: List[DeltaGenerator] = st.columns(2)
cols[0].subheader("Model with full data")
full_loss = (
alt.Chart(DataFrame(model_full_history).reset_index())
.transform_fold(["loss", "val_loss"])
.mark_line()
.encode(x="index:Q", y="value:Q", color="key:N")
).properties(width=600)
cols[0].altair_chart(full_loss)
model_partial_history: Dict = load(
open(
STREAMLIT_STATIC_PATH / "data/history_conv_model_only_mel_100_epochs.p",
"rb",
)
)
cols[1].subheader("Model with spectrogram data only")
full_loss = (
alt.Chart(DataFrame(model_partial_history).reset_index())
.transform_fold(["loss", "val_loss"])
.mark_line()
.encode(x="index:Q", y="value:Q", color="key:N")
).properties(width=600)
cols[1].altair_chart(full_loss)
st.header("Prediction execution time")
execution_times = read_csv(STREAMLIT_STATIC_PATH / "data/time_results.csv")
execution_times: DataFrame
boxplot = (
(
alt.Chart(execution_times)
.mark_boxplot(size=50, extent=0.5)
.encode(
x=alt.X("model:N", scale=alt.Scale(type="log")),
y=alt.Y("time:Q", scale=alt.Scale(zero=False)),
color=alt.Color("model", legend=None),
)
)
.properties(width=900, height=600)
.configure_axis(labelFontSize=16, titleFontSize=16)
)
st.altair_chart(boxplot)
st.header("Prediction metrics")
prediction_metrics = read_csv(STREAMLIT_STATIC_PATH / "data/metrics.csv")
prediction_metrics: DataFrame
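# Bind a multi-selection to the legend so clicking a metric name toggles the
# corresponding bars via the opacity condition below.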
selection = alt.selection_multi(fields=["metric_name"], bind="legend")
bar_plot = (
alt.Chart(prediction_metrics)
.mark_bar(opacity=0.7)
.encode(
x="model:N",
y=alt.Y("metric_val", stack=None),
color=alt.Color(
"metric_name",
),
opacity=alt.condition(selection, alt.value(1), alt.value(0)),
)
.add_selection(selection)
.properties(width=900, height=600)
.configure_axis(labelFontSize=16, titleFontSize=16)
)
st.altair_chart(bar_plot)
st.header("Confusion matrices")
confusion_data = | read_csv(STREAMLIT_STATIC_PATH / "data/conf.csv") | pandas.read_csv |
from datetime import timedelta
from functools import partial
import itertools
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
from zipline.pipeline.loaders.earnings_estimates import (
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import pytest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date, sids, tuples, end_date):
"""
Given a list of tuples of the new data received for each sid on each
critical date (i.e. whenever the information changes), create a DataFrame
that forward-fills that data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples, columns=[SID_FIELD_NAME, "estimate", "knowledge_date"])
df = df.pivot_table(
columns=SID_FIELD_NAME, values="estimate", index="knowledge_date", dropna=False
)
df = df.reindex(pd.date_range(start_date, end_date))
# Index name is lost during reindex.
df.index = df.index.rename("knowledge_date")
df["at_date"] = end_date.tz_localize("utc")
df = df.set_index(["at_date", df.index.tz_localize("utc")]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
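# Illustrative call (hypothetical values, not taken from any test below):
#   create_expected_df_for_factor_compute(
#       pd.Timestamp("2015-01-05"),
#       [0, 1],
#       [(0, 100.0, pd.Timestamp("2015-01-05")),
#        (1, 200.0, pd.Timestamp("2015-01-06"))],
#       pd.Timestamp("2015-01-08"),
#   )
# returns a frame indexed by (at_date, knowledge_date) with one column per
# sid, forward-filled through the end date.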
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp("2014-12-28", tz="utc")
END_DATE = pd.Timestamp("2015-02-04", tz="utc")
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError("make_loader")
@classmethod
def make_events(cls):
raise NotImplementedError("make_events")
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days,
self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
"s" + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(
cls.events, {column.name: val for column, val in cls.columns.items()}
)
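# Note on the mixin wiring above: concrete test cases typically override only
# ``make_loader`` and ``make_events``; ``init_class_fixtures`` then builds the
# loader once per class using the BoundColumn -> raw-column-name mapping
# returned by ``make_columns``.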
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
    test_load_one_day()
        Tests that running a Pipeline for a single day returns the expected
        estimates when multiple estimate columns are requested.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: "event_date",
MultipleColumnsEstimates.fiscal_quarter: "fiscal_quarter",
MultipleColumnsEstimates.fiscal_year: "fiscal_year",
MultipleColumnsEstimates.estimate1: "estimate1",
MultipleColumnsEstimates.estimate2: "estimate2",
}
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate1": [1.0, 2.0],
"estimate2": [3.0, 4.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def make_expected_out(cls):
raise NotImplementedError("make_expected_out")
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp("2015-01-15", tz="utc"),
end_date=pd.Timestamp("2015-01-15", tz="utc"),
)
        assert_frame_equal(
            results.sort_index(axis=1), self.expected_out.sort_index(axis=1)
        )
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
    Tests that the previous quarter loader returns the expected values for a
    single day when multiple estimate columns are requested.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-10"),
"estimate1": 1.0,
"estimate2": 3.0,
FISCAL_QUARTER_FIELD_NAME: 1.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
    Tests that the next quarter loader returns the expected values for a
    single day when multiple estimate columns are requested.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-20"),
"estimate1": 2.0,
"estimate2": 4.0,
FISCAL_QUARTER_FIELD_NAME: 2.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
dummy_df = pd.DataFrame(
{SID_FIELD_NAME: 0},
columns=[
SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
"estimate",
],
index=[0],
)
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
        Tests that the loader raises an AttributeError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {
c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1, bad_dataset2, good_dataset)
for c in dataset.columns
}
p = Pipeline(columns)
err_msg = (
r"Passed invalid number of quarters -[0-9],-[0-9]; "
r"must pass a number of quarters >= 0"
)
with pytest.raises(ValueError, match=err_msg):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with pytest.raises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = [
"split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof",
]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(
itertools.product(
(
NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
),
)
)
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
with pytest.raises(ValueError):
loader(
dummy_df,
{column.name: val for column, val in columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"),
)
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp("2015-01-28", tz="utc")
q1_knowledge_dates = [
pd.Timestamp("2015-01-01"),
pd.Timestamp("2015-01-04"),
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-11"),
]
q2_knowledge_dates = [
pd.Timestamp("2015-01-14"),
pd.Timestamp("2015-01-17"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-23"),
]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-14"),
] # One day late
q2_release_dates = [
pd.Timestamp("2015-01-25"), # One day early
pd.Timestamp("2015-01-26"),
]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates + cls.q2_knowledge_dates, 4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (
q1e1 < q1e2
and q2e1 < q2e2
# All estimates are < Q2's event, so just constrain Q1
# estimates.
and q1e1 < cls.q1_release_dates[0]
and q1e2 < cls.q1_release_dates[0]
):
sid_estimates.append(
cls.create_estimates_df(q1e1, q1e2, q2e1, q2e2, sid)
)
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates + sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-13"), pd.Timestamp("2015-01-26")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-26"),
],
"estimate": [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid,
}
)
@classmethod
def create_estimates_df(cls, q1e1, q1e2, q2e1, q2e2, sid):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
"estimate": [0.1, 0.2, 0.3, 0.4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
}
)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert sid_estimates.isnull().all().all()
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[
self.get_expected_estimate(
q1_knowledge[
q1_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
q2_knowledge[
q2_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
date.tz_localize(None),
).set_index([[date]])
for date in sid_estimates.index
],
axis=0,
)
sid_estimates.index = all_expected.index.copy()
assert_equal(all_expected[sid_estimates.columns], sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q2_knowledge.iloc[-1:]
elif (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
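# Worked example (comments only; dates come from the fixtures above): on a
# simulation date of 2015-01-15, with Q1's release known to have happened on
# 2015-01-13 and Q2's release expected on 2015-01-26:
#   * the "next" loader returns the Q2 estimate, because Q2 has the first
#     event date that is >= the simulation date;
#   * the "previous" loader returns the Q1 estimate, because Q1 has the most
#     recent event date that is <= the simulation date.
# If neither condition holds, both loaders fall back to an all-null row.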
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
        Runs a Pipeline that requests estimates for multiple quarters
out and checks that the returned columns contain data for the correct
number of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate": [1.0, 2.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(
columns=[cls.columns[col] + "1" for col in cls.columns]
+ [cls.columns[col] + "2" for col in cls.columns],
index=cls.trading_days,
)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ("1", "2")
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(expected[expected_name])
else:
expected[expected_name] = expected[expected_name].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge(
[
{c.name + "1": c.latest for c in dataset1.columns},
{c.name + "2": c.latest for c in dataset2.columns},
]
)
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + "1" for col in self.columns]
q2_columns = [col.name + "2" for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(
sorted(np.array(q1_columns + q2_columns)), sorted(results.columns.values)
)
assert_equal(
self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1),
)
class NextEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp(
"2015-01-11", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp("2015-01-11", tz="UTC") : pd.Timestamp(
"2015-01-20", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ["estimate", "event_date"]:
expected.loc[
pd.Timestamp("2015-01-06", tz="UTC") : pd.Timestamp(
"2015-01-10", tz="UTC"
),
col_name + "2",
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-09", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 2
expected.loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 3
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_YEAR_FIELD_NAME + "2",
] = 2015
return expected
class PreviousEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp(
"2015-01-19", tz="UTC"
)
] = cls.events[raw_name].iloc[0]
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ["estimate", "event_date"]:
expected[col_name + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[col_name].iloc[0]
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 4
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 2014
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 1
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 2015
return expected
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
]
* 2,
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-20"),
],
"estimate": [11.0, 12.0, 21.0] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6,
}
)
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError("assert_compute")
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=pd.Timestamp("2015-01-13", tz="utc"),
# last event date we have
end_date=pd.Timestamp("2015-01-14", tz="utc"),
)
class PreviousVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
class WithEstimateWindows(WithEstimates):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
        A dictionary mapping the number of quarters out to
snapshots of how the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp("2015-02-10", tz="utc")
window_test_start_date = pd.Timestamp("2015-01-05")
critical_dates = [
pd.Timestamp("2015-01-09", tz="utc"),
pd.Timestamp("2015-01-15", tz="utc"),
pd.Timestamp("2015-01-20", tz="utc"),
pd.Timestamp("2015-01-26", tz="utc"),
pd.Timestamp("2015-02-05", tz="utc"),
pd.Timestamp("2015-02-10", tz="utc"),
]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
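    # For reference: ``window_test_cases`` expands to 12 cases, one per
    # (start_date, num_announcements_out) pair, e.g.
    # (Timestamp("2015-01-09"), 1), (Timestamp("2015-01-09"), 2), ...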
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-02-10"),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp("2015-01-18"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-04-01"),
],
"estimate": [100.0, 101.0] + [200.0, 201.0] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
}
)
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-15"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-02-05"),
pd.Timestamp("2015-02-05"),
],
"estimate": [110.0, 111.0] + [310.0, 311.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10,
}
)
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-07"),
cls.window_test_start_date,
pd.Timestamp("2015-01-17"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
],
"estimate": [120.0, 121.0] + [220.0, 221.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 20,
}
)
concatted = pd.concat(
[sid_0_timeline, sid_10_timeline, sid_20_timeline]
).reset_index()
np.random.seed(0)
return concatted.reindex(np.random.permutation(concatted.index))
@classmethod
def get_sids(cls):
sids = sorted(cls.events[SID_FIELD_NAME].unique())
# Add extra sids between sids in our data. We want to test that we
# apply adjustments to the correct sids.
return [
sid for i in range(len(sids) - 1) for sid in range(sids[i], sids[i + 1])
] + [sids[-1]]
@classmethod
def make_expected_timelines(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithEstimateWindows, cls).init_class_fixtures()
cls.create_expected_df_for_factor_compute = partial(
create_expected_df_for_factor_compute,
cls.window_test_start_date,
cls.get_sids(),
)
cls.timelines = cls.make_expected_timelines()
@parameterized.expand(window_test_cases)
def test_estimate_windows_at_quarter_boundaries(
self, start_date, num_announcements_out
):
dataset = QuartersEstimates(num_announcements_out)
trading_days = self.trading_days
timelines = self.timelines
# The window length should be from the starting index back to the first
# date on which we got data. The goal is to ensure that as we
# progress through the timeline, all data we got, starting from that
# first date, is correctly overwritten.
window_len = (
self.trading_days.get_loc(start_date)
- self.trading_days.get_loc(self.window_test_start_date)
+ 1
)
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = window_len
def compute(self, today, assets, out, estimate):
today_idx = trading_days.get_loc(today)
today_timeline = (
timelines[num_announcements_out]
.loc[today]
.reindex(trading_days[: today_idx + 1])
.values
)
timeline_start_idx = len(today_timeline) - window_len
assert_almost_equal(estimate, today_timeline[timeline_start_idx:])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=start_date,
# last event date we have
end_date=pd.Timestamp("2015-02-10", tz="utc"),
)
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat(
[
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-19")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-20")),
],
pd.Timestamp("2015-01-20"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-20")),
],
pd.Timestamp("2015-01-21"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 111, pd.Timestamp("2015-01-22")),
(20, 121, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-01-22", "2015-02-04")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 311, pd.Timestamp("2015-02-05")),
(20, 121, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-02-05", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 201, pd.Timestamp("2015-02-10")),
(10, 311, pd.Timestamp("2015-02-05")),
(20, 221, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_previous = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-02-09")
]
            # We never get estimates for sid 10 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-02-10")),
(10, np.NaN, pd.Timestamp("2015-02-05")),
(20, 121, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
)
]
)
return {1: oneq_previous, 2: twoq_previous}
class NextEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
pd.Timestamp("2015-01-09"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
end_date,
)
for end_date in pd.date_range("2015-01-12", "2015-01-19")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(0, 101, pd.Timestamp("2015-01-20")),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
pd.Timestamp("2015-01-20"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-01-21", "2015-01-22")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, 310, pd.Timestamp("2015-01-09")),
(10, 311, pd.Timestamp("2015-01-15")),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-01-23", "2015-02-05")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-02-06", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(0, 201, pd.Timestamp("2015-02-10")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-11")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-12", "2015-01-16")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
pd.Timestamp("2015-01-20"),
)
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-21", "2015-02-10")
]
)
return {1: oneq_next, 2: twoq_next}
class WithSplitAdjustedWindows(WithEstimateWindows):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows and with
split adjustments.
"""
split_adjusted_asof_date = pd.Timestamp("2015-01-14")
@classmethod
def make_events(cls):
# Add an extra sid that has a release before the split-asof-date in
# order to test that we're reversing splits correctly in the previous
# case (without an overwrite) and in the next case (with an overwrite).
sid_30 = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-09"),
# For Q2, we want it to start early enough
# that we can have several adjustments before
# the end of the first quarter so that we
# can test un-adjusting & readjusting with an
# overwrite.
cls.window_test_start_date,
# We want the Q2 event date to be enough past
# the split-asof-date that we can have
# several splits and can make sure that they
# are applied correctly.
pd.Timestamp("2015-01-20"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
],
"estimate": [130.0, 131.0, 230.0, 231.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 30,
}
)
# An extra sid to test no splits before the split-adjusted-asof-date.
# We want an event before and after the split-adjusted-asof-date &
# timestamps for data points also before and after
        # split-adjusted-asof-date (but also before the split dates, so that
# we can test that splits actually get applied at the correct times).
sid_40 = pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-09"), pd.Timestamp("2015-01-15")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-02-10"),
],
"estimate": [140.0, 240.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 40,
}
)
# An extra sid to test all splits before the
# split-adjusted-asof-date. All timestamps should be before that date
# so that we have cases where we un-apply and re-apply splits.
sid_50 = pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-09"), pd.Timestamp("2015-01-12")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-02-10"),
],
"estimate": [150.0, 250.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 50,
}
)
return pd.concat(
[
# Slightly hacky, but want to make sure we're using the same
# events as WithEstimateWindows.
cls.__base__.make_events(),
sid_30,
sid_40,
sid_50,
]
)
@classmethod
def make_splits_data(cls):
# For sid 0, we want to apply a series of splits before and after the
        # split-adjusted-asof-date as well as between quarters (for the
# previous case, where we won't see any values until after the event
# happens).
sid_0_splits = pd.DataFrame(
{
SID_FIELD_NAME: 0,
"ratio": (-1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100),
"effective_date": (
pd.Timestamp("2014-01-01"), # Filter out
# Split before Q1 event & after first estimate
pd.Timestamp("2015-01-07"),
# Split before Q1 event
pd.Timestamp("2015-01-09"),
# Split before Q1 event
pd.Timestamp("2015-01-13"),
# Split before Q1 event
pd.Timestamp("2015-01-15"),
# Split before Q1 event
pd.Timestamp("2015-01-18"),
# Split after Q1 event and before Q2 event
pd.Timestamp("2015-01-30"),
# Filter out - this is after our date index
pd.Timestamp("2016-01-01"),
),
}
)
sid_10_splits = pd.DataFrame(
{
SID_FIELD_NAME: 10,
"ratio": (0.2, 0.3),
"effective_date": (
# We want a split before the first estimate and before the
# split-adjusted-asof-date but within our calendar index so
# that we can test that the split is NEVER applied.
pd.Timestamp("2015-01-07"),
# Apply a single split before Q1 event.
pd.Timestamp("2015-01-20"),
),
}
)
# We want a sid with split dates that collide with another sid (0) to
# make sure splits are correctly applied for both sids.
sid_20_splits = pd.DataFrame(
{
SID_FIELD_NAME: 20,
"ratio": (
0.4,
0.5,
0.6,
0.7,
0.8,
0.9,
),
"effective_date": (
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-15"),
pd.Timestamp("2015-01-18"),
                    pd.Timestamp("2015-01-30"),
"""Class and container for pedigree information, vcf, and bam file by sample"""
from __future__ import print_function
import pandas as pd
import re
import func
class Ped:
"""Family_ID - '.' or '0' for unknown
Individual_ID - '.' or '0' for unknown
Paternal_ID - '.' or '0' for unknown
Maternal_ID - '.' or '0' for unknown
Sex - '1'=male; '2'=female; ['other', '0', '.']=unknown
Phenotype - '1'=unaffected, '2'=affected, ['-9', '0', '.']= missing"""
def __init__(self, ped_file_name, extra_column_names=[]):
"""read ped file into pandas data frame"""
self.fname = ped_file_name
self.ped = pd.read_table(self.fname, usecols=range(6+len(extra_column_names)))
self.ped.columns = ['fam_id', 'ind_id', 'fa_id', 'mo_id', 'sex', 'pheno'] + extra_column_names
self.ped.replace(['.', '0', 0, -9, '-9'], [None]*5, inplace=True)
self.ped['fam_id'] = self.ped['fam_id'].astype(str)
def addVcf(self, field='fam_id', file_pat='/mnt/ceph/asalomatov/SSC_Eichler/rerun/ssc%s/%s-JHC-vars.vcf.gz'):
        num_subst = len(re.findall(r'%s', file_pat))
print('%s substitutions found' % num_subst)
if num_subst > 0:
x = self.ped[field].apply(lambda f: func.checkFile(file_pat % ((f,) * num_subst)))
            self.ped['vcf'] = pd.Series(x, index=self.ped.index)
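# Hedged usage sketch for the Ped class above (the file path, column layout,
# and pattern below are assumptions for illustration, not part of the module):
#
#     ped = Ped("family.ped")
#     ped.addVcf(field="fam_id",
#                file_pat="/path/to/vcf/%s.vcf.gz")  # one %s substitution
#     print(ped.ped[["fam_id", "ind_id", "vcf"]].head())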
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 31 11:00:16 2020
@author: DWXMG
"""
# import importlib.util
# import sys
# from collections import namedtuple, OrderedDict
from pathlib import Path
import itertools
# from itertools import combinations
# import operator
import datetime as dt
import random
from math import pi
# import copy
# import pickle
import pandas as pd
import numpy as np
from cmath import phase
import matplotlib.pyplot as plt
from matplotlib.offsetbox import (
AnchoredOffsetbox,
DrawingArea,
HPacker,
TextArea,
) # TODO
from scipy import stats
from scipy.stats import linregress
from scipy.optimize import curve_fit
# from lmfit import Parameters, conf_interval, Minimizer, minimize
# import numdifftools
# import corner
from file_py_helper.file_functions import FileOperations
from .validation import get_KKvalid, prep_GP_DRT_raw_data
from .models import Model_Collection
from .plotting import (
plot_linKK,
EIS_Trimming_plot,
EIS_plotting_per_EV,
plot_lin_Warburg,
)
from .DRT_DP_fitting import DP_DRT_analysis
from .GP_DRT_fitting import run_GP_DRT_fit
# from .plotting import EIS_plotting_per_EV, EIS_Trimming_plot, EIS_plotting_EvRHE
# _logger = start_logging(__name__)
import logging
_logger = logging.getLogger(__name__)
# fit_export_templ = namedtuple('fit_export_templ', 'fit_spectra fit_pars meta_index')
# Meta = namedtuple('Meta', 'PAR_file Segment E_dc_RHE E_dc_RHE_mV RPM_DAC data ovv')
globals()["EvRHE"] = "E_AppV_RHE"
def func_lin(a):
def func(x, b):
return a * x + b
return func
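# Minimal illustration of the closure above: ``func_lin(a)`` fixes the slope
# ``a`` and returns a one-parameter linear function of ``x`` and intercept
# ``b``, which suits curve_fit-style callers that only optimize ``b``.
#
#     f = func_lin(2.0)
#     f(3.0, 1.0)  # -> 7.0, i.e. 2.0 * 3.0 + 1.0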
def fitting_recheck_params(fit_run_arg, modname, params_model, **EIS_fit_kwargs):
# PF,E,RPM = str(fit_run_arg.PAR_file), fit_run_arg.E_dc_RHE, fit_run_arg.RPM_DAC
_key = (
str(fit_run_arg[0]),
int(fit_run_arg[1]),
*[float(i) for i in fit_run_arg[2:4]],
int(fit_run_arg[4]),
modname,
)
# (PF,E,RPM,modname)
_get_params = pd.DataFrame()
# ('/mnt/DATA/EKTS_CloudStation/CloudStation/Experimental data/Raw_data/VERSASTAT/2019-05-May/06.05.2019_0.1MH2SO4_cell2/O2_EIS-range_1500rpm_JOS2_288.par',
# 4, 0.708, 708.0, 1500, 'Model(Randles_RQRQ)')
# ['PAR_file',EvRHE,'RPM_DAC','Model_EEC']
bad_grp, good_grp = EIS_fit_kwargs.get("EIS_recheck_bad_fits"), EIS_fit_kwargs.get(
"EIS_recheck_good_fits"
)
sugg_grp = EIS_fit_kwargs.get("EIS_recheck_bad_fits_suggestions")
recheck_msg = ""
if all([len(i.groups) > 0 for i in [bad_grp, good_grp, sugg_grp]]):
if [i for i in good_grp.groups if _key == i]:
recheck_msg += "Prefit recheck in good keys"
_get_params = good_grp.get_group(_key)
elif [i for i in bad_grp.groups if _key == i]:
recheck_msg += f"Prefit recheck in bad keys {_key} and"
_sugg_match = [i for i in sugg_grp.groups if _key == i]
if _sugg_match:
recheck_msg += " taking suggestions."
_get_params = sugg_grp.get_group(_key)
else:
recheck_msg += " not in suggestions."
# _logger.warning(f'Prefit recheck bad key {_key} not in suggestions')
else:
recheck_msg += f"Prefit recheck keys not in good or bad {_key}"
else:
recheck_msg += f"Prefit recheck empty frames"
# _logger.warning(recheck_msg)
return _get_params, recheck_msg
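# For reference, ``_key`` above is the lookup tuple
# (PAR_file, Segment, E_dc_RHE, E_dc_RHE_mV, RPM_DAC, model_name); it has to
# match the group keys of the recheck frames supplied through
# ``EIS_fit_kwargs`` (see the commented example inside the function body).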
#%%
def make_prefit_frame(
EIS_data_KKvalid,
lmfitting,
prefix="pp",
plot=False,
check_err=True,
norm=np.array([]),
get_frame=False,
):
# norm = 1/(Z_KKv.real**2+Z_KKv.imag**2)
# abs(Z_KKv)
# norm = 1
# norm = np.sqrt(EIS_data_KKvalid.DATA_weightsmod_Z.values)
# norm = 1/abs(Z_KKv)
# norm = 1/np.sqrt(EIS_data_KKvalid.DATA_weightsmod_Z.values)
# lmfitting = best_trial
# EIS_data_KKvalid, lmfitting = _spectrum.EIS_data_KKvalid,best_trial_weights # FIXME
# make_prefit_frame(EIS_data_KKvalid, out, plot = 'Y')
    # Default to unit weights when no normalisation array was passed in.
    if norm.size == 0:
        norm = np.array([1] * len(lmfitting.best_fit.real))
if "DataFrame" in type(EIS_data_KKvalid).__name__:
Z_KKv = EIS_data_KKvalid.DATA_Z.values
elif "array" in type(EIS_data_KKvalid).__name__:
Z_KKv = EIS_data_KKvalid
EIS_data_KKvalid = pd.DataFrame(Z_KKv)
EIS_data_KKvalid = EIS_data_KKvalid.loc[
EIS_data_KKvalid.DATA_Zre.isin(lmfitting.data.real)
& EIS_data_KKvalid.DATA_Zim.isin(lmfitting.data.imag)
]
if norm.size == 0:
norm = (
lmfitting.data.real / lmfitting.data.real
) # (Z_KKv.real**2+Z_KKv.imag**2)**-1
if not "float" in str(type(lmfitting.residual)) and lmfitting.success:
resIm, resRe = lmfitting.residual[1::2], lmfitting.residual[0::2]
else:
resIm, resRe = 1e9, 1e9
pp_errRE, pp_errIM = (lmfitting.best_fit.real - lmfitting.data.real) ** 2, (
lmfitting.best_fit.imag - lmfitting.data.imag
) ** 2
pp_errRE_mean, pp_errIM_mean = pp_errRE.mean(), pp_errIM.mean()
# MSE_Re,MSE_Im= (lmfitting.best_fit.real-Z_KKv.real)**2, (lmfitting.best_fit.imag-Z_KKv.imag)**2
MSE = np.sqrt(sum(pp_errRE) + sum(pp_errIM))
# pp_errRE_std,pp_errIM_std = np.abs(pp_errRE).std(), np.abs(pp_errIM).std()
pp_Z = lmfitting.best_fit
pp_Y = pp_Z ** -1
prefit_data = EIS_data_KKvalid.assign(
**{
f"{prefix}_err_Re": pp_errRE,
f"{prefix}_err_Im": pp_errIM,
f"{prefix}_Z_Re": pp_Z.real,
f"{prefix}_Z_Im": -1 * pp_Z.imag,
f"{prefix}_Y_Re": pp_Y.real,
f"{prefix}_Y_Im": pp_Y.imag,
f"{prefix}_res_Re": resRe,
f"{prefix}_res_Im": resIm,
f"{prefix}_norm": norm,
}
)
if get_frame:
return prefit_data
ext_ang = np.logspace(-3, 4, endpoint=True) * 2.0 * pi
ext_model = lmfitting.eval(lmfitting.params, ang=ext_ang)
extended_data = pd.DataFrame(
{
f"{prefix}_ext_Z_Re": ext_model.real,
f"{prefix}_ext_Z_Im": -1 * ext_model.imag,
f"{prefix}_ext_Y_Re": (ext_model ** -1).real,
f"{prefix}_ext_Y_Im": (ext_model ** -1).imag,
f"{prefix}_ext_freq": ext_ang / (2.0 * pi),
}
)
if plot:
fig, ax = plt.subplots(3, 1, figsize=(4, 8))
# if 'Y' in str(plot_pp):
prefit_data.plot(x=f"DATA_Yre", y=f"DATA_Yim", kind="scatter", ax=ax[0])
prefit_data.plot(x=f"{prefix}_Y_Re", y=f"{prefix}_Y_Im", c="r", ax=ax[0])
# else:
prefit_data.plot(
x=f"DATA_Zre",
y=f"DATA_-Zim",
kind="scatter",
ax=ax[1],
logy=True,
logx=True,
)
prefit_data.plot(
x=f"{prefix}_Z_Re",
y=f"{prefix}_Z_Im",
c="r",
ax=ax[1],
logy=True,
logx=True,
)
if not extended_data.empty:
extended_data.plot(
x=f"{prefix}_ext_Z_Re",
y=f"{prefix}_ext_Z_Im",
c="g",
ax=ax[1],
logy=True,
logx=True,
)
extended_data.plot(
x=f"{prefix}_ext_Y_Re", y=f"{prefix}_ext_Y_Im", c="g", ax=ax[0]
)
# extended_data.plot(x=f'{prefix}_ext_Z_Re', y=f'{prefix}_ext_Z_Im',c='g',xlim=(0,500),ylim=(0,500))
#
prefit_data.plot(x="Frequency(Hz)", y=f"{prefix}_err_Re", c="g", ax=ax[2])
prefit_data.plot(x="Frequency(Hz)", y=f"{prefix}_err_Im", c="k", ax=ax[2])
box1 = TextArea(
lmfitting.fit_report(min_correl=0.45), textprops=dict(color="k")
)
box = HPacker(children=[box1], align="center", pad=0, sep=5)
anchored_box = AnchoredOffsetbox(
loc="lower left",
child=box,
pad=0.0,
frameon=True,
bbox_to_anchor=(1.1, 0.02),
bbox_transform=ax[2].transAxes,
borderpad=0.0,
)
ax[0].add_artist(anchored_box)
# axbox = plt.axes([1.1, 0.05, 0.8, 0.075])
# text_box = TextBox(axbox, 'Evaluate', initial=initial_text)
# text_box.on_submit(submit)
# ax[0].text(print(lmfitting.fit_report()))
# ax22 = ax[2].twinx()
# prefit_data.plot(x='Frequency(Hz)', y=f'{prefix}_res_Re',c='g', ax= ax[2])
# prefit_data.plot(x='Frequency(Hz)', y=f'{prefix}_res_Im',c='k', ax= ax[2])
# f'{prefix}_norm'
# prefit_data.plot(x='Frequency(Hz)', y='DATA_weightsmod_Z' ,c='orange', ax= ax22)
ax[2].set_xscale("log")
ax[0].set_xlim(0, prefit_data[f"{prefix}_Y_Re"].max() * 1.5)
ax[0].set_ylim(0, prefit_data[f"{prefix}_Y_Im"].max() * 1.5)
ax[1].set_xlim(0, prefit_data[f"{prefix}_Z_Im"].max() * 4)
ax[1].set_ylim(0, prefit_data[f"{prefix}_Z_Im"].max() * 2)
# ax[2].set_yscale('log')
plt.show()
plt.close()
if check_err:
# test_out =''
# test_out = [False,False]
# n_std = 1E-3
# while all(test_out) == False:
# for name,freqlim in [('low freq',20),('high freq',500)]:
lf_data = prefit_data.loc[prefit_data["Frequency(Hz)"] < 20]
hf_data = prefit_data.loc[prefit_data["Frequency(Hz)"] > 500]
# , prefit_data.loc[prefit_data['Frequency(Hz)'] > freqlim]
lf_errRe_mean, lf_errIm_mean = sum(lf_data[f"{prefix}_err_Re"] ** 2), sum(
lf_data[f"{prefix}_err_Im"] ** 2
)
hf_errRe_mean, hf_errIm_mean = sum(hf_data[f"{prefix}_err_Re"] ** 2), sum(
hf_data[f"{prefix}_err_Im"] ** 2
)
lf_ratio_Re, hf_ratio_Re = (
lf_errRe_mean / pp_errRE_mean,
hf_errRe_mean / pp_errRE_mean,
)
lf_ratio_Im, hf_ratio_Im = (
lf_errIm_mean / pp_errIM_mean,
hf_errIm_mean / pp_errIM_mean,
)
# if all([lf_errRe_mean > n_std*pp_errRE_std + pp_errRE_mean, lf_errIm_mean > n_std*pp_errIM_std + pp_errIM_mean]):
# if test_out[0] == False:
# test_out[0] = n_std
# if all([hf_errRe_mean > n_std*pp_errRE_std + pp_errRE_mean, hf_errIm_mean > n_std*pp_errIM_std + pp_errIM_mean]):
# if test_out[1] == False:
# test_out[1] = n_std
## =test_out + f'bad fit {name} (lim {freqlim} Hz)\n'
# n_std += 0.01
test_out = [lf_errRe_mean + lf_errIm_mean, hf_errRe_mean + hf_errIm_mean]
good_fit_test = True if any(i < 0.5 for i in test_out) else False
return good_fit_test, test_out, MSE
def residual(params, Z_KKv, ang, model_set, weights=None):
model = model_set.eval(params, ang=ang)
MSE_re = (model.real - Z_KKv.real) ** 2
MSE_im = (model.imag - Z_KKv.imag) ** 2
MSE = MSE_re + MSE_im
resid = model - Z_KKv
    return resid.view(float)
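# Hedged sketch (comments only, not executed): a residual function like the
# one above is what lmfit's minimizers expect. The parameter name and start
# value below are hypothetical; real parameters come from ``Model_Collection``.
#
#     from lmfit import Parameters, minimize
#     params = Parameters()
#     params.add("Rs", value=20, min=0)
#     result = minimize(residual, params, args=(Z_KKv, ang, model_set))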
class Fit_Spectra_per_file:
"""This class will take the fit_run_arg and
run the steps for fitting the EIS spectrum"""
def __init__(self, _eis_run):
        self.eis_run = _eis_run
def __getattr__(self, attr):
return getattr(self.eis_run, attr)
def fit_mean_PAR_file(self):
for PF in itertools.groupby(self.fit_run_args, lambda x: x.PAR_file):
yield self._get_mean_EIS_from_args(*PF)
def _get_mean_EIS_from_args(self, PF, PF_run_args_gen):
global EvRHE
_new_PF_mean = PF.with_name(PF.stem + "_Zmean" + PF.suffix)
_prep_mean_args = list(PF_run_args_gen)
_PF_mean_ovv = _prep_mean_args[0].ovv
# _PF_mean_ovv['Measured_OCP'] = [i[0] for i in _PF_mean_ovv['_act0_Measured Open Circuit'].str.split()]
# _PF_mean_ovv['PAR_file'] = _new_PF_mean
_PF_mean_ovv = _PF_mean_ovv.assign(**{"PAR_file": _new_PF_mean})
_PF_data = pd.concat(i.data for i in _prep_mean_args)
_numcols = [
i
for i in _PF_data.columns
if _PF_data[i].dtype != "O" and not "Frequency" in i
]
_PF_data_mean = _PF_data.groupby("Frequency(Hz)")[_numcols].mean().reset_index()
# _PF_data_mean.plot(x='Z Real',y='Z Imag', kind='scatter') # test plot
_PF_data_mean = _PF_data_mean.assign(**_PF_mean_ovv.iloc[0].to_dict())
# _PF_data_mean = _PF_data_mean.sort_values('Frequency(Hz)',ascending=False)
# _join_cols = list(_PF_data_mean.columns.intersection(_PF_mean_ovv.columns))
# _PF_data_mean = _PF_data_mean.join(_PF_mean_ovv.set_index('PAR_file'),on=_join_cols,how='left')
# _merge = pd.concat([_PF_data_mean, _PF_mean_ovv],axis=1)
# .set_index('PAR_file'),on=_join_cols,how='outer')
_PF_data_mean[["Segment #", EvRHE, "RPM_DAC"]]
_PF_data_mean_grp = _PF_data_mean.groupby(
["PAR_file", "Segment #", EvRHE, "RPM_DAC"]
)
fit_run_arg_mean = [
Fit_Spectrum(
Path(PF),
int(Seg),
np.round(float(E_V), 3),
int(RPM_DAC),
gr,
_PF_mean_ovv,
)
for (PF, Seg, E_V, RPM_DAC), gr in _PF_data_mean_grp
][0]
self.fit_run_arg_mean = fit_run_arg_mean
class Fit_Spectra_Collection:
"""This class will take the EIS_spectra_collection and
run the steps for fitting the EIS spectrum"""
global EvRHE
def __init__(self, _EIS_spectra_pf, **kwargs):
assert type(_EIS_spectra_pf).__name__ == "EIS_spectra_collection"
# print(isinstance(_EIS_spectrum_arg,EIS_Spectrum))
self._spectra = _EIS_spectra_pf
self._kwargs = kwargs
_logger.warning(
f"Starting {dt.datetime.now():%Y-%m-%d %H:%M:%S}; {len(self._spectra.spectra)} {self._spectra} {self.__class__.__name__}"
)
self._dest_pars = self._spectra.ovv.EIS_dest_Pars.iloc[0].with_suffix(".pkl")
self.Model_Collection = Model_Collection()
# _startswith='Q_'
self.lmfit_models = self.Model_Collection
try:
self.fit_mean_spec = Fit_Spectrum(
self._spectra.mean_spectrum,
**{**self._kwargs, **dict(linKK_trimming_factor=4, res_max=0.16)},
)
self.load_pars_models_set_pretrials()
self.fit_mean_spec.Model_Collection = self.Model_Collection
if "lmfit" in self._spectra.EIS_kwargs.get("run_fit_mean"):
self.lmfit_mean = LMfit_method(self.fit_mean_spec, run_prefit=True)
self.results = {}
self.load_pars_models_set_pretrials()
for _spectrum in self._spectra.spectra:
self._results = {}
try:
_fit_spectrum = Fit_Spectrum(_spectrum, **self._spectra.EIS_kwargs)
self._results.update({"fit": _fit_spectrum})
if hasattr(
self, "lmfit_mean"
) and not "mean" in self._spectra.EIS_kwargs.get("run_fit_mean"):
_fit_spectrum.Model_Collection = self.Model_Collection
_fit_spectrum.PRETRIALS_weights = (
self.lmfit_mean.PRETRIALS_weights
)
_lmfit_spectrum = LMfit_method(_fit_spectrum)
self._results.update({"lmfit": _lmfit_spectrum})
self.results.update({str(_spectrum): self._results})
except Exception as e:
_logger.error(
f"Errror in trying fit: {_spectrum} {self.__class__.__name__}, {e} "
)
_fit_spectrum = f"fail: {e}"
self.save_pars()
except Exception as e:
_logger.error(
f"Errror in lmfit: {len(self._spectra.spectra)} {_EIS_spectra_pf} {self.__class__.__name__}, {e} "
)
# for _spectrum in self._spectra.spectra:
# self.lmfit_results.update({str(_spectrum) : LMfit_method(self.results.get(str(_spectrum)),
# _extra_init_params = self.lmfit_mean.PRETRIALS_weights)})
def load_pars_models_set_pretrials(self):
if self._dest_pars.is_file():
try:
if not hasattr(self, "loaded_pars"):
                    self.loaded_pars = pd.DataFrame()
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from functools import partial
import sys
from pathlib import Path
import importlib
import datetime
import fire
import pandas as pd
from tqdm import tqdm
from loguru import logger
CUR_DIR = Path(__file__).resolve().parent
sys.path.append(str(CUR_DIR.parent.parent))
from data_collector.index import IndexBase
from data_collector.utils import get_instruments
quarter_dict = {"1Q": "01-03", "2Q": "05-01", "3Q": "09-01"}
class IBOVIndex(IndexBase):
ibov_index_composition = "https://raw.githubusercontent.com/igor17400/IBOV-HCI/main/historic_composition/{}.csv"
years_4_month_periods = []
def __init__(
self,
index_name: str,
qlib_dir: [str, Path] = None,
freq: str = "day",
request_retry: int = 5,
retry_sleep: int = 3,
):
super(IBOVIndex, self).__init__(
index_name=index_name, qlib_dir=qlib_dir, freq=freq, request_retry=request_retry, retry_sleep=retry_sleep
)
self.today: datetime = datetime.date.today()
self.current_4_month_period = self.get_current_4_month_period(self.today.month)
self.year = str(self.today.year)
self.years_4_month_periods = self.get_four_month_period()
@property
def bench_start_date(self) -> pd.Timestamp:
"""
        The ibovespa index started on 2 January 1968 (wiki); however,
        no suitable data source that tracks the historical composition
        of the ibovespa has been found, except for the repo indicated
        in the README, which keeps track of such information starting
        from the first quarter of 2003.
"""
return pd.Timestamp("2003-01-03")
def get_current_4_month_period(self, current_month: int):
"""
This function is used to calculated what is the current
four month period for the current month. For example,
If the current month is August 8, its four month period
is 2Q.
OBS: In english Q is used to represent *quarter*
which means a three month period. However, in
portuguese we use Q to represent a four month period.
In other words,
Jan, Feb, Mar, Apr: 1Q
May, Jun, Jul, Aug: 2Q
Sep, Oct, Nov, Dez: 3Q
Parameters
----------
        current_month : int
            Current month (1 <= current_month <= 12)
Returns
-------
current_4m_period:str
Current Four Month Period (1Q or 2Q or 3Q)
"""
if current_month < 5:
return "1Q"
if current_month < 9:
return "2Q"
if current_month <= 12:
return "3Q"
else:
return -1
    def get_four_month_period(self):
        """
        The ibovespa index is updated every four months.
        Therefore, we represent each time period as e.g. 2003_1Q,
        which means the first four-month period of 2003 (Jan, Feb, Mar, Apr).
        """
        four_months_period = ["1Q", "2Q", "3Q"]
        init_year = 2003
        now = datetime.datetime.now()
        current_year = now.year
        current_month = now.month
        # Build into a local list to avoid mutating the shared class attribute.
        periods = []
        for year in range(init_year, current_year):
            for el in four_months_period:
                periods.append(str(year) + "_" + el)
        # For the current year, only include the four-month periods that have
        # already started.
        current_4_month_period = self.get_current_4_month_period(current_month)
        for i in range(int(current_4_month_period[0])):
            periods.append(str(current_year) + "_" + str(i + 1) + "Q")
        return periods
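    # For reference, the labels produced by ``get_four_month_period`` look like
    # ["2003_1Q", "2003_2Q", "2003_3Q", ..., "<current_year>_1Q", ...]; they
    # are later converted to dates via ``format_quarter`` below.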
def format_datetime(self, inst_df: pd.DataFrame) -> pd.DataFrame:
"""formatting the datetime in an instrument
Parameters
----------
inst_df: pd.DataFrame
inst_df.columns = [self.SYMBOL_FIELD_NAME, self.START_DATE_FIELD, self.END_DATE_FIELD]
Returns
-------
inst_df: pd.DataFrame
"""
logger.info("Formatting Datetime")
if self.freq != "day":
inst_df[self.END_DATE_FIELD] = inst_df[self.END_DATE_FIELD].apply(
lambda x: (pd.Timestamp(x) + pd.Timedelta(hours=23, minutes=59)).strftime("%Y-%m-%d %H:%M:%S")
)
else:
inst_df[self.START_DATE_FIELD] = inst_df[self.START_DATE_FIELD].apply(
lambda x: (pd.Timestamp(x)).strftime("%Y-%m-%d")
)
inst_df[self.END_DATE_FIELD] = inst_df[self.END_DATE_FIELD].apply(
lambda x: (pd.Timestamp(x)).strftime("%Y-%m-%d")
)
return inst_df
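    # Illustrative sketch, assuming a single hypothetical row with start date
    # "2020-01-02" and end date "2020-01-10":
    #   freq == "day"  -> start stays "2020-01-02", end stays "2020-01-10"
    #                     (both reformatted as plain dates)
    #   freq != "day"  -> end becomes "2020-01-10 23:59:00" (23h59 appended);
    #                     the start date column is left untouched in this branch.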
def format_quarter(self, cell: str):
"""
        Parameters
        ----------
        cell: str
            It must be in the format 2003_1Q (see years_4_month_periods).
        Returns
        ----------
        date: str
            The date in the format 2003-01-03; an illustrative example
            follows this method.
"""
cell_split = cell.split("_")
return cell_split[0] + "-" + quarter_dict[cell_split[1]]
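    # Illustrative sketch, tracing format_quarter against quarter_dict above
    # (inputs chosen for illustration only):
    #   "2003_1Q" -> "2003-01-03"
    #   "2010_2Q" -> "2010-05-01"
    #   "2015_3Q" -> "2015-09-01"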
def get_changes(self):
"""
        Access the index's historical composition and compare it quarter
        by quarter and year by year in order to generate a file that
        keeps track of which stocks have been removed and which have
        been added.
        The DataFrame used as reference will provide the index
        composition for each year and quarter:
pd.DataFrame:
symbol
SH600000
SH600001
.
.
.
Parameters
----------
self: is used to represent the instance of the class.
Returns
----------
pd.DataFrame:
symbol date type
SH600000 2019-11-11 add
SH600001 2020-11-10 remove
dtypes:
symbol: str
date: pd.Timestamp
type: str, value from ["add", "remove"]
"""
logger.info("Getting companies changes in {} index ...".format(self.index_name))
try:
df_changes_list = []
for i in tqdm(range(len(self.years_4_month_periods) - 1)):
df = pd.read_csv(
self.ibov_index_composition.format(self.years_4_month_periods[i]), on_bad_lines="skip"
)["symbol"]
df_ = pd.read_csv(
self.ibov_index_composition.format(self.years_4_month_periods[i + 1]), on_bad_lines="skip"
)["symbol"]
## Remove Dataframe
remove_date = (
self.years_4_month_periods[i].split("_")[0]
+ "-"
+ quarter_dict[self.years_4_month_periods[i].split("_")[1]]
)
list_remove = list(df[~df.isin(df_)])
df_removed = pd.DataFrame(
{
"date": len(list_remove) * [remove_date],
"type": len(list_remove) * ["remove"],
"symbol": list_remove,
}
)
## Add Dataframe
add_date = (
self.years_4_month_periods[i + 1].split("_")[0]
+ "-"
+ quarter_dict[self.years_4_month_periods[i + 1].split("_")[1]]
)
list_add = list(df_[~df_.isin(df)])
df_added = pd.DataFrame(
{"date": len(list_add) * [add_date], "type": len(list_add) * ["add"], "symbol": list_add}
)
                df_changes_list.append(pd.concat([df_added, df_removed], sort=False))
from collections import deque
from datetime import datetime
import operator
import numpy as np
import pytest
import pytz
import pandas as pd
import pandas._testing as tm
from pandas.tests.frame.common import _check_mixed_float, _check_mixed_int
# -------------------------------------------------------------------
# Comparisons
class TestFrameComparisons:
# Specifically _not_ flex-comparisons
def test_frame_in_list(self):
# GH#12689 this should raise at the DataFrame level, not blocks
df = pd.DataFrame(np.random.randn(6, 4), columns=list("ABCD"))
msg = "The truth value of a DataFrame is ambiguous"
with pytest.raises(ValueError, match=msg):
df in [None]
def test_comparison_invalid(self):
def check(df, df2):
for (x, y) in [(df, df2), (df2, df)]:
# we expect the result to match Series comparisons for
# == and !=, inequalities should raise
result = x == y
expected = pd.DataFrame(
{col: x[col] == y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
result = x != y
expected = pd.DataFrame(
{col: x[col] != y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
with pytest.raises(TypeError):
x >= y
with pytest.raises(TypeError):
x > y
with pytest.raises(TypeError):
x < y
with pytest.raises(TypeError):
x <= y
# GH4968
# invalid date/int comparisons
df = pd.DataFrame(np.random.randint(10, size=(10, 1)), columns=["a"])
df["dates"] = pd.date_range("20010101", periods=len(df))
df2 = df.copy()
df2["dates"] = df["a"]
check(df, df2)
df = pd.DataFrame(np.random.randint(10, size=(10, 2)), columns=["a", "b"])
df2 = pd.DataFrame(
{
"a": pd.date_range("20010101", periods=len(df)),
"b": pd.date_range("20100101", periods=len(df)),
}
)
check(df, df2)
def test_timestamp_compare(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH#4982
df = pd.DataFrame(
{
"dates1": pd.date_range("20010101", periods=10),
"dates2": pd.date_range("20010102", periods=10),
"intcol": np.random.randint(1000000000, size=10),
"floatcol": np.random.randn(10),
"stringcol": list(tm.rands(10)),
}
)
df.loc[np.random.rand(len(df)) > 0.5, "dates2"] = pd.NaT
ops = {"gt": "lt", "lt": "gt", "ge": "le", "le": "ge", "eq": "eq", "ne": "ne"}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
if left in ["eq", "ne"]:
expected = left_f(df, pd.Timestamp("20010109"))
result = right_f(pd.Timestamp("20010109"), df)
tm.assert_frame_equal(result, expected)
else:
with pytest.raises(TypeError):
left_f(df, pd.Timestamp("20010109"))
with pytest.raises(TypeError):
right_f(pd.Timestamp("20010109"), df)
# nats
expected = left_f(df, pd.Timestamp("nat"))
result = right_f(pd.Timestamp("nat"), df)
tm.assert_frame_equal(result, expected)
def test_mixed_comparison(self):
# GH#13128, GH#22163 != datetime64 vs non-dt64 should be False,
# not raise TypeError
# (this appears to be fixed before GH#22163, not sure when)
df = pd.DataFrame([["1989-08-01", 1], ["1989-08-01", 2]])
other = pd.DataFrame([["a", "b"], ["c", "d"]])
result = df == other
assert not result.any().any()
result = df != other
assert result.all().all()
def test_df_boolean_comparison_error(self):
# GH#4576, GH#22880
# comparing DataFrame against list/tuple with len(obj) matching
# len(df.columns) is supported as of GH#22800
df = pd.DataFrame(np.arange(6).reshape((3, 2)))
expected = pd.DataFrame([[False, False], [True, False], [False, False]])
result = df == (2, 2)
tm.assert_frame_equal(result, expected)
result = df == [2, 2]
tm.assert_frame_equal(result, expected)
def test_df_float_none_comparison(self):
df = pd.DataFrame(
np.random.randn(8, 3), index=range(8), columns=["A", "B", "C"]
)
result = df.__eq__(None)
assert not result.any().any()
def test_df_string_comparison(self):
df = pd.DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
tm.assert_frame_equal(df[mask_a], df.loc[1:1, :])
tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :])
mask_b = df.b == "foo"
tm.assert_frame_equal(df[mask_b], df.loc[0:0, :])
tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :])
class TestFrameFlexComparisons:
# TODO: test_bool_flex_frame needs a better name
def test_bool_flex_frame(self):
data = np.random.randn(5, 3)
other_data = np.random.randn(5, 3)
df = pd.DataFrame(data)
other = pd.DataFrame(other_data)
ndim_5 = np.ones(df.shape + (1, 3))
# Unaligned
def _check_unaligned_frame(meth, op, df, other):
part_o = other.loc[3:, 1:].copy()
rs = meth(part_o)
xp = op(df, part_o.reindex(index=df.index, columns=df.columns))
tm.assert_frame_equal(rs, xp)
# DataFrame
assert df.eq(df).values.all()
assert not df.ne(df).values.any()
for op in ["eq", "ne", "gt", "lt", "ge", "le"]:
f = getattr(df, op)
o = getattr(operator, op)
# No NAs
tm.assert_frame_equal(f(other), o(df, other))
_check_unaligned_frame(f, o, df, other)
# ndarray
tm.assert_frame_equal(f(other.values), o(df, other.values))
# scalar
tm.assert_frame_equal(f(0), o(df, 0))
# NAs
msg = "Unable to coerce to Series/DataFrame"
tm.assert_frame_equal(f(np.nan), o(df, np.nan))
with pytest.raises(ValueError, match=msg):
f(ndim_5)
# Series
def _test_seq(df, idx_ser, col_ser):
idx_eq = df.eq(idx_ser, axis=0)
col_eq = df.eq(col_ser)
idx_ne = df.ne(idx_ser, axis=0)
col_ne = df.ne(col_ser)
tm.assert_frame_equal(col_eq, df == pd.Series(col_ser))
tm.assert_frame_equal(col_eq, -col_ne)
tm.assert_frame_equal(idx_eq, -idx_ne)
tm.assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)
tm.assert_frame_equal(col_eq, df.eq(list(col_ser)))
tm.assert_frame_equal(idx_eq, df.eq(pd.Series(idx_ser), axis=0))
tm.assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))
idx_gt = df.gt(idx_ser, axis=0)
col_gt = df.gt(col_ser)
idx_le = df.le(idx_ser, axis=0)
col_le = df.le(col_ser)
tm.assert_frame_equal(col_gt, df > pd.Series(col_ser))
tm.assert_frame_equal(col_gt, -col_le)
tm.assert_frame_equal(idx_gt, -idx_le)
tm.assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)
idx_ge = df.ge(idx_ser, axis=0)
col_ge = df.ge(col_ser)
idx_lt = df.lt(idx_ser, axis=0)
col_lt = df.lt(col_ser)
tm.assert_frame_equal(col_ge, df >= pd.Series(col_ser))
tm.assert_frame_equal(col_ge, -col_lt)
tm.assert_frame_equal(idx_ge, -idx_lt)
tm.assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)
idx_ser = pd.Series(np.random.randn(5))
col_ser = pd.Series(np.random.randn(3))
_test_seq(df, idx_ser, col_ser)
# list/tuple
_test_seq(df, idx_ser.values, col_ser.values)
# NA
df.loc[0, 0] = np.nan
rs = df.eq(df)
assert not rs.loc[0, 0]
rs = df.ne(df)
assert rs.loc[0, 0]
rs = df.gt(df)
assert not rs.loc[0, 0]
rs = df.lt(df)
assert not rs.loc[0, 0]
rs = df.ge(df)
assert not rs.loc[0, 0]
rs = df.le(df)
assert not rs.loc[0, 0]
def test_bool_flex_frame_complex_dtype(self):
# complex
arr = np.array([np.nan, 1, 6, np.nan])
arr2 = np.array([2j, np.nan, 7, None])
df = pd.DataFrame({"a": arr})
df2 = pd.DataFrame({"a": arr2})
msg = "|".join(
[
"'>' not supported between instances of '.*' and 'complex'",
r"unorderable types: .*complex\(\)", # PY35
]
)
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df.gt(df2)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df["a"].gt(df2["a"])
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df.values > df2.values
rs = df.ne(df2)
assert rs.values.all()
arr3 = np.array([2j, np.nan, None])
df3 = pd.DataFrame({"a": arr3})
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df3.gt(2j)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df3["a"].gt(2j)
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df3.values > 2j
def test_bool_flex_frame_object_dtype(self):
# corner, dtype=object
df1 = pd.DataFrame({"col": ["foo", np.nan, "bar"]})
df2 = pd.DataFrame({"col": ["foo", datetime.now(), "bar"]})
result = df1.ne(df2)
exp = pd.DataFrame({"col": [False, True, False]})
        tm.assert_frame_equal(result, exp)
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
idx = DatetimeIndex(org, freq=f)
self.assertTrue(idx.equals(org))
        # unable to create tz-aware 'A' and 'C' freq
if _np_version_under1p7:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=f, tz='US/Pacific')
self.assertTrue(idx.equals(org))
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
class TestTimeSeries(tm.TestCase):
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEqual(v1, Timestamp('2/28/2005'))
self.assertEqual(v2, Timestamp('4/30/2005'))
self.assertEqual(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assertIsNone(dti2.freq)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
self.assertIsNotNone(expected.freq)
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
self.assertIsNone(masked.freq)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_series_box_timestamp(self):
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng)
tm.assert_isinstance(s[5], Timestamp)
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng, index=rng)
tm.assert_isinstance(s[5], Timestamp)
tm.assert_isinstance(s.iget_value(5), Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
self.assertRaises(ValueError, date_range, start, end,
freq='s', periods=10)
def test_timestamp_to_datetime(self):
_skip_if_no_pytz()
rng = date_range('20090415', '20090519',
tz='US/Eastern')
stamp = rng[0]
dtval = stamp.to_pydatetime()
self.assertEqual(stamp, dtval)
self.assertEqual(stamp.tzinfo, dtval.tzinfo)
def test_index_convert_to_datetime_array(self):
_skip_if_no_pytz()
def _check_rng(rng):
converted = rng.to_pydatetime()
tm.assert_isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
tm.assert_isinstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
self.assertEqual(rng[0].second, 1)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
self.assertIs(result.index, rng)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_setitem_timestamp(self):
# 2155
columns = DatetimeIndex(start='1/1/2012', end='2/1/2012',
freq=datetools.bday)
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
rng2 = rng[::2][::-1]
self.assertRaises(ValueError, rng2.get_indexer, rng,
method='pad')
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertEqual(df[unit].dtype, ns_dtype)
self.assertTrue((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertTrue((tmp['dates'].values == ex_vals).all())
def test_to_datetime_unit(self):
epoch = 1370745748
s = Series([ epoch + t for t in range(20) ])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = concat([Series([ epoch + t for t in range(20) ]).astype(float),Series([np.nan])],ignore_index=True)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
series = Series(dates)
self.assertTrue(np.issubdtype(series.dtype, np.dtype('M8[ns]')))
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')
idx = Index(arr)
self.assertTrue((idx.values == tslib.cast_to_nanoseconds(arr)).all())
def test_index_astype_datetime64(self):
idx = Index([datetime(2012, 1, 1)], dtype=object)
if not _np_version_under1p7:
raise nose.SkipTest("test only valid in numpy < 1.7")
casted = idx.astype(np.dtype('M8[D]'))
expected = DatetimeIndex(idx.values)
tm.assert_isinstance(casted, DatetimeIndex)
self.assertTrue(casted.equals(expected))
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
self.assertTrue(np.issubdtype(result.dtype, np.dtype('M8[ns]')))
mask = result.isnull()
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_reindex_frame_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
result = df.reindex(lrange(15))
self.assertTrue(np.issubdtype(result['B'].dtype, np.dtype('M8[ns]')))
mask = com.isnull(result)['B']
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_series_repr_nat(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
result = repr(series)
expected = ('0 1970-01-01 00:00:00\n'
'1 1970-01-01 00:00:00.000001\n'
'2 1970-01-01 00:00:00.000002\n'
'3 NaT\n'
'dtype: datetime64[ns]')
self.assertEqual(result, expected)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_string_na_nat_conversion(self):
# GH #999, #858
from pandas.compat import parse_date
strings = np.array(['1/1/2000', '1/2/2000', np.nan,
'1/4/2000, 12:34:56'], dtype=object)
expected = np.empty(4, dtype='M8[ns]')
for i, val in enumerate(strings):
if com.isnull(val):
expected[i] = iNaT
else:
expected[i] = parse_date(val)
result = tslib.array_to_datetime(strings)
assert_almost_equal(result, expected)
result2 = to_datetime(strings)
tm.assert_isinstance(result2, DatetimeIndex)
assert_almost_equal(result, result2)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
result = to_datetime(malformed)
assert_almost_equal(result, malformed)
self.assertRaises(ValueError, to_datetime, malformed,
errors='raise')
idx = ['a', 'b', 'c', 'd', 'e']
series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan,
'1/5/2000'], index=idx, name='foo')
dseries = Series([to_datetime('1/1/2000'), np.nan,
to_datetime('1/3/2000'), np.nan,
to_datetime('1/5/2000')], index=idx, name='foo')
result = to_datetime(series)
dresult = to_datetime(dseries)
expected = Series(np.empty(5, dtype='M8[ns]'), index=idx)
for i in range(5):
x = series[i]
if isnull(x):
expected[i] = iNaT
else:
expected[i] = to_datetime(x)
assert_series_equal(result, expected)
self.assertEqual(result.name, 'foo')
assert_series_equal(dresult, expected)
self.assertEqual(dresult.name, 'foo')
def test_to_datetime_iso8601(self):
result = to_datetime(["2012-01-01 00:00:00"])
exp = Timestamp("2012-01-01 00:00:00")
self.assertEqual(result[0], exp)
result = to_datetime(['20121001']) # bad iso 8601
exp = Timestamp('2012-10-01')
self.assertEqual(result[0], exp)
def test_to_datetime_default(self):
rs = to_datetime('2001')
xp = datetime(2001, 1, 1)
        self.assertEqual(rs, xp)
#### dayfirst is essentially broken
#### to_datetime('01-13-2012', dayfirst=True)
#### self.assertRaises(ValueError, to_datetime('01-13-2012', dayfirst=True))
def test_to_datetime_on_datetime64_series(self):
# #2699
s = Series(date_range('1/1/2000', periods=10))
result = to_datetime(s)
self.assertEqual(result[0], s[0])
def test_to_datetime_with_apply(self):
# this is only locale tested with US/None locales
_skip_if_has_locale()
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1,2,3])
expected = pd.to_datetime(td, format='%b %y')
result = td.apply(pd.to_datetime, format='%b %y')
assert_series_equal(result, expected)
td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3])
self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y'))
self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y'))
expected = pd.to_datetime(td, format='%b %y', coerce=True)
result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', coerce=True))
assert_series_equal(result, expected)
def test_nat_vector_field_access(self):
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(idx, field)
expected = [getattr(x, field) if x is not NaT else -1
for x in idx]
self.assert_numpy_array_equal(result, expected)
def test_nat_scalar_field_access(self):
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(NaT, field)
self.assertEqual(result, -1)
self.assertEqual(NaT.weekday(), -1)
def test_to_datetime_types(self):
# empty string
result = to_datetime('')
self.assertIs(result, NaT)
result = to_datetime(['', ''])
self.assertTrue(isnull(result).all())
# ints
result = Timestamp(0)
expected = to_datetime(0)
self.assertEqual(result, expected)
# GH 3888 (strings)
expected = to_datetime(['2012'])[0]
result = to_datetime('2012')
self.assertEqual(result, expected)
### array = ['2012','20120101','20120101 12:01:01']
array = ['20120101','20120101 12:01:01']
expected = list(to_datetime(array))
result = lmap(Timestamp,array)
tm.assert_almost_equal(result,expected)
### currently fails ###
### result = Timestamp('2012')
### expected = to_datetime('2012')
### self.assertEqual(result, expected)
def test_to_datetime_unprocessable_input(self):
# GH 4928
self.assert_numpy_array_equal(
to_datetime([1, '1']),
np.array([1, '1'], dtype='O')
)
self.assertRaises(TypeError, to_datetime, [1, '1'], errors='raise')
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view('M8[us]')
as_obj = scalar.astype('O')
index = DatetimeIndex([scalar])
self.assertEqual(index[0], scalar.astype('O'))
value = Timestamp(scalar)
self.assertEqual(value, as_obj)
def test_to_datetime_list_of_integers(self):
rng = date_range('1/1/2000', periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
self.assertTrue(rng.equals(result))
def test_to_datetime_dt64s(self):
in_bound_dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
for dt in in_bound_dts:
self.assertEqual(
pd.to_datetime(dt),
Timestamp(dt)
)
oob_dts = [
np.datetime64('1000-01-01'),
np.datetime64('5000-01-02'),
]
for dt in oob_dts:
self.assertRaises(ValueError, pd.to_datetime, dt, errors='raise')
self.assertRaises(ValueError, tslib.Timestamp, dt)
self.assertIs(pd.to_datetime(dt, coerce=True), NaT)
def test_to_datetime_array_of_dt64s(self):
dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
self.assert_numpy_array_equal(
pd.to_datetime(dts, box=False),
np.array([Timestamp(x).asm8 for x in dts])
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64('9999-01-01')]
self.assertRaises(
ValueError,
pd.to_datetime,
dts_with_oob,
coerce=False,
errors='raise'
)
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=True),
np.array(
[
Timestamp(dts_with_oob[0]).asm8,
Timestamp(dts_with_oob[1]).asm8,
iNaT,
],
dtype='M8'
)
)
# With coerce=False and errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=False),
np.array(
[dt.item() for dt in dts_with_oob],
dtype='O'
)
)
def test_index_to_datetime(self):
idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
result = idx.to_datetime()
expected = DatetimeIndex(datetools.to_datetime(idx.values))
self.assertTrue(result.equals(expected))
today = datetime.today()
idx = Index([today], dtype=object)
result = idx.to_datetime()
expected = DatetimeIndex([today])
self.assertTrue(result.equals(expected))
def test_to_datetime_freq(self):
xp = bdate_range('2000-1-1', periods=10, tz='UTC')
rs = xp.to_datetime()
self.assertEqual(xp.freq, rs.freq)
self.assertEqual(xp.tzinfo, rs.tzinfo)
def test_range_misspecified(self):
# GH #1095
self.assertRaises(ValueError, date_range, '1/1/2000')
self.assertRaises(ValueError, date_range, end='1/1/2000')
self.assertRaises(ValueError, date_range, periods=10)
self.assertRaises(ValueError, date_range, '1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, end='1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, periods=10, freq='H')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
self.assertIn('2000', str(e))
def test_reindex_with_datetimes(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
def test_promote_datetime_date(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# test asfreq
result = ts2.asfreq('4H', method='ffill')
expected = ts[5:].asfreq('4H', method='ffill')
assert_series_equal(result, expected)
result = rng.get_indexer(ts2.index)
expected = rng.get_indexer(ts_slice.index)
self.assert_numpy_array_equal(result, expected)
def test_asfreq_normalize(self):
rng = date_range('1/1/2000 09:30', periods=20)
norm = date_range('1/1/2000', periods=20)
vals = np.random.randn(20)
ts = Series(vals, index=rng)
result = ts.asfreq('D', normalize=True)
norm = date_range('1/1/2000', periods=20)
expected = Series(vals, index=norm)
assert_series_equal(result, expected)
vals = np.random.randn(20, 3)
ts = DataFrame(vals, index=rng)
result = ts.asfreq('D', normalize=True)
expected = DataFrame(vals, index=norm)
assert_frame_equal(result, expected)
def test_date_range_gen_error(self):
rng = date_range('1/1/2000 00:00', '1/1/2000 00:18', freq='5min')
self.assertEqual(len(rng), 4)
def test_first_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.first('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.first('10d')
self.assertEqual(len(result), 10)
result = ts.first('3M')
expected = ts[:'3/31/2000']
assert_series_equal(result, expected)
result = ts.first('21D')
expected = ts[:21]
assert_series_equal(result, expected)
result = ts[:0].first('3M')
assert_series_equal(result, ts[:0])
def test_last_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.last('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.last('10d')
self.assertEqual(len(result), 10)
result = ts.last('21D')
expected = ts['12/12/2009':]
assert_series_equal(result, expected)
result = ts.last('21D')
expected = ts[-21:]
assert_series_equal(result, expected)
result = ts[:0].last('3M')
assert_series_equal(result, ts[:0])
def test_add_offset(self):
rng = date_range('1/1/2000', '2/1/2000')
result = rng + offsets.Hour(2)
expected = date_range('1/1/2000 02:00', '2/1/2000 02:00')
self.assertTrue(result.equals(expected))
def test_format_pre_1900_dates(self):
rng = date_range('1/1/1850', '1/1/1950', freq='A-DEC')
rng.format()
ts = Series(1, index=rng)
repr(ts)
def test_repeat(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
def test_at_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts[time(9, 30)]
result_df = df.ix[time(9, 30)]
expected = ts[(rng.hour == 9) & (rng.minute == 30)]
exp_df = df[(rng.hour == 9) & (rng.minute == 30)]
# expected.index = date_range('1/1/2000', '1/4/2000')
assert_series_equal(result, expected)
tm.assert_frame_equal(result_df, exp_df)
chunk = df.ix['1/4/2000':]
result = chunk.ix[time(9, 30)]
expected = result_df[-1:]
tm.assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.at_time(time(0, 0))
assert_series_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = Series(np.random.randn(len(rng)), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_at_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_frame_equal(result, expected)
result = ts.ix[time(9, 30)]
expected = ts.ix[(rng.hour == 9) & (rng.minute == 30)]
assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts.at_time(time(0, 0))
assert_frame_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = DataFrame(np.random.randn(len(rng), 2), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_between_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_series_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_between_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_frame_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_dti_constructor_preserve_dti_freq(self):
rng = date_range('1/1/2000', '1/2/2000', freq='5min')
rng2 = DatetimeIndex(rng)
self.assertEqual(rng.freq, rng2.freq)
def test_normalize(self):
rng = date_range('1/1/2000 9:30', periods=10, freq='D')
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D')
self.assertTrue(result.equals(expected))
rng_ns = pd.DatetimeIndex(np.array([1380585623454345752, 1380585612343234312]).astype("datetime64[ns]"))
rng_ns_normalized = rng_ns.normalize()
expected = pd.DatetimeIndex(np.array([1380585600000000000, 1380585600000000000]).astype("datetime64[ns]"))
self.assertTrue(rng_ns_normalized.equals(expected))
self.assertTrue(result.is_normalized)
self.assertFalse(rng.is_normalized)
def test_to_period(self):
from pandas.tseries.period import period_range
ts = _simple_ts('1/1/2000', '1/1/2001')
pts = ts.to_period()
exp = ts.copy()
exp.index = period_range('1/1/2000', '1/1/2001')
assert_series_equal(pts, exp)
pts = ts.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
def create_dt64_based_index(self):
data = [Timestamp('2007-01-01 10:11:12.123456Z'),
Timestamp('2007-01-01 10:11:13.789123Z')]
index = DatetimeIndex(data)
return index
def test_to_period_millisecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='L')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123Z', 'L'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789Z', 'L'))
def test_to_period_microsecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='U')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123456Z', 'U'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789123Z', 'U'))
def test_to_period_tz(self):
_skip_if_no_pytz()
from dateutil.tz import tzlocal
from pytz import utc as UTC
xp = date_range('1/1/2000', '4/1/2000').to_period()
ts = date_range('1/1/2000', '4/1/2000', tz='US/Eastern')
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=UTC)
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=tzlocal())
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
def test_frame_to_period(self):
K = 5
from pandas.tseries.period import period_range
dr = date_range('1/1/2000', '1/1/2001')
pr = period_range('1/1/2000', '1/1/2001')
df = DataFrame(randn(len(dr), K), index=dr)
df['mix'] = 'a'
pts = df.to_period()
exp = df.copy()
exp.index = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
df = df.T
pts = df.to_period(axis=1)
exp = df.copy()
exp.columns = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M', axis=1)
self.assertTrue(pts.columns.equals(exp.columns.asfreq('M')))
self.assertRaises(ValueError, df.to_period, axis=2)
def test_timestamp_fields(self):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter', 'is_month_start', 'is_month_end', 'is_quarter_start', 'is_quarter_end', 'is_year_start', 'is_year_end']
for f in fields:
expected = getattr(idx, f)[-1]
result = getattr(Timestamp(idx[-1]), f)
self.assertEqual(result, expected)
self.assertEqual(idx.freq, Timestamp(idx[-1], idx.freq).freq)
self.assertEqual(idx.freqstr, Timestamp(idx[-1], idx.freq).freqstr)
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013,12,31)
result = Timestamp(d).week
expected = 1 # ISO standard
self.assertEqual(result, expected)
d = datetime(2008,12,28)
result = Timestamp(d).week
expected = 52 # ISO standard
self.assertEqual(result, expected)
d = datetime(2009,12,31)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,1)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,3)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
result = np.array([Timestamp(datetime(*args)).week for args in
[(2000,1,1),(2000,1,2),(2005,1,1),(2005,1,2)]])
self.assertTrue((result == [52, 52, 53, 53]).all())
def test_timestamp_date_out_of_range(self):
self.assertRaises(ValueError, Timestamp, '1676-01-01')
self.assertRaises(ValueError, Timestamp, '2263-01-01')
# 1475
self.assertRaises(ValueError, DatetimeIndex, ['1400-01-01'])
self.assertRaises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)])
def test_timestamp_repr(self):
# pre-1900
stamp = Timestamp('1850-01-01', tz='US/Eastern')
repr(stamp)
iso8601 = '1850-01-01 01:23:45.012345'
stamp = Timestamp(iso8601, tz='US/Eastern')
result = repr(stamp)
self.assertIn(iso8601, result)
def test_timestamp_from_ordinal(self):
# GH 3042
dt = datetime(2011, 4, 16, 0, 0)
ts = Timestamp.fromordinal(dt.toordinal())
self.assertEqual(ts.to_pydatetime(), dt)
# with a tzinfo
stamp = Timestamp('2011-4-16', tz='US/Eastern')
dt_tz = stamp.to_pydatetime()
ts = Timestamp.fromordinal(dt_tz.toordinal(),tz='US/Eastern')
self.assertEqual(ts.to_pydatetime(), dt_tz)
def test_datetimeindex_integers_shift(self):
rng = date_range('1/1/2000', periods=20)
result = rng + 5
expected = rng.shift(5)
self.assertTrue(result.equals(expected))
result = rng - 5
expected = rng.shift(-5)
self.assertTrue(result.equals(expected))
def test_astype_object(self):
# NumPy 1.6.1 weak ns support
rng = date_range('1/1/2000', periods=20)
casted = rng.astype('O')
exp_values = list(rng)
self.assert_numpy_array_equal(casted, exp_values)
def test_catch_infinite_loop(self):
offset = datetools.DateOffset(minute=5)
# blow up, don't loop forever
self.assertRaises(Exception, date_range, datetime(2011, 11, 11),
datetime(2011, 11, 12), freq=offset)
def test_append_concat(self):
rng = date_range('5/8/2012 1:45', periods=10, freq='5T')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
result = ts.append(ts)
result_df = df.append(df)
ex_index = DatetimeIndex(np.tile(rng.values, 2))
self.assertTrue(result.index.equals(ex_index))
self.assertTrue(result_df.index.equals(ex_index))
appended = rng.append(rng)
self.assertTrue(appended.equals(ex_index))
appended = rng.append([rng, rng])
ex_index = DatetimeIndex(np.tile(rng.values, 3))
self.assertTrue(appended.equals(ex_index))
# different index names
rng1 = rng.copy()
rng2 = rng.copy()
rng1.name = 'foo'
rng2.name = 'bar'
self.assertEqual(rng1.append(rng1).name, 'foo')
self.assertIsNone(rng1.append(rng2).name)
def test_append_concat_tz(self):
#GH 2938
_skip_if_no_pytz()
rng = date_range('5/8/2012 1:45', periods=10, freq='5T',
tz='US/Eastern')
rng2 = date_range('5/8/2012 2:35', periods=10, freq='5T',
tz='US/Eastern')
rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T',
tz='US/Eastern')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
ts2 = Series(np.random.randn(len(rng2)), rng2)
df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2)
result = ts.append(ts2)
result_df = df.append(df2)
self.assertTrue(result.index.equals(rng3))
self.assertTrue(result_df.index.equals(rng3))
appended = rng.append(rng2)
self.assertTrue(appended.equals(rng3))
def test_set_dataframe_column_ns_dtype(self):
x = DataFrame([datetime.now(), datetime.now()])
self.assertEqual(x[0].dtype, np.dtype('M8[ns]'))
def test_groupby_count_dateparseerror(self):
dr = date_range(start='1/1/2012', freq='5min', periods=10)
# BAD Example, datetimes first
s = Series(np.arange(10), index=[dr, lrange(10)])
grouped = s.groupby(lambda x: x[1] % 2 == 0)
result = grouped.count()
s = Series(np.arange(10), index=[lrange(10), dr])
grouped = s.groupby(lambda x: x[0] % 2 == 0)
expected = grouped.count()
assert_series_equal(result, expected)
def test_datetimeindex_repr_short(self):
dr = date_range(start='1/1/2012', periods=1)
repr(dr)
dr = date_range(start='1/1/2012', periods=2)
repr(dr)
dr = date_range(start='1/1/2012', periods=3)
repr(dr)
def test_constructor_int64_nocopy(self):
# #1624
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] == -1).all())
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr, copy=True)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] != -1).all())
def test_series_interpolate_method_values(self):
# #1646
ts = _simple_ts('1/1/2000', '1/20/2000')
ts[::2] = np.nan
result = ts.interpolate(method='values')
exp = ts.interpolate()
assert_series_equal(result, exp)
def test_frame_datetime64_handling_groupby(self):
# it works!
df = DataFrame([(3, np.datetime64('2012-07-03')),
(3, np.datetime64('2012-07-04'))],
columns=['a', 'date'])
result = df.groupby('a').first()
self.assertEqual(result['date'][3], Timestamp('2012-07-03'))
def test_series_interpolate_intraday(self):
# #1698
index = pd.date_range('1/1/2012', periods=4, freq='12D')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(days=1)).order()
exp = ts.reindex(new_index).interpolate(method='time')
index = pd.date_range('1/1/2012', periods=4, freq='12H')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(hours=1)).order()
result = ts.reindex(new_index).interpolate(method='time')
self.assert_numpy_array_equal(result.values, exp.values)
def test_frame_dict_constructor_datetime64_1680(self):
dr = date_range('1/1/2012', periods=10)
s = Series(dr, index=dr)
# it works!
DataFrame({'a': 'foo', 'b': s}, index=dr)
DataFrame({'a': 'foo', 'b': s.values}, index=dr)
def test_frame_datetime64_mixed_index_ctor_1681(self):
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
ts = Series(dr)
# it works!
d = DataFrame({'A': 'foo', 'B': ts}, index=dr)
self.assertTrue(d['B'].isnull().all())
def test_frame_timeseries_to_records(self):
index = date_range('1/1/2000', periods=10)
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['a', 'b', 'c'])
result = df.to_records()
self.assertEqual(result['index'].dtype, 'M8[ns]')
result = df.to_records(index=False)
def test_frame_datetime64_duplicated(self):
dates = date_range('2010-07-01', end='2010-08-05')
tst = DataFrame({'symbol': 'AAA', 'date': dates})
result = tst.duplicated(['date', 'symbol'])
self.assertTrue((~result).all())
tst = DataFrame({'date': dates})
result = tst.duplicated()
self.assertTrue((~result).all())
def test_timestamp_compare_with_early_datetime(self):
# e.g. datetime.min
stamp = Timestamp('2012-01-01')
self.assertFalse(stamp == datetime.min)
self.assertFalse(stamp == datetime(1600, 1, 1))
self.assertFalse(stamp == datetime(2700, 1, 1))
self.assertNotEqual(stamp, datetime.min)
self.assertNotEqual(stamp, datetime(1600, 1, 1))
self.assertNotEqual(stamp, datetime(2700, 1, 1))
self.assertTrue(stamp > datetime(1600, 1, 1))
self.assertTrue(stamp >= datetime(1600, 1, 1))
self.assertTrue(stamp < datetime(2700, 1, 1))
self.assertTrue(stamp <= datetime(2700, 1, 1))
def test_to_html_timestamp(self):
rng = date_range('2000-01-01', periods=10)
df = DataFrame(np.random.randn(10, 4), index=rng)
result = df.to_html()
self.assertIn('2000-01-01', result)
def test_to_csv_numpy_16_bug(self):
frame = DataFrame({'a': date_range('1/1/2000', periods=10)})
buf = StringIO()
frame.to_csv(buf)
result = buf.getvalue()
self.assertIn('2000-01-01', result)
def test_series_map_box_timestamps(self):
# #2689, #2627
s = Series(date_range('1/1/2000', periods=10))
def f(x):
return (x.hour, x.day, x.month)
# it works!
s.map(f)
s.apply(f)
DataFrame(s).applymap(f)
def test_concat_datetime_datetime64_frame(self):
# #2624
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 'hi'])
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
ind = date_range(start="2000/1/1", freq="D", periods=10)
df1 = DataFrame({'date': ind, 'test':lrange(10)})
# it works!
pd.concat([df1, df2_obj])
def test_period_resample(self):
# GH3609
s = Series(range(100),index=date_range('20130101', freq='s', periods=100), dtype='float')
s[10:30] = np.nan
expected = Series([34.5, 79.5], index=[Period('2013-01-01 00:00', 'T'), Period('2013-01-01 00:01', 'T')])
result = s.to_period().resample('T', kind='period')
assert_series_equal(result, expected)
result2 = s.resample('T', kind='period')
assert_series_equal(result2, expected)
def test_period_resample_with_local_timezone(self):
# GH5430
_skip_if_no_pytz()
import pytz
local_timezone = pytz.timezone('America/Los_Angeles')
start = datetime(year=2013, month=11, day=1, hour=0, minute=0, tzinfo=pytz.utc)
# 1 day later
end = datetime(year=2013, month=11, day=2, hour=0, minute=0, tzinfo=pytz.utc)
index = pd.date_range(start, end, freq='H')
series = pd.Series(1, index=index)
series = series.tz_convert(local_timezone)
result = series.resample('D', kind='period')
# Create the expected series
expected_index = (pd.period_range(start=start, end=end, freq='D') - 1) # Index is moved back a day with the timezone conversion from UTC to Pacific
expected = pd.Series(1, index=expected_index)
assert_series_equal(result, expected)
def test_pickle(self):
#GH4606
from pandas.compat import cPickle
import pickle
for pick in [pickle, cPickle]:
p = pick.loads(pick.dumps(NaT))
self.assertTrue(p is NaT)
idx = pd.to_datetime(['2013-01-01', NaT, '2014-01-06'])
idx_p = pick.loads(pick.dumps(idx))
self.assertTrue(idx_p[0] == idx[0])
self.assertTrue(idx_p[1] is NaT)
self.assertTrue(idx_p[2] == idx[2])
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
class TestDatetimeIndex(tm.TestCase):
_multiprocess_can_split_ = True
def test_hash_error(self):
index = date_range('20010101', periods=10)
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(index).__name__):
hash(index)
def test_stringified_slice_with_tz(self):
#GH2658
import datetime
start=datetime.datetime.now()
idx=DatetimeIndex(start=start,freq="1d",periods=10)
df=DataFrame(lrange(10),index=idx)
df["2013-01-14 23:44:34.437768-05:00":] # no exception here
def test_append_join_nondatetimeindex(self):
rng = date_range('1/1/2000', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
tm.assert_isinstance(result[0], Timestamp)
# it works
rng.join(idx, how='outer')
def test_astype(self):
rng = date_range('1/1/2000', periods=10)
result = rng.astype('i8')
self.assert_numpy_array_equal(result, rng.asi8)
def test_to_period_nofreq(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
self.assertRaises(ValueError, idx.to_period)
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'],
freq='infer')
idx.to_period()
def test_000constructor_resolution(self):
# 2252
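# The raw integer is an epoch value in nanoseconds: whole seconds scaled by
# 1e9, plus 1 millisecond (1000000 ns) + 1 microsecond (1000 ns) + 1 nanosecond.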
t1 = Timestamp((1352934390 * 1000000000) + 1000000 + 1000 + 1)
idx = DatetimeIndex([t1])
self.assertEqual(idx.nanosecond[0], t1.nanosecond)
def test_constructor_coverage(self):
rng = date_range('1/1/2000', periods=10.5)
exp = date_range('1/1/2000', periods=10)
self.assertTrue(rng.equals(exp))
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
periods='foo', freq='D')
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
end='1/10/2000')
self.assertRaises(ValueError, DatetimeIndex, '1/1/2000')
# generator expression
gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10))
result = DatetimeIndex(gen)
expected = DatetimeIndex([datetime(2000, 1, 1) + timedelta(i)
for i in range(10)])
self.assertTrue(result.equals(expected))
# NumPy string array
strings = np.array(['2000-01-01', '2000-01-02', '2000-01-03'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
self.assertTrue(result.equals(expected))
from_ints = DatetimeIndex(expected.asi8)
self.assertTrue(from_ints.equals(expected))
# non-conforming
self.assertRaises(ValueError, DatetimeIndex,
['2000-01-01', '2000-01-02', '2000-01-04'],
freq='D')
self.assertRaises(ValueError, DatetimeIndex,
start='2011-01-01', freq='b')
self.assertRaises(ValueError, DatetimeIndex,
end='2011-01-01', freq='B')
self.assertRaises(ValueError, DatetimeIndex, periods=10, freq='D')
def test_constructor_name(self):
idx = DatetimeIndex(start='2000-01-01', periods=1, freq='A',
name='TEST')
self.assertEqual(idx.name, 'TEST')
def test_comparisons_coverage(self):
rng = date_range('1/1/2000', periods=10)
# raise TypeError for now
self.assertRaises(TypeError, rng.__lt__, rng[3].value)
result = rng == list(rng)
exp = rng == rng
self.assert_numpy_array_equal(result, exp)
def test_map(self):
rng = date_range('1/1/2000', periods=10)
f = lambda x: x.strftime('%Y%m%d')
result = rng.map(f)
exp = [f(x) for x in rng]
self.assert_numpy_array_equal(result, exp)
def test_add_union(self):
rng = date_range('1/1/2000', periods=5)
rng2 = date_range('1/6/2000', periods=5)
result = rng + rng2
expected = rng.union(rng2)
self.assertTrue(result.equals(expected))
def test_misc_coverage(self):
rng = date_range('1/1/2000', periods=5)
result = rng.groupby(rng.day)
tm.assert_isinstance(list(result.values())[0][0], Timestamp)
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
self.assertTrue(idx.equals(list(idx)))
non_datetime = Index(list('abc'))
self.assertFalse(idx.equals(list(non_datetime)))
def test_union_coverage(self):
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
ordered = DatetimeIndex(idx.order(), freq='infer')
result = ordered.union(idx)
self.assertTrue(result.equals(ordered))
result = ordered[:0].union(ordered)
self.assertTrue(result.equals(ordered))
self.assertEqual(result.freq, ordered.freq)
def test_union_bug_1730(self):
rng_a = date_range('1/1/2012', periods=4, freq='3H')
rng_b = date_range('1/1/2012', periods=4, freq='4H')
result = rng_a.union(rng_b)
exp = DatetimeIndex(sorted(set(list(rng_a)) | set(list(rng_b))))
self.assertTrue(result.equals(exp))
def test_union_bug_1745(self):
left = DatetimeIndex(['2012-05-11 15:19:49.695000'])
right = DatetimeIndex(['2012-05-29 13:04:21.322000',
'2012-05-11 15:27:24.873000',
'2012-05-11 15:31:05.350000'])
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_union_bug_4564(self):
from pandas import DateOffset
left = date_range("2013-01-01", "2013-02-01")
right = left + DateOffset(minutes=15)
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_intersection_bug_1708(self):
from pandas import DateOffset
index_1 = date_range('1/1/2012', periods=4, freq='12H')
index_2 = index_1 + DateOffset(hours=1)
result = index_1 & index_2
self.assertEqual(len(result), 0)
# def test_add_timedelta64(self):
# rng = date_range('1/1/2000', periods=5)
# delta = rng.values[3] - rng.values[1]
# result = rng + delta
# expected = rng + timedelta(2)
# self.assertTrue(result.equals(expected))
def test_get_duplicates(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-02',
'2000-01-03', '2000-01-03', '2000-01-04'])
result = idx.get_duplicates()
ex = DatetimeIndex(['2000-01-02', '2000-01-03'])
self.assertTrue(result.equals(ex))
def test_argmin_argmax(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
self.assertEqual(idx.argmin(), 1)
self.assertEqual(idx.argmax(), 0)
def test_order(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
ordered = idx.order()
self.assertTrue(ordered.is_monotonic)
ordered = idx.order(ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
ordered, dexer = idx.order(return_indexer=True)
self.assertTrue(ordered.is_monotonic)
self.assert_numpy_array_equal(dexer, [1, 2, 0])
ordered, dexer = idx.order(return_indexer=True, ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
self.assert_numpy_array_equal(dexer, [0, 2, 1])
def test_insert(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
result = idx.insert(2, datetime(2000, 1, 5))
exp = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-05',
'2000-01-02'])
self.assertTrue(result.equals(exp))
# insertion of non-datetime should coerce to object index
result = idx.insert(1, 'inserted')
expected = Index([datetime(2000, 1, 4), 'inserted', datetime(2000, 1, 1),
datetime(2000, 1, 2)])
self.assertNotIsInstance(result, DatetimeIndex)
tm.assert_index_equal(result, expected)
idx = date_range('1/1/2000', periods=3, freq='M')
result = idx.insert(3, datetime(2000, 4, 30))
self.assertEqual(result.freqstr, 'M')
def test_map_bug_1677(self):
index = DatetimeIndex(['2012-04-25 09:30:00.393000'])
f = index.asof
result = index.map(f)
expected = np.array([f(index[0])])
self.assert_numpy_array_equal(result, expected)
def test_groupby_function_tuple_1677(self):
df = DataFrame(np.random.rand(100),
index=date_range("1/1/2000", periods=100))
monthly_group = df.groupby(lambda x: (x.year, x.month))
result = monthly_group.mean()
tm.assert_isinstance(result.index[0], tuple)
def test_append_numpy_bug_1681(self):
# another datetime64 bug
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
a = DataFrame()
c = DataFrame({'A': 'foo', 'B': dr}, index=dr)
result = a.append(c)
self.assertTrue((result['B'] == dr).all())
def test_isin(self):
index = tm.makeDateIndex(4)
result = index.isin(index)
self.assertTrue(result.all())
result = index.isin(list(index))
self.assertTrue(result.all())
assert_almost_equal(index.isin([index[2], 5]),
[False, False, True, False])
def test_union(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = Int64Index(np.arange(10, 30, 2))
result = i1.union(i2)
expected = Int64Index(np.arange(0, 30, 2))
self.assert_numpy_array_equal(result, expected)
def test_union_with_DatetimeIndex(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = DatetimeIndex(start='2012-01-03 00:00:00', periods=10, freq='D')
i1.union(i2) # Works
i2.union(i1) # Fails with "AttributeError: can't set attribute"
def test_time(self):
rng = pd.date_range('1/1/2000', freq='12min', periods=10)
result = pd.Index(rng).time
expected = [t.time() for t in rng]
self.assertTrue((result == expected).all())
def test_date(self):
rng = pd.date_range('1/1/2000', freq='12H', periods=10)
result = pd.Index(rng).date
expected = [t.date() for t in rng]
self.assertTrue((result == expected).all())
def test_does_not_convert_mixed_integer(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args, **kwargs:
randn(), r_idx_type='i', c_idx_type='dt')
cols = df.columns.join(df.index, how='outer')
joined = cols.join(df.columns)
self.assertEqual(cols.dtype, np.dtype('O'))
self.assertEqual(cols.dtype, joined.dtype)
assert_array_equal(cols.values, joined.values)
def test_slice_keeps_name(self):
# GH4226
st = pd.Timestamp('2013-07-01 00:00:00', tz='America/Los_Angeles')
et = pd.Timestamp('2013-07-02 00:00:00', tz='America/Los_Angeles')
dr = pd.date_range(st, et, freq='H', name='timebucket')
self.assertEqual(dr[1:].name, dr.name)
def test_join_self(self):
index = date_range('1/1/2000', periods=10)
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = index.join(index, how=kind)
self.assertIs(index, joined)
def assert_index_parameters(self, index):
assert index.freq == '40960N'
assert index.inferred_freq == '40960N'
def test_ns_index(self):
if _np_version_under1p7:
raise nose.SkipTest
nsamples = 400
ns = int(1e9 / 24414)
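# ~24414 Hz sampling -> int(1e9 / 24414) == 40960 ns between samples, which is
# the '40960N' frequency checked by assert_index_parameters above.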
dtstart = np.datetime64('2012-09-20T00:00:00')
dt = dtstart + np.arange(nsamples) * np.timedelta64(ns, 'ns')
freq = ns * pd.datetools.Nano()
index = pd.DatetimeIndex(dt, freq=freq, name='time')
self.assert_index_parameters(index)
new_index = pd.DatetimeIndex(start=index[0], end=index[-1], freq=index.freq)
self.assert_index_parameters(new_index)
def test_join_with_period_index(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args:
np.random.randint(2), c_idx_type='p',
r_idx_type='dt')
s = df.iloc[:5, 0]
joins = 'left', 'right', 'inner', 'outer'
for join in joins:
with tm.assertRaisesRegexp(ValueError, 'can only call with other '
'PeriodIndex-ed objects'):
df.columns.join(s.index, how=join)
def test_factorize(self):
idx1 = DatetimeIndex(['2014-01', '2014-01', '2014-02',
'2014-02', '2014-03', '2014-03'])
exp_arr = np.array([0, 0, 1, 1, 2, 2])
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
arr, idx = idx1.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# tz must be preserved
idx1 = idx1.tz_localize('Asia/Tokyo')
exp_idx = exp_idx.tz_localize('Asia/Tokyo')
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
idx2 = pd.DatetimeIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'])
exp_arr = np.array([2, 2, 1, 0, 2, 0])
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx2.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
exp_arr = np.array([0, 0, 1, 2, 0, 2])
exp_idx = DatetimeIndex(['2014-03', '2014-02', '2014-01'])
arr, idx = idx2.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# freq must be preserved
idx3 = date_range('2000-01', periods=4, freq='M', tz='Asia/Tokyo')
exp_arr = np.array([0, 1, 2, 3])
arr, idx = idx3.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(idx3))
class TestDatetime64(tm.TestCase):
"""
Also test support for datetime64[ns] in Series / DataFrame
"""
def setUp(self):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='Min')
self.series = Series(rand(len(dti)), dti)
def test_datetimeindex_accessors(self):
dti = DatetimeIndex(
freq='D', start=datetime(1998, 1, 1), periods=365)
self.assertEqual(dti.year[0], 1998)
self.assertEqual(dti.month[0], 1)
self.assertEqual(dti.day[0], 1)
self.assertEqual(dti.hour[0], 0)
self.assertEqual(dti.minute[0], 0)
self.assertEqual(dti.second[0], 0)
self.assertEqual(dti.microsecond[0], 0)
self.assertEqual(dti.dayofweek[0], 3)
self.assertEqual(dti.dayofyear[0], 1)
self.assertEqual(dti.dayofyear[120], 121)
self.assertEqual(dti.weekofyear[0], 1)
self.assertEqual(dti.weekofyear[120], 18)
self.assertEqual(dti.quarter[0], 1)
self.assertEqual(dti.quarter[120], 2)
self.assertEqual(dti.is_month_start[0], True)
self.assertEqual(dti.is_month_start[1], False)
self.assertEqual(dti.is_month_start[31], True)
self.assertEqual(dti.is_quarter_start[0], True)
self.assertEqual(dti.is_quarter_start[90], True)
self.assertEqual(dti.is_year_start[0], True)
self.assertEqual(dti.is_year_start[364], False)
self.assertEqual(dti.is_month_end[0], False)
self.assertEqual(dti.is_month_end[30], True)
self.assertEqual(dti.is_month_end[31], False)
self.assertEqual(dti.is_month_end[364], True)
self.assertEqual(dti.is_quarter_end[0], False)
self.assertEqual(dti.is_quarter_end[30], False)
self.assertEqual(dti.is_quarter_end[89], True)
self.assertEqual(dti.is_quarter_end[364], True)
self.assertEqual(dti.is_year_end[0], False)
self.assertEqual(dti.is_year_end[364], True)
self.assertEqual(len(dti.year), 365)
self.assertEqual(len(dti.month), 365)
self.assertEqual(len(dti.day), 365)
self.assertEqual(len(dti.hour), 365)
self.assertEqual(len(dti.minute), 365)
self.assertEqual(len(dti.second), 365)
self.assertEqual(len(dti.microsecond), 365)
self.assertEqual(len(dti.dayofweek), 365)
self.assertEqual(len(dti.dayofyear), 365)
self.assertEqual(len(dti.weekofyear), 365)
self.assertEqual(len(dti.quarter), 365)
self.assertEqual(len(dti.is_month_start), 365)
self.assertEqual(len(dti.is_month_end), 365)
self.assertEqual(len(dti.is_quarter_start), 365)
self.assertEqual(len(dti.is_quarter_end), 365)
self.assertEqual(len(dti.is_year_start), 365)
self.assertEqual(len(dti.is_year_end), 365)
dti = DatetimeIndex(
freq='BQ-FEB', start=datetime(1998, 1, 1), periods=4)
self.assertEqual(sum(dti.is_quarter_start), 0)
self.assertEqual(sum(dti.is_quarter_end), 4)
self.assertEqual(sum(dti.is_year_start), 0)
self.assertEqual(sum(dti.is_year_end), 1)
# Ensure is_start/end accessors throw ValueError for CustomBusinessDay, CBD requires np >= 1.7
if not _np_version_under1p7:
bday_egypt = offsets.CustomBusinessDay(weekmask='Sun Mon Tue Wed Thu')
dti = date_range(datetime(2013, 4, 30), periods=5, freq=bday_egypt)
self.assertRaises(ValueError, lambda: dti.is_month_start)
dti = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'])
self.assertEqual(dti.is_month_start[0], 1)
tests = [
(Timestamp('2013-06-01', offset='M').is_month_start, 1),
(Timestamp('2013-06-01', offset='BM').is_month_start, 0),
(Timestamp('2013-06-03', offset='M').is_month_start, 0),
(Timestamp('2013-06-03', offset='BM').is_month_start, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_month_end, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_quarter_end, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_year_end, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_month_start, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_quarter_start, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_year_start, 1),
(Timestamp('2013-03-31', offset='QS-FEB').is_month_end, 1),
(Timestamp('2013-03-31', offset='QS-FEB').is_quarter_end, 0),
(Timestamp('2013-03-31', offset='QS-FEB').is_year_end, 0),
(Timestamp('2013-02-01', offset='QS-FEB').is_month_start, 1),
(Timestamp('2013-02-01', offset='QS-FEB').is_quarter_start, 1),
(Timestamp('2013-02-01', offset='QS-FEB').is_year_start, 1),
(Timestamp('2013-06-30', offset='BQ').is_month_end, 0),
(Timestamp('2013-06-30', offset='BQ').is_quarter_end, 0),
(Timestamp('2013-06-30', offset='BQ').is_year_end, 0),
(Timestamp('2013-06-28', offset='BQ').is_month_end, 1),
(Timestamp('2013-06-28', offset='BQ').is_quarter_end, 1),
(Timestamp('2013-06-28', offset='BQ').is_year_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_month_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_quarter_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_year_end, 0),
(Timestamp('2013-06-28', offset='BQS-APR').is_month_end, 1),
(Timestamp('2013-06-28', offset='BQS-APR').is_quarter_end, 1),
(Timestamp('2013-03-29', offset='BQS-APR').is_year_end, 1),
(Timestamp('2013-11-01', offset='AS-NOV').is_year_start, 1),
(Timestamp('2013-10-31', offset='AS-NOV').is_year_end, 1)]
for ts, value in tests:
self.assertEqual(ts, value)
def test_nanosecond_field(self):
dti = DatetimeIndex(np.arange(10))
self.assert_numpy_array_equal(dti.nanosecond, np.arange(10))
def test_datetimeindex_diff(self):
dti1 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31),
periods=100)
dti2 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31),
periods=98)
self.assertEqual(len(dti1.diff(dti2)), 2)
def test_fancy_getitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
self.assertEqual(s[48], 48)
self.assertEqual(s['1/2/2009'], 48)
self.assertEqual(s['2009-1-2'], 48)
self.assertEqual(s[datetime(2009, 1, 2)], 48)
self.assertEqual(s[lib.Timestamp(datetime(2009, 1, 2))], 48)
self.assertRaises(KeyError, s.__getitem__, '2009-1-3')
assert_series_equal(s['3/6/2009':'2009-06-05'],
s[datetime(2009, 3, 6):datetime(2009, 6, 5)])
def test_fancy_setitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
s[48] = -1
self.assertEqual(s[48], -1)
s['1/2/2009'] = -2
self.assertEqual(s[48], -2)
s['1/2/2009':'2009-06-05'] = -3
self.assertTrue((s[48:54] == -3).all())
def test_datetimeindex_constructor(self):
arr = ['1/1/2005', '1/2/2005', 'Jn 3, 2005', '2005-01-04']
self.assertRaises(Exception, DatetimeIndex, arr)
arr = ['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04']
idx1 = DatetimeIndex(arr)
arr = [datetime(2005, 1, 1), '1/2/2005', '1/3/2005', '2005-01-04']
idx2 = DatetimeIndex(arr)
arr = [lib.Timestamp(datetime(2005, 1, 1)), '1/2/2005', '1/3/2005',
'2005-01-04']
idx3 = DatetimeIndex(arr)
arr = np.array(['1/1/2005', '1/2/2005', '1/3/2005',
'2005-01-04'], dtype='O')
idx4 = DatetimeIndex(arr)
arr = to_datetime(['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04'])
idx5 = DatetimeIndex(arr)
arr = to_datetime(
['1/1/2005', '1/2/2005', 'Jan 3, 2005', '2005-01-04'])
idx6 = DatetimeIndex(arr)
idx7 = DatetimeIndex(['12/05/2007', '25/01/2008'], dayfirst=True)
idx8 = DatetimeIndex(['2007/05/12', '2008/01/25'], dayfirst=False,
yearfirst=True)
self.assertTrue(idx7.equals(idx8))
for other in [idx2, idx3, idx4, idx5, idx6]:
self.assertTrue((idx1.values == other.values).all())
sdate = datetime(1999, 12, 25)
edate = datetime(2000, 1, 1)
idx = DatetimeIndex(start=sdate, freq='1B', periods=20)
self.assertEqual(len(idx), 20)
self.assertEqual(idx[0], sdate + 0 * dt.bday)
self.assertEqual(idx.freq, 'B')
idx = DatetimeIndex(end=edate, freq=('D', 5), periods=20)
self.assertEqual(len(idx), 20)
self.assertEqual(idx[-1], edate)
self.assertEqual(idx.freq, '5D')
idx1 = DatetimeIndex(start=sdate, end=edate, freq='W-SUN')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.Week(weekday=6))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
idx1 = DatetimeIndex(start=sdate, end=edate, freq='QS')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.QuarterBegin(startingMonth=1))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
idx1 = DatetimeIndex(start=sdate, end=edate, freq='BQ')
idx2 = DatetimeIndex(start=sdate, end=edate,
freq=dt.BQuarterEnd(startingMonth=12))
self.assertEqual(len(idx1), len(idx2))
self.assertEqual(idx1.offset, idx2.offset)
def test_dayfirst(self):
# GH 5917
arr = ['10/02/2014', '11/02/2014', '12/02/2014']
expected = DatetimeIndex([datetime(2014, 2, 10),
datetime(2014, 2, 11),
datetime(2014, 2, 12)])
idx1 = DatetimeIndex(arr, dayfirst=True)
idx2 = DatetimeIndex(np.array(arr), dayfirst=True)
idx3 = to_datetime(arr, dayfirst=True)
idx4 = to_datetime(np.array(arr), dayfirst=True)
idx5 = DatetimeIndex(Index(arr), dayfirst=True)
idx6 = DatetimeIndex(Series(arr), dayfirst=True)
self.assertTrue(expected.equals(idx1))
self.assertTrue(expected.equals(idx2))
self.assertTrue(expected.equals(idx3))
self.assertTrue(expected.equals(idx4))
self.assertTrue(expected.equals(idx5))
self.assertTrue(expected.equals(idx6))
def test_dti_snap(self):
dti = DatetimeIndex(['1/1/2002', '1/2/2002', '1/3/2002', '1/4/2002',
'1/5/2002', '1/6/2002', '1/7/2002'], freq='D')
res = dti.snap(freq='W-MON')
exp = date_range('12/31/2001', '1/7/2002', freq='w-mon')
exp = exp.repeat([3, 4])
self.assertTrue((res == exp).all())
res = dti.snap(freq='B')
exp = date_range('1/1/2002', '1/7/2002', freq='b')
exp = exp.repeat([1, 1, 1, 2, 2])
self.assertTrue((res == exp).all())
def test_dti_reset_index_round_trip(self):
dti = DatetimeIndex(start='1/1/2001', end='6/1/2001', freq='D')
d1 = DataFrame({'v': np.random.rand(len(dti))}, index=dti)
d2 = d1.reset_index()
self.assertEqual(d2.dtypes[0], np.dtype('M8[ns]'))
d3 = d2.set_index('index')
assert_frame_equal(d1, d3, check_names=False)
# #2329
stamp = datetime(2012, 11, 22)
df = DataFrame([[stamp, 12.1]], columns=['Date', 'Value'])
df = df.set_index('Date')
self.assertEqual(df.index[0], stamp)
self.assertEqual(df.reset_index()['Date'][0], stamp)
def test_dti_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = date_range('2011/01/01', periods=6, freq='M', tz='US/Eastern')
idx2 = date_range('2013', periods=6, freq='A', tz='Asia/Tokyo')
df = df.set_index(idx1)
self.assertTrue(df.index.equals(idx1))
df = df.reindex(idx2)
self.assertTrue(df.index.equals(idx2))
def test_datetimeindex_union_join_empty(self):
dti = DatetimeIndex(start='1/1/2001', end='2/1/2001', freq='D')
empty = Index([])
result = dti.union(empty)
tm.assert_isinstance(result, DatetimeIndex)
self.assertIs(result, result)
result = dti.join(empty)
tm.assert_isinstance(result, DatetimeIndex)
def test_series_set_value(self):
# #1561
dates = [datetime(2001, 1, 1), datetime(2001, 1, 2)]
index = DatetimeIndex(dates)
s = Series().set_value(dates[0], 1.)
s2 = s.set_value(dates[1], np.nan)
exp = Series([1., np.nan], index=index)
assert_series_equal(s2, exp)
# s = Series(index[:1], index[:1])
# s2 = s.set_value(dates[1], index[1])
# self.assertEqual(s2.values.dtype, 'M8[ns]')
@slow
def test_slice_locs_indexerror(self):
times = [datetime(2000, 1, 1) + timedelta(minutes=i * 10)
for i in range(100000)]
s = Series(lrange(100000), times)
s.ix[datetime(1900, 1, 1):datetime(2100, 1, 1)]
class TestSeriesDatetime64(tm.TestCase):
def setUp(self):
self.series = Series(date_range('1/1/2000', periods=10))
def test_auto_conversion(self):
series = Series(list(date_range('1/1/2000', periods=10)))
self.assertEqual(series.dtype, 'M8[ns]')
def test_constructor_cant_cast_datetime64(self):
self.assertRaises(TypeError, Series,
date_range('1/1/2000', periods=10), dtype=float)
def test_series_comparison_scalars(self):
val = datetime(2000, 1, 4)
result = self.series > val
expected = np.array([x > val for x in self.series])
self.assert_numpy_array_equal(result, expected)
val = self.series[5]
result = self.series > val
expected = np.array([x > val for x in self.series])
self.assert_numpy_array_equal(result, expected)
def test_between(self):
left, right = self.series[[2, 7]]
result = self.series.between(left, right)
expected = (self.series >= left) & (self.series <= right)
assert_series_equal(result, expected)
#----------------------------------------------------------------------
# NaT support
def test_NaT_scalar(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
val = series[3]
self.assertTrue(com.isnull(val))
series[2] = val
self.assertTrue(com.isnull(series[2]))
def test_set_none_nan(self):
self.series[3] = None
self.assertIs(self.series[3], NaT)
self.series[3:5] = None
self.assertIs(self.series[4], NaT)
self.series[5] = np.nan
self.assertIs(self.series[5], NaT)
self.series[5:7] = np.nan
self.assertIs(self.series[6], NaT)
def test_intercept_astype_object(self):
# this test no longer makes sense as series is by default already M8[ns]
expected = self.series.astype('object')
df = DataFrame({'a': self.series,
'b': np.random.randn(len(self.series))})
result = df.values.squeeze()
self.assertTrue((result[:, 0] == expected.values).all())
df = DataFrame({'a': self.series,
'b': ['foo'] * len(self.series)})
result = df.values.squeeze()
self.assertTrue((result[:, 0] == expected.values).all())
def test_union(self):
rng1 = date_range('1/1/1999', '1/1/2012', freq='MS')
s1 = Series(np.random.randn(len(rng1)), rng1)
rng2 = date_range('1/1/1980', '12/1/2001', freq='MS')
s2 = Series(np.random.randn(len(rng2)), rng2)
df = DataFrame({'s1': s1, 's2': s2})
self.assertEqual(df.index.values.dtype, np.dtype('M8[ns]'))
def test_intersection(self):
rng = date_range('6/1/2000', '6/15/2000', freq='D')
rng = rng.delete(5)
rng2 = date_range('5/15/2000', '6/20/2000', freq='D')
rng2 = DatetimeIndex(rng2.values)
result = rng.intersection(rng2)
self.assertTrue(result.equals(rng))
# empty same freq GH2129
rng = date_range('6/1/2000', '6/15/2000', freq='T')
result = rng[0:0].intersection(rng)
self.assertEqual(len(result), 0)
result = rng.intersection(rng[0:0])
self.assertEqual(len(result), 0)
def test_date_range_bms_bug(self):
# #1645
rng = date_range('1/1/2000', periods=10, freq='BMS')
ex_first = Timestamp('2000-01-03')
self.assertEqual(rng[0], ex_first)
def test_string_index_series_name_converted(self):
# #1644
df = DataFrame(np.random.randn(10, 4),
index=date_range('1/1/2000', periods=10))
result = df.ix['1/3/2000']
self.assertEqual(result.name, df.index[2])
result = df.T['1/3/2000']
self.assertEqual(result.name, df.index[2])
class TestTimestamp(tm.TestCase):
def test_class_ops(self):
_skip_if_no_pytz()
import pytz
def compare(x,y):
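# Timestamp.value is in nanoseconds; dividing by 1e9 compares at whole-second
# resolution so small clock differences between the two calls don't matter.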
self.assertEqual(int(Timestamp(x).value/1e9), int(Timestamp(y).value/1e9))
compare(Timestamp.now(),datetime.now())
compare(Timestamp.now('UTC'),datetime.now(pytz.timezone('UTC')))
compare(Timestamp.utcnow(),datetime.utcnow())
compare(Timestamp.today(),datetime.today())
def test_basics_nanos(self):
val = np.int64(946684800000000000).view('M8[ns]')
stamp = Timestamp(val.view('i8') + 500)
self.assertEqual(stamp.year, 2000)
self.assertEqual(stamp.month, 1)
self.assertEqual(stamp.microsecond, 0)
self.assertEqual(stamp.nanosecond, 500)
def test_unit(self):
def check(val,unit=None,h=1,s=1,us=0):
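# Each check() call asserts that Timestamp(val, unit=unit) reconstructs
# 2000-01-01 01:01:01, with the expected hour/second/microsecond overridable
# via the h/s/us arguments.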
stamp = Timestamp(val, unit=unit)
self.assertEqual(stamp.year, 2000)
self.assertEqual(stamp.month, 1)
self.assertEqual(stamp.day, 1)
self.assertEqual(stamp.hour, h)
if unit != 'D':
self.assertEqual(stamp.minute, 1)
self.assertEqual(stamp.second, s)
self.assertEqual(stamp.microsecond, us)
else:
self.assertEqual(stamp.minute, 0)
self.assertEqual(stamp.second, 0)
self.assertEqual(stamp.microsecond, 0)
self.assertEqual(stamp.nanosecond, 0)
ts = Timestamp('20000101 01:01:01')
val = ts.value
days = (ts - Timestamp('1970-01-01')).days
check(val)
check(val/long(1000),unit='us')
check(val/long(1000000),unit='ms')
check(val/long(1000000000),unit='s')
check(days,unit='D',h=0)
# using truediv, so these are like floats
if compat.PY3:
check((val+500000)/long(1000000000),unit='s',us=500)
check((val+500000000)/long(1000000000),unit='s',us=500000)
check((val+500000)/long(1000000),unit='ms',us=500)
# get chopped in py2
else:
check((val+500000)/long(1000000000),unit='s')
check((val+500000000)/long(1000000000),unit='s')
check((val+500000)/long(1000000),unit='ms')
# ok
check((val+500000)/long(1000),unit='us',us=500)
check((val+500000000)/long(1000000),unit='ms',us=500000)
# floats
check(val/1000.0 + 5,unit='us',us=5)
check(val/1000.0 + 5000,unit='us',us=5000)
check(val/1000000.0 + 0.5,unit='ms',us=500)
check(val/1000000.0 + 0.005,unit='ms',us=5)
check(val/1000000000.0 + 0.5,unit='s',us=500000)
check(days + 0.5,unit='D',h=12)
# nan
result = Timestamp(np.nan)
self.assertIs(result, NaT)
result = Timestamp(None)
self.assertIs(result, NaT)
result = Timestamp(iNaT)
self.assertIs(result, NaT)
result = Timestamp(NaT)
self.assertIs(result, NaT)
def test_comparison(self):
# 5-18-2012 00:00:00.000
stamp = long(1337299200000000000)
val = Timestamp(stamp)
self.assertEqual(val, val)
self.assertFalse(val != val)
self.assertFalse(val < val)
self.assertTrue(val <= val)
self.assertFalse(val > val)
self.assertTrue(val >= val)
other = datetime(2012, 5, 18)
self.assertEqual(val, other)
self.assertFalse(val != other)
self.assertFalse(val < other)
self.assertTrue(val <= other)
self.assertFalse(val > other)
self.assertTrue(val >= other)
other = Timestamp(stamp + 100)
self.assertNotEqual(val, other)
self.assertNotEqual(val, other)
self.assertTrue(val < other)
self.assertTrue(val <= other)
self.assertTrue(other > val)
self.assertTrue(other >= val)
def test_cant_compare_tz_naive_w_aware(self):
_skip_if_no_pytz()
# #1404
a = Timestamp('3/12/2012')
b = Timestamp('3/12/2012', tz='utc')
self.assertRaises(Exception, a.__eq__, b)
self.assertRaises(Exception, a.__ne__, b)
self.assertRaises(Exception, a.__lt__, b)
self.assertRaises(Exception, a.__gt__, b)
self.assertRaises(Exception, b.__eq__, a)
self.assertRaises(Exception, b.__ne__, a)
self.assertRaises(Exception, b.__lt__, a)
self.assertRaises(Exception, b.__gt__, a)
if sys.version_info < (3, 3):
self.assertRaises(Exception, a.__eq__, b.to_pydatetime())
self.assertRaises(Exception, a.to_pydatetime().__eq__, b)
else:
self.assertFalse(a == b.to_pydatetime())
self.assertFalse(a.to_pydatetime() == b)
def test_delta_preserve_nanos(self):
val = Timestamp(long(1337299200000000123))
result = val + timedelta(1)
self.assertEqual(result.nanosecond, val.nanosecond)
def test_frequency_misc(self):
self.assertEqual(fmod.get_freq_group('T'),
fmod.FreqGroup.FR_MIN)
code, stride = fmod.get_freq_code(offsets.Hour())
self.assertEqual(code, fmod.FreqGroup.FR_HR)
code, stride = fmod.get_freq_code((5, 'T'))
self.assertEqual(code, fmod.FreqGroup.FR_MIN)
self.assertEqual(stride, 5)
offset = offsets.Hour()
result = fmod.to_offset(offset)
self.assertEqual(result, offset)
result = fmod.to_offset((5, 'T'))
expected = offsets.Minute(5)
self.assertEqual(result, expected)
self.assertRaises(ValueError, fmod.get_freq_code, (5, 'baz'))
self.assertRaises(ValueError, fmod.to_offset, '100foo')
self.assertRaises(ValueError, fmod.to_offset, ('', ''))
result = fmod.get_standard_freq(offsets.Hour())
self.assertEqual(result, 'H')
def test_hash_equivalent(self):
d = {datetime(2011, 1, 1): 5}
stamp = Timestamp(datetime(2011, 1, 1))
self.assertEqual(d[stamp], 5)
def test_timestamp_compare_scalars(self):
# case where ndim == 0
lhs = np.datetime64(datetime(2013, 12, 6))
rhs = Timestamp('now')
nat = Timestamp('nat')
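# Each comparison operator is paired with its reflection, so that
# left <op> right must give the same answer as right <reflected-op> left.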
ops = {'gt': 'lt', 'lt': 'gt', 'ge': 'le', 'le': 'ge', 'eq': 'eq',
'ne': 'ne'}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
if pd._np_version_under1p7:
# you have to convert to timestamp for this to work with numpy
# scalars
expected = left_f(Timestamp(lhs), rhs)
# otherwise a TypeError is thrown
if left not in ('eq', 'ne'):
with tm.assertRaises(TypeError):
left_f(lhs, rhs)
else:
expected = left_f(lhs, rhs)
result = right_f(rhs, lhs)
self.assertEqual(result, expected)
expected = left_f(rhs, nat)
result = right_f(nat, rhs)
self.assertEqual(result, expected)
def test_timestamp_compare_series(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH4982
s = Series(date_range('20010101', periods=10), name='dates')
s_nat = s.copy(deep=True)
s[0] = pd.Timestamp('nat')
s[3] = pd.Timestamp('nat')
ops = {'lt': 'gt', 'le': 'ge', 'eq': 'eq', 'ne': 'ne'}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
expected = left_f(s, Timestamp('20010109'))
result = right_f(Timestamp('20010109'), s)
tm.assert_series_equal(result, expected)
# nats
expected = left_f(s, Timestamp('nat'))
result = right_f(Timestamp('nat'), s)
tm.assert_series_equal(result, expected)
# compare to timestamp with series containing nats
expected = left_f(s_nat, Timestamp('20010109'))
result = right_f(Timestamp('20010109'), s_nat)
tm.assert_series_equal(result, expected)
# compare to nat with series containing nats
expected = left_f(s_nat, Timestamp('nat'))
result = right_f(Timestamp('nat'), s_nat)
tm.assert_series_equal(result, expected)
class TestSlicing(tm.TestCase):
def test_slice_year(self):
dti = DatetimeIndex(freq='B', start=datetime(2005, 1, 1), periods=500)
s = Series(np.arange(len(dti)), index=dti)
result = s['2005']
expected = s[s.index.year == 2005]
assert_series_equal(result, expected)
df = DataFrame(np.random.rand(len(dti), 5), index=dti)
result = df.ix['2005']
expected = df[df.index.year == 2005]
assert_frame_equal(result, expected)
rng = date_range('1/1/2000', '1/1/2010')
result = rng.get_loc('2009')
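# 2000-01-01 through 2008-12-31 spans 3288 days (2000, 2004 and 2008 are leap
# years), and 2009 contributes 365 more, hence slice(3288, 3653).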
expected = slice(3288, 3653)
self.assertEqual(result, expected)
def test_slice_quarter(self):
dti = DatetimeIndex(freq='D', start=datetime(2000, 6, 1), periods=500)
s = Series(np.arange(len(dti)), index=dti)
self.assertEqual(len(s['2001Q1']), 90)
df = DataFrame(np.random.rand(len(dti), 5), index=dti)
self.assertEqual(len(df.ix['1Q01']), 90)
def test_slice_month(self):
dti = DatetimeIndex(freq='D', start=datetime(2005, 1, 1), periods=500)
s = Series(np.arange(len(dti)), index=dti)
self.assertEqual(len(s['2005-11']), 30)
df = DataFrame(np.random.rand(len(dti), 5), index=dti)
self.assertEqual(len(df.ix['2005-11']), 30)
assert_series_equal(s['2005-11'], s['11-2005'])
def test_partial_slice(self):
rng = DatetimeIndex(freq='D', start=datetime(2005, 1, 1), periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-05':'2006-02']
expected = s['20050501':'20060228']
assert_series_equal(result, expected)
result = s['2005-05':]
expected = s['20050501':]
assert_series_equal(result, expected)
result = s[:'2006-02']
expected = s[:'20060228']
assert_series_equal(result, expected)
result = s['2005-1-1']
self.assertEqual(result, s.irow(0))
self.assertRaises(Exception, s.__getitem__, '2004-12-31')
def test_partial_slice_daily(self):
rng = DatetimeIndex(freq='H', start=datetime(2005, 1, 31), periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-1-31']
assert_series_equal(result, s.ix[:24])
self.assertRaises(Exception, s.__getitem__, '2004-12-31 00')
def test_partial_slice_hourly(self):
rng = DatetimeIndex(freq='T', start=datetime(2005, 1, 1, 20, 0, 0),
periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-1-1']
assert_series_equal(result, s.ix[:60 * 4])
result = s['2005-1-1 20']
assert_series_equal(result, s.ix[:60])
self.assertEqual(s['2005-1-1 20:00'], s.ix[0])
self.assertRaises(Exception, s.__getitem__, '2004-12-31 00:15')
def test_partial_slice_minutely(self):
rng = DatetimeIndex(freq='S', start=datetime(2005, 1, 1, 23, 59, 0),
periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-1-1 23:59']
assert_series_equal(result, s.ix[:60])
result = s['2005-1-1']
assert_series_equal(result, s.ix[:60])
self.assertEqual(s[Timestamp('2005-1-1 23:59:00')], s.ix[0])
self.assertRaises(Exception, s.__getitem__, '2004-12-31 00:00:00')
def test_partial_slicing_with_multiindex(self):
# GH 4758
# partial string indexing with a multi-index was buggy
df = DataFrame({'ACCOUNT':["ACCT1", "ACCT1", "ACCT1", "ACCT2"],
'TICKER':["ABC", "MNP", "XYZ", "XYZ"],
'val':[1,2,3,4]},
index=date_range("2013-06-19 09:30:00", periods=4, freq='5T'))
df_multi = df.set_index(['ACCOUNT', 'TICKER'], append=True)
expected = DataFrame([[1]],index=Index(['ABC'],name='TICKER'),columns=['val'])
result = df_multi.loc[('2013-06-19 09:30:00', 'ACCT1')]
assert_frame_equal(result, expected)
expected = df_multi.loc[(pd.Timestamp('2013-06-19 09:30:00', tz=None), 'ACCT1', 'ABC')]
result = df_multi.loc[('2013-06-19 09:30:00', 'ACCT1', 'ABC')]
assert_series_equal(result, expected)
# this is a KeyError as we don't do partial string selection on multi-levels
def f():
df_multi.loc[('2013-06-19', 'ACCT1', 'ABC')]
self.assertRaises(KeyError, f)
# GH 4294
# partial slice on a series mi
s = pd.DataFrame(randn(1000, 1000), index=pd.date_range('2000-1-1', periods=1000)).stack()
s2 = s[:-1].copy()
expected = s2['2000-1-4']
result = s2[pd.Timestamp('2000-1-4')]
assert_series_equal(result, expected)
result = s[pd.Timestamp('2000-1-4')]
expected = s['2000-1-4']
assert_series_equal(result, expected)
df2 = pd.DataFrame(s)
expected = df2.ix['2000-1-4']
result = df2.ix[pd.Timestamp('2000-1-4')]
assert_frame_equal(result, expected)
def test_date_range_normalize(self):
snap = datetime.today()
n = 50
rng = date_range(snap, periods=n, normalize=False, freq='2D')
offset = timedelta(2)
values = np.array([snap + i * offset for i in range(n)],
dtype='M8[ns]')
self.assert_numpy_array_equal(rng, values)
rng = date_range(
'1/1/2000 08:15', periods=n, normalize=False, freq='B')
the_time = time(8, 15)
for val in rng:
self.assertEqual(val.time(), the_time)
def test_timedelta(self):
# this is valid too
index = date_range('1/1/2000', periods=50, freq='B')
shifted = index + timedelta(1)
back = shifted + timedelta(-1)
self.assertTrue(tm.equalContents(index, back))
self.assertEqual(shifted.freq, index.freq)
self.assertEqual(shifted.freq, back.freq)
result = index - timedelta(1)
expected = index + timedelta(-1)
self.assertTrue(result.equals(expected))
# GH4134, buggy with timedeltas
rng = date_range('2013', '2014')
s = Series(rng)
result1 = rng - pd.offsets.Hour(1)
result2 = DatetimeIndex(s - np.timedelta64(100000000))
result3 = rng - np.timedelta64(100000000)
result4 = DatetimeIndex(s - pd.offsets.Hour(1))
self.assertTrue(result1.equals(result4))
self.assertTrue(result2.equals(result3))
def test_shift(self):
ts = Series(np.random.randn(5),
index=date_range('1/1/2000', periods=5, freq='H'))
result = ts.shift(1, freq='5T')
exp_index = ts.index.shift(1, freq='5T')
self.assertTrue(result.index.equals(exp_index))
# GH #1063, multiple of same base
result = ts.shift(1, freq='4H')
exp_index = ts.index + datetools.Hour(4)
self.assertTrue(result.index.equals(exp_index))
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
self.assertRaises(ValueError, idx.shift, 1)
def test_setops_preserve_freq(self):
rng = date_range('1/1/2000', '1/1/2002')
result = rng[:50].union(rng[50:100])
self.assertEqual(result.freq, rng.freq)
result = rng[:50].union(rng[30:100])
self.assertEqual(result.freq, rng.freq)
result = rng[:50].union(rng[60:100])
self.assertIsNone(result.freq)
result = rng[:50].intersection(rng[25:75])
self.assertEqual(result.freqstr, 'D')
nofreq = DatetimeIndex(list(rng[25:75]))
result = rng[:50].union(nofreq)
self.assertEqual(result.freq, rng.freq)
result = rng[:50].intersection(nofreq)
self.assertEqual(result.freq, rng.freq)
def test_min_max(self):
rng = date_range('1/1/2000', '12/31/2000')
rng2 = rng.take(np.random.permutation(len(rng)))
the_min = rng2.min()
the_max = rng2.max()
tm.assert_isinstance(the_min, Timestamp)
tm.assert_isinstance(the_max, Timestamp)
self.assertEqual(the_min, rng[0])
self.assertEqual(the_max, rng[-1])
self.assertEqual(rng.min(), rng[0])
self.assertEqual(rng.max(), rng[-1])
def test_min_max_series(self):
rng = date_range('1/1/2000', periods=10, freq='4h')
lvls = ['A', 'A', 'A', 'B', 'B', 'B', 'C', 'C', 'C', 'C']
df = DataFrame({'TS': rng, 'V': np.random.randn(len(rng)),
'L': lvls})
result = df.TS.max()
exp = Timestamp(df.TS.iget(-1))
self.assertTrue(isinstance(result, Timestamp))
self.assertEqual(result, exp)
result = df.TS.min()
exp = Timestamp(df.TS.iget(0))
self.assertTrue(isinstance(result, Timestamp))
self.assertEqual(result, exp)
def test_from_M8_structured(self):
dates = [(datetime(2012, 9, 9, 0, 0),
datetime(2012, 9, 8, 15, 10))]
arr = np.array(dates,
dtype=[('Date', 'M8[us]'), ('Forecasting', 'M8[us]')])
df = DataFrame(arr)
self.assertEqual(df['Date'][0], dates[0][0])
self.assertEqual(df['Forecasting'][0], dates[0][1])
s = Series(arr['Date'])
tm.assert_isinstance(s[0], Timestamp)
self.assertEqual(s[0], dates[0][0])
s = Series.from_array(arr['Date'], Index([0]))
self.assertEqual(s[0], dates[0][0])
def test_get_level_values_box(self):
from pandas import MultiIndex
dates = date_range('1/1/2000', periods=4)
levels = [dates, [0, 1]]
labels = [[0, 0, 1, 1, 2, 2, 3, 3],
[0, 1, 0, 1, 0, 1, 0, 1]]
index = MultiIndex(levels=levels, labels=labels)
self.assertTrue(isinstance(index.get_level_values(0)[0], Timestamp))
def test_frame_apply_dont_convert_datetime64(self):
from pandas.tseries.offsets import BDay
df = DataFrame({'x1': [datetime(1996, 1, 1)]})
df = df.applymap(lambda x: x + BDay())
df = df.applymap(lambda x: x + BDay())
self.assertTrue(df.x1.dtype == 'M8[ns]')
def test_date_range_fy5252(self):
dr = date_range(start="2013-01-01",
periods=2,
freq=offsets.FY5253(startingMonth=1,
weekday=3,
variation="nearest"))
self.assertEqual(dr[0], Timestamp('2013-01-31'))
self.assertEqual(dr[1], Timestamp('2014-01-30'))
class TimeConversionFormats(tm.TestCase):
def test_to_datetime_format(self):
values = ['1/1/2000', '1/2/2000', '1/3/2000']
results1 = [ Timestamp('20000101'), Timestamp('20000201'),
Timestamp('20000301') ]
results2 = [ Timestamp('20000101'), Timestamp('20000102'),
Timestamp('20000103') ]
for vals, expecteds in [ (values, (Index(results1), Index(results2))),
(Series(values),(Series(results1), Series(results2))),
(values[0], (results1[0], results2[0])),
(values[1], (results1[1], results2[1])),
(values[2], (results1[2], results2[2])) ]:
for i, fmt in enumerate(['%d/%m/%Y', '%m/%d/%Y']):
result = to_datetime(vals, format=fmt)
expected = expecteds[i]
if isinstance(expected, Series):
assert_series_equal(result, Series(expected))
elif isinstance(expected, Timestamp):
self.assertEqual(result, expected)
else:
self.assertTrue(result.equals(expected))
def test_to_datetime_format_YYYYMMDD(self):
s = Series([19801222,19801222] + [19810105]*5)
expected = Series([ Timestamp(x) for x in s.apply(str) ])
result = to_datetime(s,format='%Y%m%d')
assert_series_equal(result, expected)
result = to_datetime(s.apply(str),format='%Y%m%d')
assert_series_equal(result, expected)
# with NaT
expected = Series([Timestamp("19801222"),Timestamp("19801222")] + [Timestamp("19810105")]*5)
expected[2] = np.nan
s[2] = np.nan
result = to_datetime(s,format='%Y%m%d')
assert_series_equal(result, expected)
# string with NaT
s = s.apply(str)
s[2] = 'nat'
result = to_datetime(s,format='%Y%m%d')
assert_series_equal(result, expected)
def test_to_datetime_format_microsecond(self):
val = '01-Apr-2011 00:00:01.978'
format = '%d-%b-%Y %H:%M:%S.%f'
result = to_datetime(val, format=format)
exp = dt.datetime.strptime(val, format)
self.assertEqual(result, exp)
def test_to_datetime_format_time(self):
data = [
['01/10/2010 15:20', '%m/%d/%Y %H:%M', Timestamp('2010-01-10 15:20')],
['01/10/2010 05:43', '%m/%d/%Y %I:%M', Timestamp('2010-01-10 05:43')],
['01/10/2010 13:56:01', '%m/%d/%Y %H:%M:%S', Timestamp('2010-01-10 13:56:01')]#,
#['01/10/2010 08:14 PM', '%m/%d/%Y %I:%M %p', Timestamp('2010-01-10 20:14')],
#['01/10/2010 07:40 AM', '%m/%d/%Y %I:%M %p', Timestamp('2010-01-10 07:40')],
#['01/10/2010 09:12:56 AM', '%m/%d/%Y %I:%M:%S %p', Timestamp('2010-01-10 09:12:56')]
]
for s, format, dt in data:
self.assertEqual(to_datetime(s, format=format), dt)
def test_to_datetime_format_weeks(self):
data = [
['2009324', '%Y%W%w', Timestamp('2009-08-13')],
['2013020', '%Y%U%w', Timestamp('2013-01-13')]
]
for s, format, dt in data:
self.assertEqual(to_datetime(s, format=format), dt)
class TestToDatetimeInferFormat(tm.TestCase):
def test_to_datetime_infer_datetime_format_consistent_format(self):
time_series = pd.Series(
pd.date_range('20000101', periods=50, freq='H')
)
test_formats = [
'%m-%d-%Y',
'%m/%d/%Y %H:%M:%S.%f',
'%Y-%m-%dT%H:%M:%S.%f',
]
for test_format in test_formats:
s_as_dt_strings = time_series.apply(
lambda x: x.strftime(test_format)
)
with_format = pd.to_datetime(s_as_dt_strings, format=test_format)
no_infer = pd.to_datetime(
s_as_dt_strings, infer_datetime_format=False
)
yes_infer = pd.to_datetime(
s_as_dt_strings, infer_datetime_format=True
)
# Whether the format is explicitly passed, it is inferred, or
# it is not inferred, the results should all be the same
self.assert_numpy_array_equal(with_format, no_infer)
self.assert_numpy_array_equal(no_infer, yes_infer)
def test_to_datetime_infer_datetime_format_inconsistent_format(self):
test_series = pd.Series(
np.array([
'01/01/2011 00:00:00',
'01-02-2011 00:00:00',
'2011-01-03T00:00:00',
]))
# When the format is inconsistent, infer_datetime_format should just
# fall back to the default parsing
self.assert_numpy_array_equal(
pd.to_datetime(test_series, infer_datetime_format=False),
pd.to_datetime(test_series, infer_datetime_format=True)
)
test_series = pd.Series(
np.array([
'Jan/01/2011',
'Feb/01/2011',
'Mar/01/2011',
]))
self.assert_numpy_array_equal(
pd.to_datetime(test_series, infer_datetime_format=False),
pd.to_datetime(test_series, infer_datetime_format=True)
)
def test_to_datetime_infer_datetime_format_series_with_nans(self):
test_series = pd.Series(
np.array([
'01/01/2011 00:00:00',
np.nan,
'01/03/2011 00:00:00',
np.nan,
]))
self.assert_numpy_array_equal(
pd.to_datetime(test_series, infer_datetime_format=False),
pd.to_datetime(test_series, infer_datetime_format=True)
)
def test_to_datetime_infer_datetime_format_series_starting_with_nans(self):
test_series = pd.Series(
np.array([
np.nan,
np.nan,
'01/01/2011 00:00:00',
'01/02/2011 00:00:00',
'01/03/2011 00:00:00',
]))
self.assert_numpy_array_equal(
pd.to_datetime(test_series, infer_datetime_format=False),
pd.to_datetime(test_series, infer_datetime_format=True)
)
class TestGuessDatetimeFormat(tm.TestCase):
def test_guess_datetime_format_with_parseable_formats(self):
dt_string_to_format = (
('20111230', '%Y%m%d'),
('2011-12-30', '%Y-%m-%d'),
('30-12-2011', '%d-%m-%Y'),
('2011-12-30 00:00:00', '%Y-%m-%d %H:%M:%S'),
('2011-12-30T00:00:00', '%Y-%m-%dT%H:%M:%S'),
('2011-12-30 00:00:00.000000', '%Y-%m-%d %H:%M:%S.%f'),
)
for dt_string, dt_format in dt_string_to_format:
self.assertEqual(
tools._guess_datetime_format(dt_string),
dt_format
)
def test_guess_datetime_format_with_dayfirst(self):
ambiguous_string = '01/01/2011'
self.assertEqual(
tools._guess_datetime_format(ambiguous_string, dayfirst=True),
'%d/%m/%Y'
)
self.assertEqual(
tools._guess_datetime_format(ambiguous_string, dayfirst=False),
'%m/%d/%Y'
)
def test_guess_datetime_format_with_locale_specific_formats(self):
# The month names will vary depending on the locale, in which
# case these won't be parsed properly (dateutil can't parse them)
_skip_if_has_locale()
dt_string_to_format = (
('30/Dec/2011', '%d/%b/%Y'),
('30/December/2011', '%d/%B/%Y'),
('30/Dec/2011 00:00:00', '%d/%b/%Y %H:%M:%S'),
)
for dt_string, dt_format in dt_string_to_format:
self.assertEqual(
tools._guess_datetime_format(dt_string),
dt_format
)
def test_guess_datetime_format_invalid_inputs(self):
# A datetime string must include a year, month and a day for it
# to be guessable, in addition to being a string that looks like
# a datetime
invalid_dts = [
'2013',
'01/2013',
'12:00:00',
'1/1/1/1',
'this_is_not_a_datetime',
'51a',
9,
datetime(2011, 1, 1),
]
for invalid_dt in invalid_dts:
self.assertTrue(tools._guess_datetime_format(invalid_dt) is None)
def test_guess_datetime_format_for_array(self):
expected_format = '%Y-%m-%d %H:%M:%S.%f'
dt_string = datetime(2011, 12, 30, 0, 0, 0).strftime(expected_format)
test_arrays = [
np.array([dt_string, dt_string, dt_string], dtype='O'),
np.array([np.nan, np.nan, dt_string], dtype='O'),
np.array([dt_string, 'random_string'], dtype='O'),
]
for test_array in test_arrays:
self.assertEqual(
tools._guess_datetime_format_for_array(test_array),
expected_format
)
format_for_string_of_nans = tools._guess_datetime_format_for_array(
np.array([np.nan, np.nan, np.nan], dtype='O')
)
self.assertTrue(format_for_string_of_nans is None)
class TestTimestampToJulianDate(tm.TestCase):
def test_compare_1700(self):
r = Timestamp('1700-06-23').to_julian_date()
self.assertEqual(r, 2342145.5)
def test_compare_2000(self):
r = Timestamp('2000-04-12').to_julian_date()
self.assertEqual(r, 2451646.5)
def test_compare_2100(self):
r = Timestamp('2100-08-12').to_julian_date()
self.assertEqual(r, 2488292.5)
def test_compare_hour01(self):
r = Timestamp('2000-08-12T01:00:00').to_julian_date()
self.assertEqual(r, 2451768.5416666666666666)
def test_compare_hour13(self):
r = Timestamp('2000-08-12T13:00:00').to_julian_date()
self.assertEqual(r, 2451769.0416666666666666)
class TestDateTimeIndexToJulianDate(tm.TestCase):
def test_1700(self):
r1 = Float64Index([2345897.5,
2345898.5,
2345899.5,
2345900.5,
2345901.5])
r2 = date_range(start=Timestamp('1710-10-01'),
periods=5,
freq='D').to_julian_date()
self.assertIsInstance(r2, Float64Index)
tm.assert_index_equal(r1, r2)
def test_2000(self):
r1 = Float64Index([2451601.5,
2451602.5,
2451603.5,
2451604.5,
2451605.5])
r2 = date_range(start=Timestamp('2000-02-27'),
periods=5,
freq='D').to_julian_date()
self.assertIsInstance(r2, Float64Index)
tm.assert_index_equal(r1, r2)
def test_hour(self):
r1 = Float64Index([2451601.5,
2451601.5416666666666666,
2451601.5833333333333333,
2451601.625,
2451601.6666666666666666])
r2 = date_range(start=Timestamp('2000-02-27'),
periods=5,
freq='H').to_julian_date()
self.assertIsInstance(r2, Float64Index)
tm.assert_index_equal(r1, r2)
def test_minute(self):
r1 = Float64Index([2451601.5,
2451601.5006944444444444,
2451601.5013888888888888,
2451601.5020833333333333,
2451601.5027777777777777])
r2 = date_range(start=Timestamp('2000-02-27'),
periods=5,
freq='T').to_julian_date()
self.assertIsInstance(r2, Float64Index)
tm.assert_index_equal(r1, r2)
from unittest.mock import MagicMock, patch
import pandas as pd
import pytest
from evalml import AutoMLSearch
from evalml.exceptions import ObjectiveNotFoundError
from evalml.model_family import ModelFamily
from evalml.objectives import MeanSquaredLogError, RootMeanSquaredLogError
from evalml.pipelines import (
MeanBaselineRegressionPipeline,
PipelineBase,
TimeSeriesBaselineRegressionPipeline
)
from evalml.pipelines.components.utils import get_estimators
from evalml.pipelines.utils import make_pipeline
from evalml.preprocessing import TimeSeriesSplit
from evalml.problem_types import ProblemTypes
def test_init(X_y_regression):
X, y = X_y_regression
automl = AutoMLSearch(X_train=X, y_train=y, problem_type='regression', objective="R2", max_iterations=3, n_jobs=1)
automl.search()
assert automl.n_jobs == 1
assert isinstance(automl.rankings, pd.DataFrame)
assert isinstance(automl.best_pipeline, PipelineBase)
automl.best_pipeline.predict(X)
# test with dataframes
automl = AutoMLSearch(pd.DataFrame(X), pd.Series(y), problem_type='regression', objective="R2", max_iterations=3, n_jobs=1)
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 4 15:34:25 2020
@author: diego
"""
import pandas as pd
import os
import sqlite3
from pandas_datareader import DataReader
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from fuzzywuzzy import process
import update_db
pd.set_option('display.width', 400)
pd.set_option('display.max_columns', 10)
conn = sqlite3.connect(os.path.join('data', 'fundos.db'))
db = conn.cursor()
update_db.update_pipeline()
#%% functions
def get_fund_id():
"""
Use this function when you want to find the fund_id using the fund name.
Returns
-------
fund_id: string
The CNPJ of the fund, that is the brazilian tax id and used in this
script as fund_id.
"""
funds = pd.read_sql("SELECT DISTINCT denom_social FROM inf_cadastral", conn)
funds['denom_social_query'] = funds['denom_social'].str.normalize('NFKD').str.encode('ascii', errors='ignore').str.decode('utf-8')
funds_list = funds['denom_social_query'].to_list()
x = 0
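# Interactive loop: fuzzy-match the typed name against the registered fund
# names, show the top 5 candidates and let the user pick one (or 0 to retry).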
while x == 0:
name = input("Mutual Fund name: ")
result = process.extract(name.upper(), funds_list, limit=5)
for i in range(1,6):
print(str(i)+' '+result[i-1][0])
fund = -1
while fund not in range(0,6):
query = input("Type fund number or 0 to query again: ")
try:
if int(query) in range(0,6):
fund = int(query)
if fund != 0:
x = 1
except ValueError:
print("Type a number from 1 to 5 to choose, or 0 to try again.")
fund = result[fund-1][0]
idx = funds[funds['denom_social_query'] == fund].index[0]
fund = funds.loc[idx]['denom_social']
fund_id = pd.read_sql(f"SELECT cnpj FROM inf_cadastral WHERE denom_social = '{fund}'", conn)
return fund_id.values[0][0]
def get_returns(fund_id, start='all', end='all'):
"""
Returns a pandas dataframe with log returns and a net asset value (nav) series
that starts at 1.
Parameters
----------
fund_id : string
Three options here: fund CNPJ, 'ibov', 'cdi'.
start : string, optional
Date formatted as '2019-12-31' or 'all'. The default is 'all'.
end : string, optional
Date formatted as '2019-12-31' or 'all'. The default is 'all'.
Returns
-------
log_returns: pandas dataframe
Log returns and net asset value that starts at 1.
"""
if start == 'all':
start = pd.to_datetime('1990-01-01')
else:
start = pd.to_datetime(start)
if end == 'all':
end = pd.to_datetime('2100-01-01')
else:
end = pd.to_datetime(end)
if fund_id == 'ibov':
returns = DataReader('^BVSP', 'yahoo', start=start+pd.DateOffset(-7), end=end )[['Adj Close']]
returns['d_factor'] = returns['Adj Close'].pct_change().fillna(0) + 1
elif fund_id == 'cdi':
returns = pd.read_sql(f"SELECT date, d_factor FROM cdi WHERE date >= '{start}' AND date <= '{end}' ORDER BY date", conn, index_col='date')
else:
returns = pd.read_sql(f"SELECT date, quota FROM quotas WHERE cnpj = '{fund_id}' AND date >= '{start+pd.DateOffset(-7)}' AND date <= '{end}' ORDER BY date", conn, index_col='date')
returns['d_factor'] = (returns['quota'].pct_change().fillna(0)) + 1
returns = returns[['d_factor']]
returns['log_return'] = np.log(returns['d_factor'])
returns.index = pd.to_datetime(returns.index)
returns = returns[returns.index >= start]
returns['nav'] = np.exp(returns['log_return'].cumsum())
return returns[['log_return', 'nav']]
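# Worked check of the log-return / NAV identity used in get_returns, on synthetic prices
# (the numbers are illustrative only and are not taken from the quota database):
# nav = exp(cumsum(log_return)) telescopes to price_n / price_0.
import numpy as np
import pandas as pd
_prices = pd.Series([100.0, 101.0, 99.5, 102.0])
_log_ret = np.log(_prices.pct_change().fillna(0) + 1)
_nav = np.exp(_log_ret.cumsum())                               # starts at 1 by construction
assert abs(_nav.iloc[-1] - _prices.iloc[-1] / _prices.iloc[0]) < 1e-12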
def fund_performance(fund_id, start='all', end='all', benchmark='cdi', plot=True):
"""
Creates two dataframes, one with the accumulated returns and the second
with the performance table of the fund.
Parameters
----------
fund_id : string
The CNPJ of the fund.
start : string, optional
Date formatted as '2019-12-31' or 'all'. The default is 'all'.
end : string, optional
Date formatted as '2019-12-31' or 'all'. The default is 'all'.
benchmark : string, optional
Benchmark used in the plot. Can be 'ibov' or 'cdi'. The default is 'cdi'.
plot : boolean, optional
Whether or not to plot the results. The default is True.
Returns
-------
accumulated returns : pandas dataframe
Accumulated % returns.
performance_table : pandas dataframe
Performance table of the fund.
"""
name = | pd.read_sql(f"SELECT denom_social FROM inf_cadastral WHERE cnpj = '{fund_id}'", conn) | pandas.read_sql |
import os, sys, inspect
sys.path.insert(1, os.path.join(sys.path[0], '../../'))
import ctypes
import torch
import torchvision as tv
import argparse
import time
import numpy as np
import scipy.sparse as sparse
from scipy.stats import binom
from PIL import Image
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import pickle as pkl
from tqdm import tqdm
import seaborn as sns
from utils import *
from core.bounds import hb_p_value
from core.concentration import *
from statsmodels.stats.multitest import multipletests
import pdb
import multiprocessing as mp
import time
data = {}
global_dict = {"loss_tables": None}
def plot_histograms(df_list,alphas,delta):
fig, axs = plt.subplots(nrows=1,ncols=3,figsize=(12,3))
coverages = []
oodt1s = []
labels = []
for df in df_list:
region_name = df['region name'][0]
if region_name == "2D Fixed Sequence":
region_name = '2D Fixed\nSequence'
coverages = coverages + [df['coverage'],]
oodt1s = oodt1s + [df['OOD Type I'],]
labels = labels + [region_name,]
axs[2].scatter(1-df['coverage'],df['OOD Type I'], alpha=0.7, label=region_name)
fraction_violated = ((df['coverage'] < 1-alphas[1]) | (df['OOD Type I'] > alphas[0])).astype(float).mean()
print(f"Fraction violated (at least one risk) using {region_name}: {fraction_violated}")
sns.violinplot(data=coverages,ax=axs[0],orient='h',inner=None)
sns.violinplot(data=oodt1s,ax=axs[1],orient='h',inner=None)
# Limits, lines, and labels
#axs[0].set_ylabel("Histogram Density")
axs[0].set_xlabel("Coverage")
axs[0].axvline(x=1-alphas[1],c='#999999',linestyle='--',alpha=0.7)
axs[0].locator_params(axis="x", nbins=4)
axs[0].set_yticklabels(labels)
axs[1].set_xlabel("CIFAR marked OOD")
axs[1].axvline(x=alphas[0],c='#999999',linestyle='--',alpha=0.7)
axs[1].locator_params(axis="x", nbins=4)
axs[2].axvline(x=alphas[1],c='#999999', linestyle='--', alpha=0.7)
axs[2].axhline(y=alphas[0],c='#999999', linestyle='--', alpha=0.7)
axs[2].legend(loc='lower left')
axs[2].set_xlim(left=0,right=1.05*max([(1-df['coverage']).max() for df in df_list]))
axs[2].set_ylim(bottom=0,top=1.05*max([df['OOD Type I'].max() for df in df_list]))
axs[2].set_xlabel("Miscoverage")
axs[2].set_ylabel("CIFAR Marked OOD")
axs[2].locator_params(axis="x", nbins=4)
axs[2].locator_params(axis="y", nbins=4)
sns.despine(ax=axs[0],top=True,right=True)
sns.despine(ax=axs[1],top=True,right=True)
sns.despine(ax=axs[2],top=True,right=True)
fig.tight_layout()
os.makedirs("./outputs/histograms",exist_ok=True)
plt.savefig("./" + f"outputs/histograms/ood_{alphas[0]}_{alphas[1]}_{delta}_histograms".replace(".","_") + ".pdf")
# Table will be n x m x N x N, where n is number of samples, m is number of losses, and N is sampling of lambda
def get_loss_tables(data,lambda1s,lambda2s):
os.makedirs('./.cache/', exist_ok=True)
try:
loss_tables = torch.load('./.cache/loss_tables.pt')
size_table = torch.load('./.cache/size_table.pt')
frac_ind_ood_table = torch.load('./.cache/frac_ind_ood_table.pt')
frac_ood_ood_table = torch.load('./.cache/frac_ood_ood_table.pt')
except FileNotFoundError:
# Load data
odin_ind = data['odin_ind']
odin_ind, ind_sort = odin_ind.sort()
odin_ood = data['odin_ood']
odin_ood, ood_sort = odin_ood.sort()
softmax_ind = data['softmax_ind'][ind_sort]
softmax_ood = data['softmax_ood'][ood_sort]
labels_ind = data['labels_ind'][ind_sort]
labels_ood = data['labels_ood'][ood_sort]
# Preallocate space
loss_tables = torch.zeros((softmax_ind.shape[0],2,lambda1s.shape[0],lambda2s.shape[0]))
size_table = torch.zeros((softmax_ind.shape[0],lambda1s.shape[0],lambda2s.shape[0]))
frac_ind_ood_table = torch.zeros((lambda1s.shape[0],))
frac_ood_ood_table = torch.zeros((lambda1s.shape[0],))
print("Calculating loss tables.")
for i in tqdm(range(lambda1s.shape[0])):
num_incorrect_ind = (odin_ind > lambda1s[i]).float().sum()
num_incorrect_ood = (odin_ood <= lambda1s[i]).float().sum()
frac_ind_ood_table[i] = num_incorrect_ind/float(odin_ind.shape[0])
frac_ood_ood_table[i] = 1-num_incorrect_ood/float(odin_ind.shape[0])
if i > 0 and frac_ind_ood_table[i] == frac_ind_ood_table[i-1]:
loss_tables[:,:,i,:] = loss_tables[:,:,i-1,:]
size_table[:,i,:] = size_table[:,i-1,:]
else:
for j in range(lambda2s.shape[0]):
if num_incorrect_ind == 0:
index_split = None
else:
index_split = -int(num_incorrect_ind)
_softmax_ind = softmax_ind[:index_split]
if _softmax_ind.shape[0] > 0:
srtd, pi = _softmax_ind.sort(dim=1,descending=True)
sizes = (srtd.cumsum(dim=1) <= lambda2s[j]).int().sum(dim=1)
sizes = torch.max(sizes,torch.ones_like(sizes))
rank_of_true = (pi == labels_ind[:index_split,None]).int().argmax(dim=1) + 1
missed = ( sizes < rank_of_true ).int()
loss_tables[:index_split,1,i,j] = missed
size_table[:index_split,i,j] = sizes
loss_tables[:,0,i,:] = (odin_ind > lambda1s[i]).int().unsqueeze(dim=1)
print(f"\n\ri: {i}, Frac InD OOD: {frac_ind_ood_table[i]}, Frac OOD OOD: {frac_ood_ood_table[i]}\033[1A",end="")
torch.save(loss_tables,"./.cache/loss_tables.pt")
torch.save(size_table,"./.cache/size_table.pt")
torch.save(frac_ind_ood_table,"./.cache/frac_ind_ood_table.pt")
torch.save(frac_ood_ood_table,"./.cache/frac_ood_ood_table.pt")
print("Loss tables calculated!")
return loss_tables, size_table, frac_ind_ood_table, frac_ood_ood_table
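# Shape reminder for the cached tensors returned above (grid sizes below are illustrative):
#   loss_tables        (n, 2, N1, N2)  per-example 0/1 losses: [OOD error, miscoverage]
#   size_table         (n, N1, N2)     prediction-set sizes
#   frac_ind_ood_table (N1,)           fraction of in-distribution points flagged OOD
# Tiny synthetic analogue showing how the first risk is averaged downstream:
import torch
_toy = torch.randint(0, 2, (5, 2, 3, 4)).float()               # n=5 examples, 2 losses, 3x4 lambda grid
_risk1 = _toy[:, 0, :, :].mean(dim=0)                          # mirrors r_hats_risk1 in the tests below
assert _risk1.shape == (3, 4)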
def calculate_corrected_p_values(calib_tables, alphas, lambda1s, lambda2s):
n = calib_tables.shape[0]
# Get p-values for each loss
r_hats_risk1 = calib_tables[:,0,:].mean(axis=0).squeeze().flatten() # empirical risk at each lambda combination
p_values_risk1 = np.array([hb_p_value(r_hat,n,alphas[0]) for r_hat in r_hats_risk1])
r_hats_risk2 = (calib_tables[:,1,:] * (1-calib_tables[:,0,:]) - alphas[1]*(1-calib_tables[:,0,:])).mean(axis=0).squeeze().flatten() + alphas[1] # empirical risk at each lambda combination using trick
p_values_risk2 = np.array([hb_p_value(r_hat,n,alphas[1]) for r_hat in r_hats_risk2])
# Combine them
p_values_corrected = np.maximum(p_values_risk1,p_values_risk2)
return p_values_corrected
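# The component-wise max above is what turns "both risks must hold" into a single p-value
# per lambda: max(p1, p2) <= t iff p1 <= t and p2 <= t. Tiny illustration with made-up
# p-values (not derived from any calibration data):
import numpy as np
_p1 = np.array([0.001, 0.20, 0.04])
_p2 = np.array([0.030, 0.01, 0.50])
assert np.array_equal(np.maximum(_p1, _p2) <= 0.05, np.array([True, False, False]))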
def flatten_lambda_meshgrid(lambda1s,lambda2s):
l1_meshgrid, l2_meshgrid = torch.meshgrid(torch.tensor(lambda1s),torch.tensor(lambda2s))
l1_meshgrid = l1_meshgrid.flatten()
l2_meshgrid = l2_meshgrid.flatten()
return l1_meshgrid, l2_meshgrid
def getA_gridsplit(lambda1s,lambda2s,delta):
l1_meshgrid, l2_meshgrid = torch.meshgrid(torch.tensor(lambda1s),torch.tensor(lambda2s))
Is = torch.tensor(range(1,lambda1s.shape[0]+1)).flip(dims=(0,)).double()
Js = torch.tensor(range(1,lambda2s.shape[0]+1)).flip(dims=(0,)).double()
Is, Js = torch.meshgrid(Is,Js)
#Wc[i,j]=mass from node [i,j] to node [i,j-1]
Wc = torch.zeros_like(l1_meshgrid)
Wc[:] = 1
data = Wc.flatten().numpy()
row = np.array(range(Wc.numel()))
i_orig = row // Wc.shape[1]
j_orig = row % Wc.shape[1]
col = i_orig*Wc.shape[1] + j_orig - 1
idx = (col >= 0) & (col < Wc.numel())
data = data[idx]
row = row[idx]
col = col[idx]
# Main edges
A = sparse.csr_matrix((data, (row, col)), shape=(Wc.numel(), Wc.numel()))
# Skip edges
#skip_bool = (np.array(range(A.shape[0])) % lambda2s.shape[0])==0
#skip_bool2 = (np.array(range(A.shape[0])) % lambda2s.shape[0])==(lambda2s.shape[0]-1)
#A[skip_bool,:] = 0
#A[skip_bool,skip_bool2] = 1
A.eliminate_zeros()
# Set up the error budget
error_budget = torch.zeros((lambda1s.shape[0],lambda2s.shape[0]))
error_budget[:,-1] = delta/lambda1s.shape[0]
return A, error_budget
def getA_row_equalized(lambda1s, lambda2s, delta):
l1_meshgrid, l2_meshgrid = torch.meshgrid(torch.tensor(lambda1s),torch.tensor(lambda2s))
Is = torch.tensor(range(1,lambda1s.shape[0]+1)).flip(dims=(0,)).double()
Js = torch.tensor(range(1,lambda2s.shape[0]+1)).flip(dims=(0,)).double()
Is, Js = torch.meshgrid(Is,Js)
#Wr[i,j]=mass from node [i,j] to node [i-1,j]
#Wc[i,j]=mass from node [i,j] to node [i,j-1]
Wr = torch.zeros_like(l1_meshgrid)
Wc = torch.zeros_like(l1_meshgrid)
small_axis = min(lambda1s.shape[0],lambda2s.shape[0])
large_axis = max(lambda1s.shape[0],lambda2s.shape[0])
tri_bool = (Is + Js) <= small_axis
Wr[tri_bool] = (Is/(Is+Js))[tri_bool]
Wc[tri_bool] = (Js/(Is+Js))[tri_bool]
Wc[~tri_bool & (Js < large_axis)] = 1
Wr[Js == large_axis] = 1
data = torch.cat((Wr.flatten(),Wc.flatten()),dim=0).numpy()
row = np.concatenate((np.array(range(Wr.numel())),np.array(range(Wr.numel()))),axis=0)
i_orig = row // Wr.shape[1]
j_orig = row % Wr.shape[1]
col = np.concatenate((
(i_orig[:row.shape[0]//2] - 1)*Wr.shape[1] + j_orig[:row.shape[0]//2], (i_orig[row.shape[0]//2:])*Wr.shape[1] + j_orig[row.shape[0]//2:] - 1
), axis=0)
idx = (col >= 0) & (col < Wr.numel())
data = data[idx]
row = row[idx]
col = col[idx]
A = sparse.csr_matrix((data, (row, col)), shape=(Wr.numel(), Wr.numel()))
# Set up the error budget
error_budget = torch.zeros((lambda1s.shape[0],lambda2s.shape[0]))
error_budget[-1,-1] = delta
return A, error_budget
def to_flat_index(idxs,shape):
return idxs[0]*shape[1] + idxs[1]
def to_rect_index(idxs,shape):
return [idxs//shape[1], idxs % shape[1]]
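# Round-trip check for the two index helpers above (the grid shape is chosen arbitrarily):
_shape = (7, 5)
assert to_flat_index((3, 2), _shape) == 17
assert to_rect_index(17, _shape) == [3, 2]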
def coordsplit_test(loss_tables, alphas, delta, lambda1s, lambda2s, num_calib):
r_hats_risk1 = loss_tables[:,0,:].mean(dim=0)
r_hats_risk2 = (loss_tables[:,1,:] * (1-loss_tables[:,0,:]) - alphas[1]*(1-loss_tables[:,0,:])).mean(dim=0) + alphas[1] # empirical risk at each lambda combination using trick
r_hats = torch.cat((r_hats_risk1[None,:],r_hats_risk2[None,:]),dim=0)
p_vals = torch.ones_like(r_hats)
# Calculate the p-values
for (r, i, j), r_hat in np.ndenumerate(r_hats):
if r_hat > alphas[r]:
continue # assign a p-value of 1
p_vals[r,i,j] = hb_p_value(r_hat,num_calib,alphas[r])
lambda1_idx = np.argmax(p_vals[0,:,0] < delta/2).item()
lambda2_idx = np.argmax(p_vals[1,lambda1_idx,:] < delta/2).item()
R = np.array([lambda1_idx*lambda2s.shape[0] + lambda2_idx,])
return R
def graph_test(A, error_budget, loss_tables, alphas, delta, lambda1s, lambda2s, num_calib, acyclic=False):
r_hats_risk1 = loss_tables[:,0,:].mean(dim=0)
r_hats_risk2 = (loss_tables[:,1,:] * (1-loss_tables[:,0,:]) - alphas[1]*(1-loss_tables[:,0,:])).mean(dim=0) + alphas[1] # empirical risk at each lambda combination using trick
r_hats = torch.cat((r_hats_risk1[None,:],r_hats_risk2[None,:]),dim=0)
p_vals = torch.ones_like(r_hats)
# Calculate the p-values
for (r, i, j), r_hat in np.ndenumerate(r_hats):
if r_hat > alphas[r]:
continue # assign a p-value of 1
p_vals[r,i,j] = hb_p_value(r_hat,num_calib,alphas[r])
p_vals = p_vals.max(dim=0)[0]
rejected_bool = torch.zeros_like(p_vals) > 1 # all false
A = A.tolil()
if not acyclic:
A_csr = A.tocsr()
# Graph updates
while(rejected_bool.int().sum() < p_vals.numel() and error_budget.sum() > 0):
argmin = (p_vals/error_budget).argmin()
argmin_rect = to_rect_index(argmin.numpy(),p_vals.shape)
minval = p_vals[argmin_rect[0],argmin_rect[1]]
#print(f"discoveries: {rejected_bool.float().sum():.3f}, error left total: {error_budget.sum():.3e}, point:{argmin_rect}, error here: {error_budget[argmin_rect[0],argmin_rect[1]]:.3e}, p_val: {minval:.3e}")
if minval > error_budget[argmin_rect[0],argmin_rect[1]]:
error_budget[argmin_rect[0],argmin_rect[1]] = 0
continue
rejected_bool[argmin_rect[0],argmin_rect[1]] = True
# Modify the graph
outgoing_edges = A[argmin,:]
for e in range(len(outgoing_edges.data[0])):
g_jl = outgoing_edges.data[0][e]
destination = to_rect_index(outgoing_edges.rows[0][e],error_budget.shape)
error_budget[destination[0],destination[1]] += g_jl*error_budget[argmin_rect[0],argmin_rect[1]]
if not acyclic:
incoming_edges = A_csr[:,argmin].T # Use CSR here for speed
nodes_to_update = list(set(outgoing_edges.rows[0] + list(incoming_edges.indices))) #Incoming edges
for node in nodes_to_update:
if node == argmin.item():
continue
g_lj = incoming_edges[0,node]
if g_lj == 0:
continue
A[node,:] = (A[node,:] + g_lj*outgoing_edges)/(1-g_lj*outgoing_edges[0,node])
A[:,argmin] = 0
#A[argmin,:] = 0 # No incoming nodes, so don't have to set this.
if not acyclic:
A_csr = A.tocsr()
error_budget[argmin_rect[0],argmin_rect[1]] = 0.0
return rejected_bool.flatten().nonzero()
def gridsplit_graph_test(loss_tables, alphas, delta, lambda1s, lambda2s, num_calib):
A, error_budget = getA_gridsplit(lambda1s,lambda2s,delta)
return graph_test(A, error_budget, loss_tables, alphas, delta, lambda1s, lambda2s, num_calib, acyclic=True)
def row_equalized_graph_test(loss_tables, alphas, delta, lambda1s, lambda2s, num_calib):
A, error_budget = getA_row_equalized(lambda1s,lambda2s,delta)
return graph_test(A, error_budget, loss_tables, alphas, delta, lambda1s, lambda2s, num_calib, acyclic=True)
def trial_precomputed(method_name, alphas, delta, lambda1s, lambda2s, num_calib, maxiter, i, r1, r2, oodt2, lht, curr_proc_dict):
fix_randomness(seed=(i*num_calib))
n = global_dict['loss_tables'].shape[0]
perm = torch.randperm(n)
loss_tables = global_dict['loss_tables'][perm]
calib_tables, val_tables = (loss_tables[:num_calib], loss_tables[num_calib:])
l1_meshgrid, l2_meshgrid = flatten_lambda_meshgrid(lambda1s,lambda2s)
lambda_selector = np.ones((lambda1s.shape[0]*lambda2s.shape[0],)) > 2 # All false
if method_name == "Hamming":
R = row_equalized_graph_test(loss_tables, alphas, delta, lambda1s, lambda2s, num_calib)
lambda_selector[:] = True
elif method_name == "Gridsplit SGT":
R = gridsplit_graph_test(loss_tables, alphas, delta, lambda1s, lambda2s, num_calib)
lambda_selector[:] = True
elif method_name == "2D Fixed Sequence":
R = coordsplit_test(loss_tables, alphas, delta, lambda1s, lambda2s, num_calib)
lambda_selector[:] = True
else:
if method_name == "Multiscale HBBonferroni":
n_coarse = int(calib_tables.shape[0]/10)
coarse_tables, fine_tables = (calib_tables[:n_coarse], calib_tables[n_coarse:])
p_values_coarse = calculate_corrected_p_values(coarse_tables, alphas, lambda1s, lambda2s)
# Get a band around delta that contains about 5% of examples.
delta_quantile = (p_values_coarse <= delta).mean()
lambda_selector[p_values_coarse <= 1.5*delta] = True
frac_selected = lambda_selector.astype(float).mean()
if frac_selected == 0:
print("Selection failed!")
lambda_selector[:] = True
else:
p_values_corrected = calculate_corrected_p_values(fine_tables, alphas, lambda1s, lambda2s)
else:
p_values_corrected = calculate_corrected_p_values(calib_tables, alphas, lambda1s, lambda2s)
lambda_selector[:] = True
if method_name == "Fallback":
p_values_corrected = p_values_corrected.reshape((lambda1s.shape[0],lambda2s.shape[0]))
mask = np.zeros_like(p_values_corrected)
for row in range(p_values_corrected.shape[0]):
p_value_exceed_indexes = np.nonzero(p_values_corrected[row,:] > (delta/lambda1s.shape[0]))[0]
valid_col = min(p_value_exceed_indexes.max()+1,p_values_corrected.shape[1]-1)
if valid_col == 999:
continue
mask[row,valid_col] = 1
R = np.nonzero(mask.flatten())[0]
#R = np.nonzero(p_values_corrected < (delta / lambda1s.shape[0]))[0]
else:
# Bonferroni correct over lambda to get the valid discoveries
R = bonferroni(p_values_corrected[lambda_selector], delta)
if R.shape[0] == 0:
return 0.0, 0.0, 0.0, np.array([1.0,1.0])
# Index the lambdas
l1_meshgrid = l1_meshgrid[lambda_selector]
l2_meshgrid = l2_meshgrid[lambda_selector]
l1s = l1_meshgrid[R]
l2s = l2_meshgrid[R]
minrow = (R//lambda2s.shape[0]).min()
mincol = (R %lambda2s.shape[0]).min()
print(minrow)
print(mincol)
lhat = np.array([l1s[l2s==l2s.min()].min(), l2s.min()])
#lhat = np.array([l1s.min(), l2s[l1s==l1s.min()].min()])
print(f"Region: {method_name}, Lhat: {lhat}")
# Validate
idx1 = np.nonzero(np.abs(lambda1s-lhat[0]) < 1e-10)[0]
idx2 = np.nonzero(np.abs(lambda2s-lhat[1]) < 1e-10)[0]
num_ood = val_tables[:,0,idx1,idx2].sum()
risk1 = float(num_ood) / float(val_tables.shape[0])
selector = -int(num_ood) if num_ood != 0 else None
risk2 = val_tables[:selector,1,idx1,idx2].mean().item()
ood_type2 = 1-global_dict['frac_ood_ood_table'][idx1].item()
r1[i] = risk1
r2[i] = risk2
oodt2[i] = ood_type2
lht[i] = lhat
curr_proc_dict['num'] -= 1
# Define the tables in the global scope
def experiment(alphas,delta,lambda1s,lambda2s,num_calib,num_trials,maxiter,cache_dir,num_processes):
df_list = []
rejection_region_names = ("Bonferroni","2D Fixed Sequence","Fallback","Hamming")
for idx in range(len(rejection_region_names)):
rejection_region_name = rejection_region_names[idx]
fname = f'./.cache/{alphas}_{delta}_{num_calib}_{num_trials}_{rejection_region_name}_dataframe.pkl'
df = pd.DataFrame(columns = ["$\\hat{\\lambda}$","coverage","OOD Type I","OOD Type II","alpha1","alpha2","delta","region name"])
try:
df = | pd.read_pickle(fname) | pandas.read_pickle |
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Author: <NAME>
date: 2020/1/22 19:56
contact: <EMAIL>
desc: Investing.com China (英为财情) - foreign exchange - historical data for currency pairs
https://cn.investing.com/currencies/
https://cn.investing.com/currencies/eur-usd-historical-data
"""
import re
import pandas as pd
import requests
from bs4 import BeautifulSoup
from akshare.index.cons import short_headers, long_headers
def currency_name_url():
url = "https://cn.investing.com/currencies/"
res = requests.post(url, headers=short_headers)
data_table = | pd.read_html(res.text) | pandas.read_html |
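# Note: pd.read_html returns a list of DataFrames, one per <table> element on the page,
# so the caller still has to pick the table of interest (e.g. data_table[0]) before
# extracting the currency-pair names and URLs.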
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 25 10:30:58 2018
@author: Administrator
"""
import json
from pprint import pprint
import urllib
import pandas as pd
import requests
import numpy as np
import time
from sklearn.preprocessing import MultiLabelBinarizer
import os
import multiprocessing
from multiprocessing import Pool
import shutil
from pandas.io.json import json_normalize
import urllib3
import collections
from tqdm import tqdm
def loading_json():
script_start_time = time.time()
print('%0.2f min: Start loading data'%((time.time() - script_start_time)/60))
train={}
test={}
validation={}
with open('train.json') as json_data:
train= json.load(json_data)
with open('test.json') as json_data:
test= json.load(json_data)
with open('validation.json') as json_data:
validation = json.load(json_data)
print('Train No. of images: %d'%(len(train['images'])))
print('Test No. of images: %d'%(len(test['images'])))
print('Validation No. of images: %d'%(len(validation['images'])))
# JSON TO PANDAS DATAFRAME
# train data
train_img_url=train['images']
train_img_url=pd.DataFrame(train_img_url)
train_ann=train['annotations']
train_ann=pd.DataFrame(train_ann)
train=pd.merge(train_img_url, train_ann, on='imageId', how='inner')
# test data
test=pd.DataFrame(test['images'])
# Validation Data
val_img_url=validation['images']
val_img_url=pd.DataFrame(val_img_url)
val_ann=validation['annotations']
val_ann= | pd.DataFrame(val_ann) | pandas.DataFrame |
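# Minimal illustration of the json -> DataFrame -> merge pattern used above, with made-up
# records instead of the competition files (imageId/url/labelId mimic the schema, but the
# values are invented):
import pandas as pd
_imgs = pd.DataFrame([{'imageId': '1', 'url': 'http://example.com/1.jpg'}])
_anns = pd.DataFrame([{'imageId': '1', 'labelId': ['12', '40']}])
_train = pd.merge(_imgs, _anns, on='imageId', how='inner')
assert list(_train.columns) == ['imageId', 'url', 'labelId']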
import numpy as np
import pandas as pd
from matplotlib import *
# .........................Series.......................#
x1 = np.array([1, 2, 3, 4])
s = pd.Series(x1, index=[1, 2, 3, 4])
print(s)
# .......................DataFrame......................#
x2 = np.array([1, 2, 3, 4, 5, 6])
s = pd.DataFrame(x2)
print(s)
x3 = np.array([['Alex', 10], ['Nishit', 21], ['Aman', 22]])
s = pd.DataFrame(x3, columns=['Name', 'Age'])
print(s)
data = {'Name': ['Tom', 'Jack', 'Steve', 'Ricky'], 'Age': [28, 34, 29, 42]}
df = pd.DataFrame(data, index=['rank1', 'rank2', 'rank3', 'rank4'])
print(df)
data = [{'a': 1, 'b': 2}, {'a': 3, 'b': 4, 'c': 5}]
df = pd.DataFrame(data)
print(df)
d = {'one': pd.Series([1, 2, 3], index=['a', 'b', 'c']),
'two': pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])}
df = pd.DataFrame(d)
print(df)
# ....Adding New column......#
data = {'one': pd.Series([1, 2, 3, 4], index=[1, 2, 3, 4]),
'two': pd.Series([1, 2, 3], index=[1, 2, 3])}
df = pd.DataFrame(data)
print(df)
df['three'] = pd.Series([1, 2], index=[1, 2])
print(df)
# ......Deleting a column......#
data = {'one': pd.Series([1, 2, 3, 4], index=[1, 2, 3, 4]),
'two': pd.Series([1, 2, 3], index=[1, 2, 3]),
'three': pd.Series([1, 1], index=[1, 2])
}
df = pd.DataFrame(data)
print(df)
del df['one']
print(df)
df.pop('two')
print(df)
# ......Selecting a particular Row............#
data = {'one': pd.Series([1, 2, 3, 4], index=[1, 2, 3, 4]),
'two': pd.Series([1, 2, 3], index=[1, 2, 3]),
'three': pd.Series([1, 1], index=[1, 2])
}
df = pd.DataFrame(data)
print(df.loc[2])
print(df[1:4])
# .........Addition of Row.................#
df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
df2 = pd.DataFrame([[5, 6], [7, 8]], columns=['a', 'b'])
df = pd.concat([df, df2])
print(df.head())
# ........Deleting a Row..................#
df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
df2 = | pd.DataFrame([[5, 6], [7, 8]], columns=['a', 'b']) | pandas.DataFrame |
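# The snippet above stops right after building the two frames; a hypothetical continuation
# of the "Deleting a Row" step (not part of the original tutorial) would concatenate them
# and then drop rows by label:
import pandas as pd
df = pd.concat([pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b']),
                pd.DataFrame([[5, 6], [7, 8]], columns=['a', 'b'])])
df = df.drop(0)      # drops every row labelled 0, one from each original frame
print(df)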
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 1 14:34:50 2020
@author: Administrator
"""
import numpy as np
# =============================================================================
# Derivatives of the loss functions
# =============================================================================
der_mse = lambda y_hat,y: y_hat - y
der_llh = lambda y_hat,y: y ## must be paired with a softmax output layer, otherwise incorrect
class SoftLayer:
def __init__(self):
pass
def forward(self,X,record = False):
rst = np.exp(X)/np.exp(X).sum(1,keepdims=True)
if record: self.temp = rst
return rst
def backward(self, cum_grad):
return self.temp-cum_grad ## 必须接der_llh损失函数导数,否则错误
def update(self, l_rate):
pass
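# Why backward() above can return self.temp - cum_grad: when the softmax output p is trained
# with the log-likelihood loss (der_llh simply forwards the one-hot target y as cum_grad),
# the gradient of the loss w.r.t. the pre-softmax logits is p - y. Quick numerical check
# with made-up logits, where index 1 is the true class:
import numpy as np
_z = np.array([[1.0, 2.0, 0.5]])
_p = np.exp(_z) / np.exp(_z).sum(1, keepdims=True)
_eps = 1e-6
_zp = _z.copy()
_zp[0, 0] += _eps
_pp = np.exp(_zp) / np.exp(_zp).sum(1, keepdims=True)
_num_grad0 = (-np.log(_pp[0, 1]) + np.log(_p[0, 1])) / _eps
assert abs(_num_grad0 - _p[0, 0]) < 1e-4                       # analytic gradient is p_k - y_k, and y_0 = 0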
class LinearLayer:
def __init__(self, size_in: int, size_out: int):
self.W = np.random.rand(size_in, size_out) ## X*W+B
self.B = np.random.rand(1, size_out)
def forward(self,X,record=False):
if record: self.temp = X
return X.dot(self.W) + self.B
def backward(self,cum_grad):
self.grad_W = np.matmul(self.temp.T,cum_grad)
self.grad_B = np.matmul(cum_grad.T, np.ones(len(self.temp)) )
return np.matmul(cum_grad,self.W.T)
def update(self, l_rate):
self.W -= self.grad_W * l_rate/(len(self.temp))
self.B -= self.grad_B * l_rate/(len(self.temp))
class SigmodLayer:
def __init__(self):
pass
def forward(self,X,record = False):
rst = 1/(1+np.exp(-X))
if record: self.temp = rst
return rst
def backward(self, cum_grad):
return self.temp*(1-self.temp)*cum_grad
def update(self, l_rate):
pass
class ReluLayer:
def __init__(self):
pass
def forward(self,X,record = False):
rst = np.where(X < 0, 0, X)
if record: self.temp = rst
return rst
def backward(self, cum_grad):
return np.where(self.temp > 0, 1, 0) * cum_grad
def update(self, l_rate):
pass
class DNN:
def __init__(self,layers:list):
self.layers = layers
def predict(self,X,record=False):
for layer in self.layers:
X = layer.forward(X, record=record)
return X.argmax(1)
def train(self,X,Y,testX,testY,loss=der_mse,batch=10,epoch=50,alpha=.1):
'''Mini-batch gradient descent over the training set.'''
self.info = []
for t in range(epoch):
batches = np.split(np.random.permutation(len(X)),
np.arange(len(X),step=batch)[1:])
for ids in batches:
## Forward pass: record the inputs/outputs needed later for the gradients
forward = X[ids].copy()
for layer in self.layers:
forward = layer.forward(forward, record=True)
## Backward pass: propagate the gradient and compute each layer's parameter gradients
grads = loss(forward, Y[ids]) ## derivative of the loss, e.g. MSE gives y_hat - y
for layer in self.layers[::-1]:
grads = layer.backward(grads)
## Update each layer's parameters from its gradients
for layer in self.layers:
layer.update(alpha)
## Record training metrics
Y_hat = self.predict(testX)
self.info.append({'t':t,'right':(Y_hat==testY.argmax(1)).mean()})
return 'train done!'
if __name__=='__main__':
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# load data
iris = load_iris()
iris.target = pd.get_dummies(iris.target,dtype=float).values
X_train, X_test, y_train, y_test = train_test_split(
iris.data, iris.target, test_size=.3,random_state=42)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# train model
## The most basic setup: sigmoid activations + MSE loss
layers = [LinearLayer(4,8),SigmodLayer(),LinearLayer(8,3),
SigmodLayer()]
dnn = DNN(layers)
dnn.train(X_train,y_train,X_test,y_test,
loss=der_mse,batch=10,epoch=50,alpha=1)
info = pd.DataFrame(dnn.info)
info.plot(x='t',y='right',marker='o',ms=3)
## For classification, softmax activation + log-likelihood loss works better
layers = [LinearLayer(4,8),ReluLayer(),LinearLayer(8,3),
SoftLayer()]
dnn = DNN(layers)
dnn.train(X_train,y_train,X_test,y_test,
loss=der_llh,batch=10,epoch=20,alpha=.1)
info = | pd.DataFrame(dnn.info) | pandas.DataFrame |
import pandas as pd
import tqdm
from pynput import keyboard
import bird_view.utils.bz_utils as bzu
import bird_view.utils.carla_utils as cu
from bird_view.models.common import crop_birdview
from perception.utils.helpers import get_segmentation_tensor
from perception.utils.segmentation_labels import DEFAULT_CLASSES
from perception.utils.visualization import get_rgb_segmentation, get_segmentation_colors
def _paint(observations, control, diagnostic, debug, env, show=False, use_cv=False, trained_cv=False):
import cv2
import numpy as np
WHITE = (255, 255, 255)
RED = (255, 0, 0)
CROP_SIZE = 192
X = 176
Y = 192 // 2
R = 2
birdview = cu.visualize_birdview(observations['birdview'])
birdview = crop_birdview(birdview)
if 'big_cam' in observations:
canvas = np.uint8(observations['big_cam']).copy()
rgb = np.uint8(observations['rgb']).copy()
else:
canvas = np.uint8(observations['rgb']).copy()
def _stick_together(a, b, axis=1):
if axis == 1:
h = min(a.shape[0], b.shape[0])
r1 = h / a.shape[0]
r2 = h / b.shape[0]
a = cv2.resize(a, (int(r1 * a.shape[1]), int(r1 * a.shape[0])))
b = cv2.resize(b, (int(r2 * b.shape[1]), int(r2 * b.shape[0])))
return np.concatenate([a, b], 1)
else:
h = min(a.shape[1], b.shape[1])
r1 = h / a.shape[1]
r2 = h / b.shape[1]
a = cv2.resize(a, (int(r1 * a.shape[1]), int(r1 * a.shape[0])))
b = cv2.resize(b, (int(r2 * b.shape[1]), int(r2 * b.shape[0])))
return np.concatenate([a, b], 0)
def _stick_together_and_fill(a, b):
# sticks together a and b.
# a should be wider than b, and b will be filled with black pixels to match a's width.
w_diff = a.shape[1] - b.shape[1]
fill = np.zeros(shape=(b.shape[0], w_diff, 3), dtype=np.uint8)
b_filled = np.concatenate([b, fill], axis=1)
return np.concatenate([a, b_filled], axis=0)
def _write(text, i, j, canvas=canvas, fontsize=0.4):
rows = [x * (canvas.shape[0] // 10) for x in range(10+1)]
cols = [x * (canvas.shape[1] // 9) for x in range(9+1)]
cv2.putText(
canvas, text, (cols[j], rows[i]),
cv2.FONT_HERSHEY_SIMPLEX, fontsize, WHITE, 1)
_command = {
1: 'LEFT',
2: 'RIGHT',
3: 'STRAIGHT',
4: 'FOLLOW',
}.get(observations['command'], '???')
if 'big_cam' in observations:
fontsize = 0.8
else:
fontsize = 0.4
_write('Command: ' + _command, 1, 0, fontsize=fontsize)
_write('Velocity: %.1f' % np.linalg.norm(observations['velocity']), 2, 0, fontsize=fontsize)
_write('Steer: %.2f' % control.steer, 4, 0, fontsize=fontsize)
_write('Throttle: %.2f' % control.throttle, 5, 0, fontsize=fontsize)
_write('Brake: %.1f' % control.brake, 6, 0, fontsize=fontsize)
_write('Collided: %s' % diagnostic['collided'], 1, 6, fontsize=fontsize)
_write('Invaded: %s' % diagnostic['invaded'], 2, 6, fontsize=fontsize)
_write('Lights Ran: %d/%d' % (env.traffic_tracker.total_lights_ran, env.traffic_tracker.total_lights), 3, 6, fontsize=fontsize)
_write('Goal: %.1f' % diagnostic['distance_to_goal'], 4, 6, fontsize=fontsize)
_write('Time: %d' % env._tick, 5, 6, fontsize=fontsize)
_write('Time limit: %d' % env._timeout, 6, 6, fontsize=fontsize)
_write('FPS: %.2f' % (env._tick / (diagnostic['wall'])), 7, 6, fontsize=fontsize)
for x, y in debug.get('locations', []):
x = int(X - x / 2.0 * CROP_SIZE)
y = int(Y + y / 2.0 * CROP_SIZE)
S = R // 2
birdview[x-S:x+S+1,y-S:y+S+1] = RED
for x, y in debug.get('locations_world', []):
x = int(X - x * 4)
y = int(Y + y * 4)
S = R // 2
birdview[x-S:x+S+1,y-S:y+S+1] = RED
for x, y in debug.get('locations_birdview', []):
S = R // 2
birdview[x-S:x+S+1,y-S:y+S+1] = RED
for x, y in debug.get('locations_pixel', []):
S = R // 2
if 'big_cam' in observations:
rgb[y-S:y+S+1,x-S:x+S+1] = RED
else:
canvas[y-S:y+S+1,x-S:x+S+1] = RED
for x, y in debug.get('curve', []):
x = int(X - x * 4)
y = int(Y + y * 4)
try:
birdview[x,y] = [155, 0, 155]
except:
pass
if 'target' in debug:
x, y = debug['target'][:2]
x = int(X - x * 4)
y = int(Y + y * 4)
birdview[x-R:x+R+1,y-R:y+R+1] = [0, 155, 155]
#ox, oy = observations['orientation']
#rot = np.array([
# [ox, oy],
# [-oy, ox]])
#u = observations['node'] - observations['position'][:2]
#v = observations['next'] - observations['position'][:2]
#u = rot.dot(u)
#x, y = u
#x = int(X - x * 4)
#y = int(Y + y * 4)
#v = rot.dot(v)
#x, y = v
#x = int(X - x * 4)
#y = int(Y + y * 4)
if 'big_cam' in observations:
_write('Network input/output', 1, 0, canvas=rgb)
_write('Projected output', 1, 0, canvas=birdview)
full = _stick_together(rgb, birdview)
else:
full = _stick_together(canvas, birdview)
if 'image' in debug:
full = _stick_together(full, cu.visualize_predicted_birdview(debug['image'], 0.01))
if 'big_cam' in observations:
full = _stick_together(canvas, full, axis=0)
if use_cv:
semseg = get_segmentation_tensor(observations["semseg"].copy(), classes=DEFAULT_CLASSES)
class_colors = get_segmentation_colors(len(DEFAULT_CLASSES) + 1, class_indxs=DEFAULT_CLASSES)
semseg_rgb = get_rgb_segmentation(semantic_image=semseg, class_colors=class_colors)
semseg_rgb = np.uint8(semseg_rgb)
full = _stick_together_and_fill(full, semseg_rgb)
depth = np.uint8(observations["depth"]).copy()
depth = np.expand_dims(depth, axis=2)
depth = np.repeat(depth, 3, axis=2)
full = _stick_together_and_fill(full, depth)
if trained_cv:
semseg = observations["semseg"].copy()
class_colors = get_segmentation_colors(len(DEFAULT_CLASSES) + 1, class_indxs=DEFAULT_CLASSES)
semseg_rgb = get_rgb_segmentation(semantic_image=semseg, class_colors=class_colors)
semseg_rgb = np.uint8(semseg_rgb)
full = _stick_together_and_fill(full, semseg_rgb)
depth = cv2.normalize(observations["depth"].copy(), None, alpha=0, beta=255,
norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
depth = np.uint8(depth)
depth = np.expand_dims(depth, axis=2)
depth = np.repeat(depth, 3, axis=2)
full = _stick_together_and_fill(full, depth)
if show:
bzu.show_image('canvas', full)
bzu.add_to_video(full)
manual_break = False
def run_single(env, weather, start, target, agent_maker, seed, autopilot, show=False, move_camera=False,
use_cv=False, trained_cv=False):
# HACK: deterministic vehicle spawns.
env.seed = seed
env.init(start=start, target=target, weather=cu.PRESET_WEATHERS[weather])
print("Spawn points: ", (start, target))
if not autopilot:
agent = agent_maker()
else:
agent = agent_maker(env._player, resolution=1, threshold=7.5)
agent.set_route(env._start_pose.location, env._target_pose.location)
diagnostics = list()
result = {
'weather': weather,
'start': start, 'target': target,
'success': None, 't': None,
'total_lights_ran': None,
'total_lights': None,
'collided': None,
}
i = 0
listener = keyboard.Listener(on_release=on_release)
listener.start()
while env.tick():
if i % 50 == 0 and move_camera:
env.move_spectator_to_player()
i = 0 if not move_camera else i + 1
observations = env.get_observations()
if autopilot:
control, _, _, _ = agent.run_step(observations)
else:
control = agent.run_step(observations)
diagnostic = env.apply_control(control)
_paint(observations, control, diagnostic, agent.debug, env, show=show, use_cv=use_cv, trained_cv=trained_cv)
diagnostic.pop('viz_img')
diagnostics.append(diagnostic)
global manual_break
if env.is_failure() or env.is_success() or manual_break:
result['success'] = env.is_success()
result['total_lights_ran'] = env.traffic_tracker.total_lights_ran
result['total_lights'] = env.traffic_tracker.total_lights
result['collided'] = env.collided
result['t'] = env._tick
if manual_break:
print("Manual break activated")
result['success'] = False
manual_break = False
if not result['success']:
print("Evaluation route failed! Start: {}, Target: {}, Weather: {}".format(result["start"],
result["target"],
result["weather"]))
break
listener.stop()
return result, diagnostics
def on_release(key):
#print('{0} released'.format(key))
if key == keyboard.Key.page_down:
#print("pgdown pressed")
global manual_break
manual_break = True
def run_benchmark(agent_maker, env, benchmark_dir, seed, autopilot, resume, max_run=5, show=False, move_camera=False,
use_cv=False, trained_cv=False):
"""
benchmark_dir must be an instance of pathlib.Path
"""
summary_csv = benchmark_dir / 'summary.csv'
diagnostics_dir = benchmark_dir / 'diagnostics'
diagnostics_dir.mkdir(parents=True, exist_ok=True)
summary = list()
total = len(list(env.all_tasks))
if summary_csv.exists() and resume:
summary = pd.read_csv(summary_csv)
else:
summary = pd.DataFrame()
num_run = 0
for weather, (start, target), run_name in tqdm.tqdm(env.all_tasks, initial=1, total=total):
if resume and len(summary) > 0 and ((summary['start'] == start) \
& (summary['target'] == target) \
& (summary['weather'] == weather)).any():
print (weather, start, target)
continue
diagnostics_csv = str(diagnostics_dir / ('%s.csv' % run_name))
bzu.init_video(save_dir=str(benchmark_dir / 'videos'), save_path=run_name)
result, diagnostics = run_single(env, weather, start, target, agent_maker, seed, autopilot, show=show,
move_camera=move_camera, use_cv=use_cv, trained_cv=trained_cv)
summary = pd.concat([summary, pd.DataFrame([result])], ignore_index=True)
# Do this every timestep just in case.
pd.DataFrame(summary).to_csv(summary_csv, index=False)
| pd.DataFrame(diagnostics) | pandas.DataFrame |
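# Hypothetical call sketch: the real wiring (agent_maker, env construction, CLI flags) lives
# elsewhere in the repository, so the names and paths below are placeholders only.
# from pathlib import Path
# run_benchmark(agent_maker, env, Path('benchmark_results/my_run'), seed=2020,
#               autopilot=False, resume=True, max_run=5, show=False)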
import pandas as pd
import numpy as np
#import mysql_prices as ms
class AccountBalances:
usd_bal = 0
btc_bal = 0
def set_usd_bal(self, new_bal):
self.usd_bal = new_bal
def set_btc_bal(self, new_bal):
self.btc_bal = new_bal
class BuyManager:
cur_buys = 0
stop_price = 0
next_buy_price = 0
def set_stop(self, sell_at):
self.stop_price = sell_at
def set_cur_buys(self, cur_buys):
self.cur_buys = 0
def set_next_buy(self, buy_at):
self.cur_buys += 1
self.next_buy_price = buy_at
def gather_data(data_source):
if data_source == "kaggle_coinbase":
source_df = | pd.read_csv("~/coinbase_data.csv") | pandas.read_csv |