prompt (stringlengths 19-1.03M) | completion (stringlengths 4-2.12k) | api (stringlengths 8-90) |
---|---|---|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Plots for particles in the ExaTrkX routine.
The data requirements for each input dataframe are detailed below:
- hits:
- required: hit_id, x, y, z or r, phi, z
- pairs:
- required: hit_id_1, hit_id_2
- edges:
- required: hit_id_1, hit_id_2,
- optional: score
- particles:
- required: particle_id
- optional: vx, vy, vz, parent_pid
- truth:
- required: hit_id, particle_id
Required columns are used by every plot that takes that type of dataframe.
Optional columns serve special purposes and are not required by all plots.
A minimal example of these dataframes is sketched after the imports below.
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.collections as mc
from ExaTrkXPlotting import plot
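# A minimal sketch of the dataframes these plotting functions expect, assuming only
# the column names listed in the module docstring above; the values are illustrative
# and not part of the original module.
_example_hits = pd.DataFrame({
    'hit_id': [1, 2, 3],
    'r': [32.0, 72.0, 116.0],
    'phi': [0.10, 0.12, 0.15],
    'z': [-5.0, 10.0, 25.0],
})
_example_pairs = pd.DataFrame({'hit_id_1': [1, 2], 'hit_id_2': [2, 3]})
_example_truth = pd.DataFrame({'hit_id': [1, 2, 3], 'particle_id': [7, 7, 7]})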
@plot('exatrkx.particles.production_vertex', ['pairs', 'hits', 'particles'])
def production_vertices(ax, data):
pairs = data['pairs']
hits = data['hits']
particles = data['particles']
if all(pd.Series(['x', 'y']).isin(hits.columns)):
pass
elif all(pd.Series(['r', 'phi']).isin(hits.columns)):
# Cylindrical coord.
r = hits['r']
phi = hits['phi']
# Compute cartesian coord
x = r * np.cos(phi)
y = r * np.sin(phi)
hits = hits.assign(
x=x, y=y
)
else:
raise KeyError('No valid coordinate data found.')
hits_with_particles = pd.merge(
hits, particles,
how='inner'
)
pairs_with_pid = pd.merge(
pairs,
hits_with_particles,
left_on='hit_id_1',
right_on='hit_id',
how="inner"
)
# Group by vertex.
pairs_group_by_vertices = pairs_with_pid.groupby(['vx', 'vy', 'vz'])
# Create color map.
colors = plt.cm.get_cmap('gnuplot', len(pairs_group_by_vertices) + 1)
for idx, (vertex, pairs) in enumerate(pairs_group_by_vertices):
vx, vy, vz = vertex
# Get color.
color = colors(idx)
ax.scatter(
vx, vy,
marker='+',
color=color,
label=f'({vx}, {vy})'
)
@plot('exatrkx.particles.types', ['pairs', 'hits', 'particles'])
def particle_types(ax, data):
hits = data['hits']
pairs = data['pairs']
particles = data['particles']
if all(pd.Series(['x', 'y']).isin(hits.columns)):
pass
elif all(pd.Series(['r', 'phi']).isin(hits.columns)):
# Cylindrical coord.
r = hits['r']
phi = hits['phi']
# Compute cartesian coord
x = r * np.cos(phi)
y = r * np.sin(phi)
hits = hits.assign(
x=x, y=y
)
else:
raise KeyError('No valid coordinate data found.')
hits_with_particles = pd.merge(
hits, particles,
how='inner'
)
pairs_with_pid = pd.merge(
pairs,
hits_with_particles,
left_on='hit_id_2',
right_on='hit_id',
how="inner"
)
# Group by vertex.
pairs_group_by_vertices = pairs_with_pid.groupby(['vx', 'vy', 'vz'])
for idx, (vertex, pairs) in enumerate(pairs_group_by_vertices):
for particle_id, tracks in pairs.groupby('particle_id'):
# particle_type = particle_types[int(tracks['particle_type'].iloc[0])]
particle_type = int(tracks['particle_type'].iloc[0])
if 'r_2' not in tracks.columns:
tracks['r_2'] = np.sqrt(tracks['x']**2 + tracks['y']**2)
x, y = tracks.loc[
tracks['r_2'].idxmax()
][['x', 'y']]
ax.annotate(particle_type, (x, y))
@plot('exatrkx.particles.tracks_with_production_vertex.2d', ['pairs', 'hits', 'particles'])
def particle_track_with_production_vertex(ax, data, line_width=0.1):
"""
    Plot hit pair 2D connections. Requires the hits and pairs dataframes.
"""
hits = data['hits']
pairs = data['pairs']
particles = data['particles']
if all(pd.Series(['x', 'y']).isin(hits.columns)):
pass
elif all( | pd.Series(['r', 'phi']) | pandas.Series |
import pandas as pd
from business_rules.operators import (DataframeType, StringType,
NumericType, BooleanType, SelectType,
SelectMultipleType, GenericType)
from . import TestCase
from decimal import Decimal
import sys
import pandas
class StringOperatorTests(TestCase):
def test_operator_decorator(self):
self.assertTrue(StringType("foo").equal_to.is_operator)
def test_string_equal_to(self):
self.assertTrue(StringType("foo").equal_to("foo"))
self.assertFalse(StringType("foo").equal_to("Foo"))
def test_string_not_equal_to(self):
self.assertTrue(StringType("foo").not_equal_to("Foo"))
self.assertTrue(StringType("foo").not_equal_to("boo"))
self.assertFalse(StringType("foo").not_equal_to("foo"))
def test_string_equal_to_case_insensitive(self):
self.assertTrue(StringType("foo").equal_to_case_insensitive("FOo"))
self.assertTrue(StringType("foo").equal_to_case_insensitive("foo"))
self.assertFalse(StringType("foo").equal_to_case_insensitive("blah"))
def test_string_starts_with(self):
self.assertTrue(StringType("hello").starts_with("he"))
self.assertFalse(StringType("hello").starts_with("hey"))
self.assertFalse(StringType("hello").starts_with("He"))
def test_string_ends_with(self):
self.assertTrue(StringType("hello").ends_with("lo"))
self.assertFalse(StringType("hello").ends_with("boom"))
self.assertFalse(StringType("hello").ends_with("Lo"))
def test_string_contains(self):
self.assertTrue(StringType("hello").contains("ell"))
self.assertTrue(StringType("hello").contains("he"))
self.assertTrue(StringType("hello").contains("lo"))
self.assertFalse(StringType("hello").contains("asdf"))
self.assertFalse(StringType("hello").contains("ElL"))
def test_string_matches_regex(self):
self.assertTrue(StringType("hello").matches_regex(r"^h"))
self.assertFalse(StringType("hello").matches_regex(r"^sh"))
def test_non_empty(self):
self.assertTrue(StringType("hello").non_empty())
self.assertFalse(StringType("").non_empty())
self.assertFalse(StringType(None).non_empty())
class NumericOperatorTests(TestCase):
def test_instantiate(self):
err_string = "foo is not a valid numeric type"
with self.assertRaisesRegexp(AssertionError, err_string):
NumericType("foo")
def test_numeric_type_validates_and_casts_decimal(self):
ten_dec = Decimal(10)
ten_int = 10
ten_float = 10.0
if sys.version_info[0] == 2:
ten_long = long(10)
else:
ten_long = int(10) # long and int are same in python3
ten_var_dec = NumericType(ten_dec) # this should not throw an exception
ten_var_int = NumericType(ten_int)
ten_var_float = NumericType(ten_float)
ten_var_long = NumericType(ten_long)
self.assertTrue(isinstance(ten_var_dec.value, Decimal))
self.assertTrue(isinstance(ten_var_int.value, Decimal))
self.assertTrue(isinstance(ten_var_float.value, Decimal))
self.assertTrue(isinstance(ten_var_long.value, Decimal))
def test_numeric_equal_to(self):
self.assertTrue(NumericType(10).equal_to(10))
self.assertTrue(NumericType(10).equal_to(10.0))
self.assertTrue(NumericType(10).equal_to(10.000001))
self.assertTrue(NumericType(10.000001).equal_to(10))
self.assertTrue(NumericType(Decimal('10.0')).equal_to(10))
self.assertTrue(NumericType(10).equal_to(Decimal('10.0')))
self.assertFalse(NumericType(10).equal_to(10.00001))
self.assertFalse(NumericType(10).equal_to(11))
def test_numeric_not_equal_to(self):
self.assertTrue(NumericType(10).not_equal_to(10.00001))
self.assertTrue(NumericType(10).not_equal_to(11))
self.assertTrue(NumericType(Decimal('10.0')).not_equal_to(Decimal('10.1')))
self.assertFalse(NumericType(10).not_equal_to(10))
self.assertFalse(NumericType(10).not_equal_to(10.0))
self.assertFalse(NumericType(Decimal('10.0')).not_equal_to(Decimal('10.0')))
def test_other_value_not_numeric(self):
error_string = "10 is not a valid numeric type"
with self.assertRaisesRegexp(AssertionError, error_string):
NumericType(10).equal_to("10")
def test_numeric_greater_than(self):
self.assertTrue(NumericType(10).greater_than(1))
self.assertFalse(NumericType(10).greater_than(11))
self.assertTrue(NumericType(10.1).greater_than(10))
self.assertFalse(NumericType(10.000001).greater_than(10))
self.assertTrue(NumericType(10.000002).greater_than(10))
def test_numeric_greater_than_or_equal_to(self):
self.assertTrue(NumericType(10).greater_than_or_equal_to(1))
self.assertFalse(NumericType(10).greater_than_or_equal_to(11))
self.assertTrue(NumericType(10.1).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10.000001).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10.000002).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10).greater_than_or_equal_to(10))
def test_numeric_less_than(self):
self.assertTrue(NumericType(1).less_than(10))
self.assertFalse(NumericType(11).less_than(10))
self.assertTrue(NumericType(10).less_than(10.1))
self.assertFalse(NumericType(10).less_than(10.000001))
self.assertTrue(NumericType(10).less_than(10.000002))
def test_numeric_less_than_or_equal_to(self):
self.assertTrue(NumericType(1).less_than_or_equal_to(10))
self.assertFalse(NumericType(11).less_than_or_equal_to(10))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.1))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.000001))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.000002))
self.assertTrue(NumericType(10).less_than_or_equal_to(10))
class BooleanOperatorTests(TestCase):
def test_instantiate(self):
err_string = "foo is not a valid boolean type"
with self.assertRaisesRegexp(AssertionError, err_string):
BooleanType("foo")
err_string = "None is not a valid boolean type"
with self.assertRaisesRegexp(AssertionError, err_string):
BooleanType(None)
def test_boolean_is_true_and_is_false(self):
self.assertTrue(BooleanType(True).is_true())
self.assertFalse(BooleanType(True).is_false())
self.assertFalse(BooleanType(False).is_true())
self.assertTrue(BooleanType(False).is_false())
class SelectOperatorTests(TestCase):
def test_contains(self):
self.assertTrue(SelectType([1, 2]).contains(2))
self.assertFalse(SelectType([1, 2]).contains(3))
self.assertTrue(SelectType([1, 2, "a"]).contains("A"))
def test_does_not_contain(self):
self.assertTrue(SelectType([1, 2]).does_not_contain(3))
self.assertFalse(SelectType([1, 2]).does_not_contain(2))
self.assertFalse(SelectType([1, 2, "a"]).does_not_contain("A"))
class SelectMultipleOperatorTests(TestCase):
def test_contains_all(self):
self.assertTrue(SelectMultipleType([1, 2]).
contains_all([2, 1]))
self.assertFalse(SelectMultipleType([1, 2]).
contains_all([2, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
contains_all([2, 1, "A"]))
def test_is_contained_by(self):
self.assertTrue(SelectMultipleType([1, 2]).
is_contained_by([2, 1, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
is_contained_by([2, 3, 4]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
is_contained_by([2, 1, "A"]))
def test_shares_at_least_one_element_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_at_least_one_element_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_at_least_one_element_with([4, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
shares_at_least_one_element_with([4, "A"]))
def test_shares_exactly_one_element_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_exactly_one_element_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_exactly_one_element_with([4, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
shares_exactly_one_element_with([4, "A"]))
self.assertFalse(SelectMultipleType([1, 2, 3]).
shares_exactly_one_element_with([2, 3, "a"]))
def test_shares_no_elements_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_no_elements_with([4, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_no_elements_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2, "a"]).
shares_no_elements_with([4, "A"]))
class DataframeOperatorTests(TestCase):
def test_exists(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, ],
"var2": [3, 5, 6, ],
})
result: pd.Series = DataframeType({"value": df}).exists({"target": "var1"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).exists({"target": "--r1"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df}).exists({"target": "invalid"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
def test_not_exists(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, ],
"var2": [3, 5, 6, ]
})
result: pd.Series = DataframeType({"value": df}).not_exists({"target": "invalid"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df}).not_exists({"target": "var1"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
result: pd.Series = DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_exists({"target": "--r1"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
def test_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, "", 7, ],
"var2": [3, 5, 6, "", 2, ],
"var3": [1, 3, 8, "", 7, ],
"var4": ["test", "issue", "one", "", "two", ]
})
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": ""
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": 2
}).equals(pandas.Series([False, True, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False, False, True, ])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).equal_to({
"target": "--r1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False, False, True, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": 20
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var4",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([False, False, False, False, False, ])))
def test_not_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).not_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_equal_to({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_equal_to({
"target": "--r1",
"comparator": 20
}).equals(pandas.Series([True, True, True])))
def test_equal_to_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["word", "", "new", "val"],
"var2": ["WORD", "", "test", "VAL"],
"var3": ["LET", "", "GO", "read"]
})
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "NEW"
}).equals(pandas.Series([False, False, True, False])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": ""
}).equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).equal_to_case_insensitive({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, False, False, True])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, False, False, True])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([False, False, False, False])))
def test_not_equal_to_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["word", "new", "val"],
"var2": ["WORD", "test", "VAL"],
"var3": ["LET", "GO", "read"],
"var4": ["WORD", "NEW", "VAL"]
})
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, True, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
def test_less_than(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).less_than({
"target": "--r1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": 3
}).equals(pandas.Series([True, True, False])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, None, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).less_than({
"target": "LBDY",
"comparator": 5
}).equals(pandas.Series([True, False, False, False, False, ])))
def test_less_than_or_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).less_than_or_equal_to({
"target": "--r1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": "var1"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": "var3"
}).equals(pandas.Series([False, False, True])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, 5, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).less_than_or_equal_to({
"target": "LBDY",
"comparator": 5
}).equals(pandas.Series([True, True, False, False, False, ])))
def test_greater_than(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).greater_than({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": 5000
}).equals(pandas.Series([False, False, False])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, None, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).greater_than({
"target": "LBDY",
"comparator": 3
}).equals(pandas.Series([True, False, False, False, False, ])))
def test_greater_than_or_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).greater_than_or_equal_to({
"target": "var1",
"comparator": "--r4"
}).equals( | pandas.Series([True, True, True]) | pandas.Series |
import os
import pandas as pd
import xfeat
from xfeat import ArithmeticCombinations, ConcatCombination, CountEncoder, LabelEncoder
from ayniy.preprocessing import xfeat_runner, xfeat_target_encoding
from ayniy.utils import FeatureStore
categorical_cols = [
"Type",
"Breed1",
"Breed2",
"Gender",
"Color1",
"Color2",
"Color3",
"State",
"RescuerID",
]
numerical_cols = [
"Age",
"Dewormed",
"Fee",
"FurLength",
"Health",
"MaturitySize",
"PhotoAmt",
"Quantity",
"Sterilized",
"Vaccinated",
"VideoAmt",
]
target_col = "AdoptionSpeed"
def load_petfinder() -> pd.DataFrame:
filepath = "../input/petfinder-adoption-prediction/train_test.ftr"
if not os.path.exists(filepath):
# Convert dataset into feather format.
train = | pd.read_csv("../input/petfinder-adoption-prediction/train/train.csv") | pandas.read_csv |
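# Hedged sketch, separate from the truncated load_petfinder() above: a typical
# feather-caching pattern of the kind that function starts to build. The
# csv_path/cache_path arguments are hypothetical, not the original file layout.
def load_with_feather_cache(csv_path: str, cache_path: str) -> pd.DataFrame:
    """Read a CSV once, then reuse the feather cache on later calls."""
    if os.path.exists(cache_path):
        return pd.read_feather(cache_path)
    df = pd.read_csv(csv_path)
    # DataFrame.to_feather() expects a default RangeIndex, so reset it before writing.
    df.reset_index(drop=True).to_feather(cache_path)
    return df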
# -*- coding: utf-8 -*-
import re
import numpy as np
import pytest
from pandas.core.dtypes.common import (
is_bool_dtype, is_categorical, is_categorical_dtype,
is_datetime64_any_dtype, is_datetime64_dtype, is_datetime64_ns_dtype,
is_datetime64tz_dtype, is_datetimetz, is_dtype_equal, is_interval_dtype,
is_period, is_period_dtype, is_string_dtype)
from pandas.core.dtypes.dtypes import (
CategoricalDtype, DatetimeTZDtype, IntervalDtype, PeriodDtype, registry)
import pandas as pd
from pandas import (
Categorical, CategoricalIndex, IntervalIndex, Series, date_range)
from pandas.core.sparse.api import SparseDtype
import pandas.util.testing as tm
@pytest.fixture(params=[True, False, None])
def ordered(request):
return request.param
class Base(object):
def setup_method(self, method):
self.dtype = self.create()
def test_hash(self):
hash(self.dtype)
def test_equality_invalid(self):
assert not self.dtype == 'foo'
assert not is_dtype_equal(self.dtype, np.int64)
def test_numpy_informed(self):
pytest.raises(TypeError, np.dtype, self.dtype)
assert not self.dtype == np.str_
assert not np.str_ == self.dtype
def test_pickle(self):
# make sure our cache is NOT pickled
# clear the cache
type(self.dtype).reset_cache()
assert not len(self.dtype._cache)
# force back to the cache
result = tm.round_trip_pickle(self.dtype)
assert not len(self.dtype._cache)
assert result == self.dtype
class TestCategoricalDtype(Base):
def create(self):
return CategoricalDtype()
def test_pickle(self):
# make sure our cache is NOT pickled
# clear the cache
type(self.dtype).reset_cache()
assert not len(self.dtype._cache)
# force back to the cache
result = tm.round_trip_pickle(self.dtype)
assert result == self.dtype
def test_hash_vs_equality(self):
dtype = self.dtype
dtype2 = CategoricalDtype()
assert dtype == dtype2
assert dtype2 == dtype
assert hash(dtype) == hash(dtype2)
def test_equality(self):
assert is_dtype_equal(self.dtype, 'category')
assert is_dtype_equal(self.dtype, CategoricalDtype())
assert not is_dtype_equal(self.dtype, 'foo')
def test_construction_from_string(self):
result = CategoricalDtype.construct_from_string('category')
assert is_dtype_equal(self.dtype, result)
pytest.raises(
TypeError, lambda: CategoricalDtype.construct_from_string('foo'))
def test_constructor_invalid(self):
msg = "Parameter 'categories' must be list-like"
with pytest.raises(TypeError, match=msg):
CategoricalDtype("category")
dtype1 = CategoricalDtype(['a', 'b'], ordered=True)
dtype2 = CategoricalDtype(['x', 'y'], ordered=False)
c = Categorical([0, 1], dtype=dtype1, fastpath=True)
@pytest.mark.parametrize('values, categories, ordered, dtype, expected',
[
[None, None, None, None,
CategoricalDtype()],
[None, ['a', 'b'], True, None, dtype1],
[c, None, None, dtype2, dtype2],
[c, ['x', 'y'], False, None, dtype2],
])
def test_from_values_or_dtype(
self, values, categories, ordered, dtype, expected):
result = CategoricalDtype._from_values_or_dtype(values, categories,
ordered, dtype)
assert result == expected
@pytest.mark.parametrize('values, categories, ordered, dtype', [
[None, ['a', 'b'], True, dtype2],
[None, ['a', 'b'], None, dtype2],
[None, None, True, dtype2],
])
def test_from_values_or_dtype_raises(self, values, categories,
ordered, dtype):
msg = "Cannot specify `categories` or `ordered` together with `dtype`."
with pytest.raises(ValueError, match=msg):
CategoricalDtype._from_values_or_dtype(values, categories,
ordered, dtype)
def test_is_dtype(self):
assert CategoricalDtype.is_dtype(self.dtype)
assert CategoricalDtype.is_dtype('category')
assert CategoricalDtype.is_dtype(CategoricalDtype())
assert not CategoricalDtype.is_dtype('foo')
assert not CategoricalDtype.is_dtype(np.float64)
def test_basic(self):
assert is_categorical_dtype(self.dtype)
factor = Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
s = Series(factor, name='A')
# dtypes
assert is_categorical_dtype(s.dtype)
assert is_categorical_dtype(s)
assert not is_categorical_dtype(np.dtype('float64'))
assert is_categorical(s.dtype)
assert is_categorical(s)
assert not is_categorical(np.dtype('float64'))
assert not is_categorical(1.0)
def test_tuple_categories(self):
categories = [(1, 'a'), (2, 'b'), (3, 'c')]
result = CategoricalDtype(categories)
assert all(result.categories == categories)
@pytest.mark.parametrize("categories, expected", [
([True, False], True),
([True, False, None], True),
([True, False, "a", "b'"], False),
([0, 1], False),
])
def test_is_boolean(self, categories, expected):
cat = Categorical(categories)
assert cat.dtype._is_boolean is expected
assert is_bool_dtype(cat) is expected
assert is_bool_dtype(cat.dtype) is expected
class TestDatetimeTZDtype(Base):
def create(self):
return DatetimeTZDtype('ns', 'US/Eastern')
def test_alias_to_unit_raises(self):
# 23990
with tm.assert_produces_warning(FutureWarning):
DatetimeTZDtype('datetime64[ns, US/Central]')
def test_alias_to_unit_bad_alias_raises(self):
# 23990
with pytest.raises(TypeError, match=''):
DatetimeTZDtype('this is a bad string')
with pytest.raises(TypeError, match=''):
DatetimeTZDtype('datetime64[ns, US/NotATZ]')
def test_hash_vs_equality(self):
# make sure that we satisfy is semantics
dtype = self.dtype
dtype2 = DatetimeTZDtype('ns', 'US/Eastern')
dtype3 = DatetimeTZDtype(dtype2)
assert dtype == dtype2
assert dtype2 == dtype
assert dtype3 == dtype
assert hash(dtype) == hash(dtype2)
assert hash(dtype) == hash(dtype3)
dtype4 = DatetimeTZDtype("ns", "US/Central")
assert dtype2 != dtype4
assert hash(dtype2) != hash(dtype4)
def test_construction(self):
pytest.raises(ValueError,
lambda: DatetimeTZDtype('ms', 'US/Eastern'))
def test_subclass(self):
a = DatetimeTZDtype.construct_from_string('datetime64[ns, US/Eastern]')
b = DatetimeTZDtype.construct_from_string('datetime64[ns, CET]')
assert issubclass(type(a), type(a))
assert issubclass(type(a), type(b))
def test_compat(self):
assert is_datetime64tz_dtype(self.dtype)
assert is_datetime64tz_dtype('datetime64[ns, US/Eastern]')
assert is_datetime64_any_dtype(self.dtype)
assert is_datetime64_any_dtype('datetime64[ns, US/Eastern]')
assert is_datetime64_ns_dtype(self.dtype)
assert is_datetime64_ns_dtype('datetime64[ns, US/Eastern]')
assert not is_datetime64_dtype(self.dtype)
assert not is_datetime64_dtype('datetime64[ns, US/Eastern]')
def test_construction_from_string(self):
result = DatetimeTZDtype.construct_from_string(
'datetime64[ns, US/Eastern]')
assert is_dtype_equal(self.dtype, result)
pytest.raises(TypeError,
lambda: DatetimeTZDtype.construct_from_string('foo'))
def test_construct_from_string_raises(self):
with pytest.raises(TypeError, match="notatz"):
DatetimeTZDtype.construct_from_string('datetime64[ns, notatz]')
with pytest.raises(TypeError,
match="^Could not construct DatetimeTZDtype$"):
DatetimeTZDtype.construct_from_string(['datetime64[ns, notatz]'])
def test_is_dtype(self):
assert not DatetimeTZDtype.is_dtype(None)
assert DatetimeTZDtype.is_dtype(self.dtype)
assert DatetimeTZDtype.is_dtype('datetime64[ns, US/Eastern]')
assert not DatetimeTZDtype.is_dtype('foo')
assert DatetimeTZDtype.is_dtype(DatetimeTZDtype('ns', 'US/Pacific'))
assert not DatetimeTZDtype.is_dtype(np.float64)
def test_equality(self):
assert is_dtype_equal(self.dtype, 'datetime64[ns, US/Eastern]')
assert is_dtype_equal(self.dtype, DatetimeTZDtype('ns', 'US/Eastern'))
assert not is_dtype_equal(self.dtype, 'foo')
assert not is_dtype_equal(self.dtype, DatetimeTZDtype('ns', 'CET'))
assert not is_dtype_equal(DatetimeTZDtype('ns', 'US/Eastern'),
DatetimeTZDtype('ns', 'US/Pacific'))
# numpy compat
assert is_dtype_equal(np.dtype("M8[ns]"), "datetime64[ns]")
def test_basic(self):
assert is_datetime64tz_dtype(self.dtype)
dr = date_range('20130101', periods=3, tz='US/Eastern')
s = Series(dr, name='A')
# dtypes
assert is_datetime64tz_dtype(s.dtype)
assert is_datetime64tz_dtype(s)
assert not is_datetime64tz_dtype(np.dtype('float64'))
assert not is_datetime64tz_dtype(1.0)
with tm.assert_produces_warning(FutureWarning):
assert is_datetimetz(s)
assert is_datetimetz(s.dtype)
assert not is_datetimetz(np.dtype('float64'))
assert not is_datetimetz(1.0)
def test_dst(self):
dr1 = date_range('2013-01-01', periods=3, tz='US/Eastern')
s1 = Series(dr1, name='A')
assert is_datetime64tz_dtype(s1)
with tm.assert_produces_warning(FutureWarning):
assert is_datetimetz(s1)
dr2 = date_range('2013-08-01', periods=3, tz='US/Eastern')
s2 = Series(dr2, name='A')
assert is_datetime64tz_dtype(s2)
with tm.assert_produces_warning(FutureWarning):
assert is_datetimetz(s2)
assert s1.dtype == s2.dtype
@pytest.mark.parametrize('tz', ['UTC', 'US/Eastern'])
@pytest.mark.parametrize('constructor', ['M8', 'datetime64'])
def test_parser(self, tz, constructor):
# pr #11245
dtz_str = '{con}[ns, {tz}]'.format(con=constructor, tz=tz)
result = DatetimeTZDtype.construct_from_string(dtz_str)
expected = DatetimeTZDtype('ns', tz)
assert result == expected
def test_empty(self):
with pytest.raises(TypeError, match="A 'tz' is required."):
DatetimeTZDtype()
class TestPeriodDtype(Base):
def create(self):
return PeriodDtype('D')
def test_hash_vs_equality(self):
# make sure that we satisfy is semantics
dtype = self.dtype
dtype2 = PeriodDtype('D')
dtype3 = PeriodDtype(dtype2)
assert dtype == dtype2
assert dtype2 == dtype
assert dtype3 == dtype
assert dtype is dtype2
assert dtype2 is dtype
assert dtype3 is dtype
assert hash(dtype) == hash(dtype2)
assert hash(dtype) == hash(dtype3)
def test_construction(self):
with pytest.raises(ValueError):
PeriodDtype('xx')
for s in ['period[D]', 'Period[D]', 'D']:
dt = PeriodDtype(s)
assert dt.freq == pd.tseries.offsets.Day()
assert is_period_dtype(dt)
for s in ['period[3D]', 'Period[3D]', '3D']:
dt = PeriodDtype(s)
assert dt.freq == pd.tseries.offsets.Day(3)
assert is_period_dtype(dt)
for s in ['period[26H]', 'Period[26H]', '26H',
'period[1D2H]', 'Period[1D2H]', '1D2H']:
dt = PeriodDtype(s)
assert dt.freq == pd.tseries.offsets.Hour(26)
assert is_period_dtype(dt)
def test_subclass(self):
a = PeriodDtype('period[D]')
b = PeriodDtype('period[3D]')
assert issubclass(type(a), type(a))
assert issubclass(type(a), type(b))
def test_identity(self):
assert PeriodDtype('period[D]') == PeriodDtype('period[D]')
assert PeriodDtype('period[D]') is PeriodDtype('period[D]')
assert PeriodDtype('period[3D]') == PeriodDtype('period[3D]')
assert PeriodDtype('period[3D]') is PeriodDtype('period[3D]')
assert PeriodDtype('period[1S1U]') == PeriodDtype('period[1000001U]')
assert PeriodDtype('period[1S1U]') is PeriodDtype('period[1000001U]')
def test_compat(self):
assert not is_datetime64_ns_dtype(self.dtype)
assert not is_datetime64_ns_dtype('period[D]')
assert not is_datetime64_dtype(self.dtype)
assert not is_datetime64_dtype('period[D]')
def test_construction_from_string(self):
result = PeriodDtype('period[D]')
assert is_dtype_equal(self.dtype, result)
result = PeriodDtype.construct_from_string('period[D]')
assert is_dtype_equal(self.dtype, result)
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('foo')
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('period[foo]')
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('foo[D]')
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('datetime64[ns]')
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('datetime64[ns, US/Eastern]')
def test_is_dtype(self):
assert PeriodDtype.is_dtype(self.dtype)
assert PeriodDtype.is_dtype('period[D]')
assert PeriodDtype.is_dtype('period[3D]')
assert PeriodDtype.is_dtype(PeriodDtype('3D'))
assert PeriodDtype.is_dtype('period[U]')
assert PeriodDtype.is_dtype('period[S]')
assert PeriodDtype.is_dtype(PeriodDtype('U'))
assert PeriodDtype.is_dtype(PeriodDtype('S'))
assert not PeriodDtype.is_dtype('D')
assert not PeriodDtype.is_dtype('3D')
assert not PeriodDtype.is_dtype('U')
assert not PeriodDtype.is_dtype('S')
assert not PeriodDtype.is_dtype('foo')
assert not PeriodDtype.is_dtype(np.object_)
assert not PeriodDtype.is_dtype(np.int64)
assert not PeriodDtype.is_dtype(np.float64)
def test_equality(self):
assert is_dtype_equal(self.dtype, 'period[D]')
assert is_dtype_equal(self.dtype, PeriodDtype('D'))
assert is_dtype_equal(self.dtype, PeriodDtype('D'))
assert is_dtype_equal(PeriodDtype('D'), PeriodDtype('D'))
assert not is_dtype_equal(self.dtype, 'D')
assert not is_dtype_equal(PeriodDtype('D'), | PeriodDtype('2D') | pandas.core.dtypes.dtypes.PeriodDtype |
import pandas as pd
import numpy as np
import scipy
import os, sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pylab
import matplotlib as mpl
import seaborn as sns
import analysis_utils
from multiprocessing import Pool
sys.path.append('../utils/')
from game_utils import *
in_dir = '../../'
out_dir = '../../plots/'
#out_dir = '../../figures/'
# data_names = ['Behavioral Data', 'Goal Inference Model']
# data_dirs = ['new-processed-processed-1en01','goal-inference-simulations-processed-1en01']
# nominal_dirs = ['new-synthetic-processed-1en01','synthetic-goal-inference-simulations-processed-1en01']
# matched_dirs = ['new-synthetic-score-matched-processed-1en01','synthetic-score-matched-goal-inference-simulations-processed-1en01']
# subset = '1en01'
# start = 1440
# groups = ['High Scoring','Low Scoring','']
# behaviors = ['Skilled','']
# score_cutoff = 0.75
# matched = [True, False]
data_names = ['Goal Inference Noise Model']
data_dirs = ['parset-simulations-processed-1en01']
nominal_dirs = ['synthetic-parset-simulations-processed-1en01']
matched_dirs = ['synthetic-score-matched-parset-simulations-processed-1en01']
subset = '1en01'
# data_names = ['Goal Inference Attention Model']
# data_dirs = ['goal-inference-attention-simulations-processed-1en01']
# nominal_dirs = ['synthetic-goal-inference-attention-simulations-processed-1en01']
# matched_dirs = ['synthetic-score-matched-goal-inference-attention-simulations-processed-1en01']
# subset = '1en01'
# data_names = ['Social Heuristic']
# data_dirs = ['social-heuristic-simulations-processed-1en01']
# nominal_dirs = ['synthetic-social-heuristic-simulations-processed-1en01']
# matched_dirs = ['synthetic-score-matched-social-heuristic-simulations-processed-1en01']
# subset = '1en01'
# data_names = ['Unconditional Social Heuristic']
# data_dirs = ['unconditional-social-heuristic-simulations-simulations-processed-1en01']
# nominal_dirs = ['synthetic-unconditional-social-heuristic-simulations-simulations-processed-1en01']
# matched_dirs = ['synthetic-score-matched-unconditional-social-heuristic-simulations-simulations-processed-1en01']
# subset = '1en01'
start = 1440
groups = ['']
behaviors = ['']
score_cutoff = 0.75
matched = [True, False]
def score(sub):
return np.mean(sub['bg_val'])
def speed(sub):
return sum(sub['velocity'] > 3) > 0
def spinning(sub):
return sum(sub['spinning']) > 0
def dist_to_mean_others(sub):
return np.mean(sub['dist_to_mean_others'])
def face_towards_after_away(sub):
ignore_state = lambda sub, i: sub.iloc[i]['spinning']
this_state = lambda sub, i: sub.iloc[i]['ave_dist_others'] < sub.iloc[i]['dist_to_mean_others']
next_state = lambda sub, i: sub.iloc[i]['facing']
return analysis_utils.get_value(sub, ignore_state, this_state, next_state)
def face_away_when_low(sub):
start_index = 1
initial_condition = lambda sub, i: (sub.iloc[i]['ave_dist_others'] > sub.iloc[i]['dist_to_mean_others']) and sub.iloc[i]['bg_val'] < 1.0
while_condition = lambda sub, i: sub.iloc[i]['bg_val'] < 1.0
final_condition = lambda sub, i: (sub.iloc[i]['ave_dist_others'] < sub.iloc[i]['dist_to_mean_others'])
return analysis_utils.get_while_value(sub, initial_condition, while_condition, final_condition, start_index)
def facing_spinning(sub):
start_index = 1
initial_condition = lambda sub, i: ~sub.iloc[i-1]['spinning'] and ~sub.iloc[i-1]['other_spinning'] and ~sub.iloc[i]['spinning'] and sub.iloc[i]['other_spinning']
while_condition = lambda sub, i: ~sub.iloc[i-1]['facing_spinning']
final_condition = lambda sub, i: sub.iloc[i]['facing_spinning']
return analysis_utils.get_while_value(sub, initial_condition, while_condition, final_condition, start_index)
function_names = ['Score','Speed','Spinning','Distance to Mean of Other Positions','Average Time Before Facing Distant Group', 'Average Time Before Facing Away From Group After Low Score', 'Average Time Before Facing Spinning Players']
functions = [score, speed, spinning, dist_to_mean_others, face_towards_after_away, face_away_when_low, facing_spinning]
compares = [False, False, False, True, True, True, True]
# function_names = ['Distance to Mean of Other Positions', 'Average Time Before Facing Away From Group After Low Score', 'Average Time Before Facing Spinning Players']
# functions = [dist_to_mean_others, face_away_when_low, facing_spinning]
# compares = [True, True, True]
def plot_synthetic(args):
data_ind, func_ind, group, behavior, match = args
data_dir = in_dir + data_dirs[data_ind]
function = functions[func_ind]
if match:
synthetic_dir = in_dir + matched_dirs[data_ind]
else:
synthetic_dir = in_dir + nominal_dirs[data_ind]
games = []
ns = []
values = []
scores = []
sources = []
lengths = []
for t,game in enumerate(os.listdir(data_dir)):
if game[-4:] != '.csv':
continue
if game.split('_')[-2].split('-')[1] != subset:
continue
data = pd.io.parsers.read_csv(data_dir + '/' + game)
syn_data = | pd.io.parsers.read_csv(synthetic_dir + '/' + game) | pandas.io.parsers.read_csv |
from sklearn.model_selection import cross_val_score, train_test_split, GridSearchCV
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from pprint import pprint
import pandas as pd
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score, precision_score, recall_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler, StandardScaler, PowerTransformer
from sklearn.cluster import KMeans
import preprocessing_permits as pr
import numpy as np
############################################################################################################
# Cross Validation Modeling #
############################################################################################################
def run_decision_tree_cv(X_train, y_train):
'''
    Run a decision tree model: create the classifier, then use cross-validation
    grid search to find the best hyperparameters. Returns grid (the fitted search
    object), df_result (holds the recall score for every hyperparameter combination)
    and model (the estimator with the best hyperparameters, used to create predictions).
    A self-contained sketch of this grid-search pattern follows the function below.
'''
# keys are names of hyperparams, values are a list of values to try for that hyper parameter
params = {
'max_depth': range(1, 11),
'criterion': ['gini', 'entropy']
}
dtree = DecisionTreeClassifier()
    # cv=3 means 3-fold cross-validation, i.e. k = 3
grid = GridSearchCV(dtree, params, cv=3, scoring= "recall")
grid.fit(X_train, y_train)
model = grid.best_estimator_
results = grid.cv_results_
for score, p in zip(results['mean_test_score'], results['params']):
p['score'] = score
df_result = | pd.DataFrame(results['params']) | pandas.DataFrame |
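# Hedged, self-contained sketch of the same grid-search pattern used in
# run_decision_tree_cv above, run on synthetic data; the real features come from
# preprocessing_permits and are not shown here.
X_demo = pd.DataFrame(np.random.rand(120, 4), columns=['f1', 'f2', 'f3', 'f4'])
y_demo = pd.Series(np.random.randint(0, 2, size=120))
demo_grid = GridSearchCV(
    DecisionTreeClassifier(),
    {'max_depth': range(1, 6), 'criterion': ['gini', 'entropy']},
    cv=3,
    scoring='recall',
)
demo_grid.fit(X_demo, y_demo)
print(demo_grid.best_params_, demo_grid.best_score_)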
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
from datetime import datetime, timedelta
import numpy as np
from scipy.stats import pearsonr
# from mpl_toolkits.axes_grid1 import host_subplot
# import mpl_toolkits.axisartist as AA
# import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
import matplotlib.font_manager as fm
import math as m
import matplotlib.dates as mdates
import netCDF4 as nc
from netCDF4 import Dataset
import itertools
import datetime
from scipy.stats import ks_2samp
Estacion = '6001'
df1 = pd.read_table('/home/nacorreasa/Maestria/Datos_Tesis/Piranometro/6001Historico.txt', parse_dates=[2])
Theoric_rad_method = 'GIS_Model' ##-->> TO USE THE GIS MODEL THIS MUST BE 'GIS_Model'
resolucion = 'diaria' ##-->> THE OPTIONS ARE 'diaria' (daily) OR 'horaria' (hourly)
#-----------------------------------------------------------------------------
# Paths for the fonts ---------------------------------------------------------
prop = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Heavy.otf' )
prop_1 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Book.otf')
prop_2 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Black.otf')
## ---THEORETICAL RADIATION CALCULATION--- ##
def daterange(start_date, end_date):
    'For adjusting the dates in the Kumar model every 10 min. The start and end dates are str: %Y-%m-%d'
start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')
end_date = datetime.datetime.strptime(end_date, '%Y-%m-%d')
delta = timedelta(minutes=10)
while start_date <= end_date:
yield start_date
start_date += delta
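# Illustrative check (dates assumed for the example): daterange yields 10-minute
# steps with both endpoints included, so one full day gives 145 timestamps.
pasos_demo = list(daterange('2018-01-01', '2018-01-02'))
assert len(pasos_demo) == 145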
def serie_Kumar_Model_hora(estacion):
    'Returns an hourly dataframe with the theoretical radiation following the Kumar recommendations, developed by <NAME> ' \
    'for the AMVA and his thesis. The original dataframe is sorted into 12 ascending months (2018), although they may ' \
    'belong to different years. The result is for the selected point, using the Total_Timeseries.csv file.'
data_Model = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Radiacion_GIS/Teoricos_nati/Total_Timeseries.csv',
sep=',')
fecha_hora = [pd.to_datetime(data_Model['Unnamed: 0'], format="%Y-%m-%d %H:%M:%S")[i].to_pydatetime() for i in
range(len(data_Model['Unnamed: 0']))]
data_Model.index = fecha_hora
data_Model = data_Model.sort_index()
data_Model['Month'] = np.array(data_Model.index.month)
data_Model = data_Model.sort_values(by="Month")
fechas = []
for i in daterange('2018-01-01', '2019-01-01'):
fechas.append(i)
fechas = fechas[0:-1]
if estacion == '6001':
punto = data_Model['TS_kumar']
elif estacion == '6002':
punto = data_Model['CI_kumar']
elif estacion == '6003':
punto = data_Model['JV_kumar']
Rad_teorica = []
for i in range(len(fechas)):
mes = fechas[i].month
hora = fechas[i].hour
mint = fechas[i].minute
rad = \
np.where((data_Model.index.month == mes) & (data_Model.index.hour == hora) & (data_Model.index.minute == mint))[
0]
if len(rad) == 0:
Rad_teorica.append(np.nan)
else:
Rad_teorica.append(punto.iloc[rad].values[0])
data_Theorical = pd.DataFrame()
data_Theorical['fecha_hora'] = fechas
data_Theorical['Radiacion_Teo'] = Rad_teorica
data_Theorical.index = data_Theorical['fecha_hora']
df_hourly_theoric = data_Theorical.groupby(pd.Grouper(freq="H")).mean()
df_hourly_theoric = df_hourly_theoric[df_hourly_theoric['Radiacion_Teo'] > 0]
return df_hourly_theoric
def Elevation_RadiationTA(n, lat, lon, start):
    'Obtains the radiation in W/m2 and the solar elevation angle in degrees, hourly, for a number "n" of ' \
    'days at a point with a given latitude and longitude ("lat"-"lon" as floats), starting from an initial date ' \
    '"start", for example datetime.datetime(2018, 1, 1, 8).'
import pysolar
import pytz
import datetime
timezone = pytz.timezone("America/Bogota")
start_aware = timezone.localize(start)
# Calculate radiation every hour for 365 days
nhr = 24*n
dates, altitudes_deg, radiations = list(), list(), list()
for ihr in range(nhr):
date = start_aware + datetime.timedelta(hours=ihr)
altitude_deg = pysolar.solar.get_altitude(lat, lon, date)
if altitude_deg <= 0:
radiation = 0.
else:
radiation = pysolar.radiation.get_radiation_direct(date, altitude_deg)
dates.append(date)
altitudes_deg.append(altitude_deg)
radiations.append(radiation)
days = [ihr/24 for ihr in range(nhr)]
return days, altitudes_deg, radiations
if Theoric_rad_method != 'GIS_Model' and Estacion == '6001':
days, altitudes_deg, Io_hora = Elevation_RadiationTA(365, 6.259, -75.588, datetime.datetime(2018, 1, 1, 0))
    print('Theoretical with pysolar')
elif Theoric_rad_method != 'GIS_Model' and Estacion == '6002':
days, altitudes_deg, Io_hora = Elevation_RadiationTA(365, 6.168, -75.644, datetime.datetime(2018, 1, 1, 0))
    print('Theoretical with pysolar')
elif Theoric_rad_method != 'GIS_Model' and Estacion == '6003':
days, altitudes_deg, Io_hora = Elevation_RadiationTA(365, 6.255, -75.542, datetime.datetime(2018, 1, 1, 0))
    print('Theoretical with pysolar')
elif Theoric_rad_method == 'GIS_Model':
Io_hora = serie_Kumar_Model_hora(Estacion)
    print('Theoretical with the KUMAR model')
###############################################################################
##--------------THEORETICAL EFFICIENCIES AS A PROXY FOR TRANSPARENCY-------------##
###############################################################################
'Calculation of the theoretical efficiencies as a proxy for atmospheric transparency.'
'This uses the pyranometer information and the theoretical radiation'
'of <NAME>; with this we intend to obtain the characteristics derived'
'from the stochastic analysis, similar to that of <NAME> in his doctoral thesis.'
##------------------READING THE EXPERIMENT DATA----------------------##
df_P975 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Experimentos_Panel/Panel975.txt', sep=',', index_col =0)
df_P350 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Experimentos_Panel/Panel350.txt', sep=',', index_col =0)
df_P348 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Experimentos_Panel/Panel348.txt', sep=',', index_col =0)
df_P975['Fecha_hora'] = df_P975.index
df_P350['Fecha_hora'] = df_P350.index
df_P348['Fecha_hora'] = df_P348.index
df_P975.index = pd.to_datetime(df_P975.index, format="%Y-%m-%d %H:%M:%S", errors='coerce')
df_P350.index = pd.to_datetime(df_P350.index, format="%Y-%m-%d %H:%M:%S", errors='coerce')
df_P348.index = pd.to_datetime(df_P348.index, format="%Y-%m-%d %H:%M:%S", errors='coerce')
## ----------------RESTRICTING THE DATA TO VALID VALUES---------------- ##
'Since radiation is what matters in this case, the data are filtered by'
'keeping power readings greater than or equal to 0, which seem to be generated one'
'hour after radiation starts to arrive.'
df_P975 = df_P975[(df_P975['radiacion'] > 0) & (df_P975['strength'] >=0) & (df_P975['NI'] >=0)]
df_P350 = df_P350[(df_P350['radiacion'] > 0) & (df_P350['strength'] >=0) & (df_P350['NI'] >=0)]
df_P348 = df_P348[(df_P348['radiacion'] > 0) & (df_P348['strength'] >=0) & (df_P348['NI'] >=0)]
df_P975_h = df_P975.groupby(pd.Grouper(level='fecha_hora', freq='1H')).mean()
df_P350_h = df_P350.groupby(pd.Grouper(level='fecha_hora', freq='1H')).mean()
df_P348_h = df_P348.groupby(pd.Grouper(level='fecha_hora', freq='1H')).mean()
df_P975_h = df_P975_h.between_time('06:00', '17:00')
df_P350_h = df_P350_h.between_time('06:00', '17:00')
df_P348_h = df_P348_h.between_time('06:00', '17:00')
##----ADJUSTING THE THEORETICAL RADIATION DATA TO THE DESIRED DATE RANGE-----##
def daterange(start_date, end_date):
    'For adjusting the dates in the Kumar model every hour. The start and end'
    'dates are str: %Y-%m-%d'
start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')
end_date = datetime.datetime.strptime(end_date, '%Y-%m-%d')
delta = timedelta(minutes=60)
while start_date <= end_date:
yield start_date
start_date += delta
Io_hora_975 = serie_Kumar_Model_hora('6001')
Io_hora_350 = serie_Kumar_Model_hora('6002')
Io_hora_348 = serie_Kumar_Model_hora('6003')
fechas_975 = []
for i in daterange(df_P975.index[0].date().strftime("%Y-%m-%d"), (df_P975.index[-1].date() + timedelta(days=1)).strftime("%Y-%m-%d")):
fechas_975.append(i)
fechas_350 = []
for i in daterange(df_P350.index[0].date().strftime("%Y-%m-%d"), (df_P350.index[-1].date() + timedelta(days=1)).strftime("%Y-%m-%d")):
fechas_350.append(i)
fechas_348 = []
for i in daterange(df_P348.index[0].date().strftime("%Y-%m-%d"), (df_P348.index[-1].date() + timedelta(days=1)).strftime("%Y-%m-%d")):
fechas_348.append(i)
Io_hora_975 = Io_hora_975.loc[(Io_hora_975.index >= '2018-03-20') & (Io_hora_975.index <= '2018-'+str(df_P975.index[-1].month)+'-'+str(df_P975.index[-1].day+1))]
Io_hora_350 = Io_hora_350.loc[(Io_hora_350.index >= '2018-03-22') & (Io_hora_350.index <= '2018-'+str(df_P350.index[-1].month)+'-'+str(df_P350.index[-1].day+1))]
Io_hora_348 = Io_hora_348.loc[(Io_hora_348.index >= '2018-03-23') & (Io_hora_348.index <= '2018-'+str(df_P348.index[-1].month)+'-'+str(df_P348.index[-1].day+1))]
Io_hora_975 = Io_hora_975.between_time('06:00', '17:00')
Io_hora_975.index = [Io_hora_975.index[i].replace(year=2019) for i in range(len(Io_hora_975.index))]
Io_hora_350 = Io_hora_350.between_time('06:00', '17:00')
Io_hora_350.index = [Io_hora_350.index[i].replace(year=2019) for i in range(len(Io_hora_350.index))]
Io_hora_348 = Io_hora_348.between_time('06:00', '17:00')
Io_hora_348.index = [Io_hora_348.index[i].replace(year=2019) for i in range(len(Io_hora_348.index))]
df_Rad_P975 = pd.concat([Io_hora_975, df_P975_h], axis = 1)
df_Rad_P350 = pd.concat([Io_hora_350, df_P350_h], axis = 1)
df_Rad_P348 = pd.concat([Io_hora_348, df_P348_h], axis = 1)
df_Rad_P975 = df_Rad_P975.drop(['NI','strength'], axis=1)
df_Rad_P350 = df_Rad_P350.drop(['NI','strength'], axis=1)
df_Rad_P348 = df_Rad_P348.drop(['NI','strength'], axis=1)
##--------------------ACTUAL EFFICIENCY AS A PROXY FOR TRANSPARENCY-----------------##
df_Rad_P975['Efi_Transp'] = df_Rad_P975['radiacion'] / df_Rad_P975['Radiacion_Teo']
df_Rad_P350['Efi_Transp'] = df_Rad_P350['radiacion'] / df_Rad_P350['Radiacion_Teo']
df_Rad_P348['Efi_Transp'] = df_Rad_P348['radiacion'] / df_Rad_P348['Radiacion_Teo']
##-----------------HOURS WITH THE HIGHEST EFFICIENCY AND THEIR HISTOGRAM-------------##
'The frequency of the hours that exceeded the efficiency maximum (1) is shown in the histogram'
'below. The result shows that the highest frequencies occur at 6 and 7 in the'
'morning, which is attributable to shortcomings of the clear-sky radiation model'
'at those points.'
Hour_Max_Efi_975 = df_Rad_P975[df_Rad_P975['Efi_Transp']>1].index.hour
Hour_Max_Efi_350 = df_Rad_P350[df_Rad_P350['Efi_Transp']>1].index.hour
Hour_Max_Efi_348 = df_Rad_P348[df_Rad_P348['Efi_Transp']>1].index.hour
fig = plt.figure(figsize=[10, 6])
plt.rc('axes', edgecolor='gray')
ax1 = fig.add_subplot(1, 3, 1)
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.hist(Hour_Max_Efi_348, bins='auto', alpha = 0.5)
ax1.set_title(u'Distribución horas de excedencia \n de la eficiencia en JV', fontproperties=prop, fontsize = 8)
ax1.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax1.set_xlabel(u'Horas', fontproperties=prop_1)
ax1.legend()
ax2 = fig.add_subplot(1, 3, 2)
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.hist(Hour_Max_Efi_350, bins='auto', alpha = 0.5)
ax2.set_title(u'Distribución horas de excedencia \n de la eficiencia en CI', fontproperties=prop, fontsize = 8)
ax2.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax2.set_xlabel(u'Horas', fontproperties=prop_1)
ax2.legend()
ax3 = fig.add_subplot(1, 3, 3)
ax3.spines['top'].set_visible(False)
ax3.spines['right'].set_visible(False)
ax3.hist(Hour_Max_Efi_975, bins='auto', alpha = 0.5)
ax3.set_title(u'Distribución horas de excedencia \n de la eficiencia en TS', fontproperties=prop, fontsize = 8)
ax3.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax3.set_xlabel(u'Horas', fontproperties=prop_1)
ax3.legend()
plt.savefig('/home/nacorreasa/Escritorio/Figuras/HistoHoraExceEfi.png')
plt.show()
##-------DISCRIMINATING BETWEEN RAINY AND DRY DAYS BY RADIATION PERCENTILES--------##
'To deal with the situation in which there may be days when the pyranometers only measured'
'during a fraction of the day, due to possible damage or disturbances, only the days with'
'at least 6 hours of measurements should be considered.'
df_Rad_P975_count_h_pira = df_Rad_P975.groupby(pd.Grouper(freq="D")).count()['radiacion']>6
df_Rad_P350_count_h_pira = df_Rad_P350.groupby(pd.Grouper(freq="D")).count()['radiacion']>6
df_Rad_P348_count_h_pira = df_Rad_P348.groupby(pd.Grouper(freq="D")).count()['radiacion']>6
days_P975_count_h_pira = df_Rad_P975_count_h_pira.index[df_Rad_P975_count_h_pira == True]
days_P350_count_h_pira = df_Rad_P350_count_h_pira.index[df_Rad_P350_count_h_pira == True]
days_P348_count_h_pira = df_Rad_P348_count_h_pira.index[df_Rad_P348_count_h_pira == True]
'Thresholds were established empirically to select the markedly cloudy and'
'markedly clear days within the recording period, following the procedures in the'
'program Umbrales_Radiacion_Piranometro.py'
Sum_df_Rad_P975 = df_Rad_P975.groupby(pd.Grouper(freq='1D')).sum()
Sum_df_Rad_P350 = df_Rad_P350.groupby(pd.Grouper(freq='1D')).sum()
Sum_df_Rad_P348 = df_Rad_P348.groupby(pd.Grouper(freq='1D')).sum()
Sum_df_Rad_P975 = Sum_df_Rad_P975[Sum_df_Rad_P975['radiacion']>0]
Sum_df_Rad_P350 = Sum_df_Rad_P350[Sum_df_Rad_P350['radiacion']>0]
Sum_df_Rad_P348 = Sum_df_Rad_P348[Sum_df_Rad_P348['radiacion']>0]
lista_days_975 = []
for i in range(len(Sum_df_Rad_P975)):
if Sum_df_Rad_P975.index[i] in days_P975_count_h_pira:
lista_days_975.append(1)
else:
lista_days_975.append(0)
Sum_df_Rad_P975['days'] = lista_days_975
Sum_df_Rad_P975 = Sum_df_Rad_P975[Sum_df_Rad_P975['days'] == 1]
Sum_df_Rad_P975 = Sum_df_Rad_P975.drop(['days'], axis = 1)
lista_days_350 = []
for i in range(len(Sum_df_Rad_P350)):
if Sum_df_Rad_P350.index[i] in days_P350_count_h_pira:
lista_days_350.append(1)
else:
lista_days_350.append(0)
Sum_df_Rad_P350['days'] = lista_days_350
Sum_df_Rad_P350 = Sum_df_Rad_P350[Sum_df_Rad_P350['days'] == 1]
Sum_df_Rad_P350 = Sum_df_Rad_P350.drop(['days'], axis = 1)
lista_days_348 = []
for i in range(len(Sum_df_Rad_P348)):
if Sum_df_Rad_P348.index[i] in days_P348_count_h_pira:
lista_days_348.append(1)
else:
lista_days_348.append(0)
Sum_df_Rad_P348['days'] = lista_days_348
Sum_df_Rad_P348 = Sum_df_Rad_P348[Sum_df_Rad_P348['days'] == 1]
Sum_df_Rad_P348 = Sum_df_Rad_P348.drop(['days'], axis = 1)
Desp_Pira_975 = Sum_df_Rad_P975[Sum_df_Rad_P975.radiacion>=(Sum_df_Rad_P975.Radiacion_Teo)*0.85]
Desp_Pira_350 = Sum_df_Rad_P350[Sum_df_Rad_P350.radiacion>=(Sum_df_Rad_P350.Radiacion_Teo)*0.78]
Desp_Pira_348 = Sum_df_Rad_P348[Sum_df_Rad_P348.radiacion>=(Sum_df_Rad_P348.Radiacion_Teo)*0.80]
Nuba_Pira_975 = Sum_df_Rad_P975[Sum_df_Rad_P975.radiacion<=(Sum_df_Rad_P975.Radiacion_Teo)*0.25]
Nuba_Pira_350 = Sum_df_Rad_P350[Sum_df_Rad_P350.radiacion<=(Sum_df_Rad_P350.Radiacion_Teo)*0.25]
Nuba_Pira_348 = Sum_df_Rad_P348[Sum_df_Rad_P348.radiacion<=(Sum_df_Rad_P348.Radiacion_Teo)*0.22]
Appended_data_desp_975 = []
for i in range(len(Desp_Pira_975.index.values)):
Appended_data_desp_975.append(df_P975_h[df_P975_h.index.date == Desp_Pira_975.index.date[i]])
Appended_data_desp_975 = pd.concat(Appended_data_desp_975)
Appended_data_desp_350 = []
for i in range(len(Desp_Pira_350.index.values)):
Appended_data_desp_350.append(df_P350_h[df_P350_h.index.date == Desp_Pira_350.index.date[i]])
Appended_data_desp_350 = pd.concat(Appended_data_desp_350)
Appended_data_desp_348 = []
for i in range(len(Desp_Pira_348.index.values)):
Appended_data_desp_348.append(df_P348_h[df_P348_h.index.date == Desp_Pira_348.index.date[i]])
Appended_data_desp_348 = pd.concat(Appended_data_desp_348)
Appended_data_nuba_975 = []
for i in range(len(Nuba_Pira_975.index.values)):
Appended_data_nuba_975.append(df_P975_h[df_P975_h.index.date == Nuba_Pira_975.index.date[i]])
Appended_data_nuba_975 = pd.concat(Appended_data_nuba_975)
Appended_data_nuba_350 = []
for i in range(len(Nuba_Pira_350.index.values)):
Appended_data_nuba_350.append(df_P350_h[df_P350_h.index.date == Nuba_Pira_350.index.date[i]])
Appended_data_nuba_350 = pd.concat(Appended_data_nuba_350)
Appended_data_nuba_348 = []
for i in range(len(Nuba_Pira_348.index.values)):
Appended_data_nuba_348.append(df_P348_h[df_P348_h.index.date == Nuba_Pira_348.index.date[i]])
Appended_data_nuba_348 = pd.concat(Appended_data_nuba_348)
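# Appended_data_desp_* / Appended_data_nuba_* collect the hourly records belonging to the
# selected clear / cloudy days for each site (975, 350, 348).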
#------------------HISTOGRAMAS DE RADIACION PARA CADA PUNTO EN LOS DOS CASOS----------------##
fig = plt.figure(figsize=[10, 6])
plt.rc('axes', edgecolor='gray')
ax1 = fig.add_subplot(1, 3, 1)
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.hist(Appended_data_desp_348['radiacion'], bins='auto', alpha = 0.5, color = 'orange', label = 'Desp')
ax1.hist(Appended_data_nuba_348['radiacion'], bins='auto', alpha = 0.5, color = 'blue', label = 'Nub')
ax1.set_title(u'Distribución de la radiación \n en dias despejados y nublados en JV', fontproperties=prop, fontsize = 8)
ax1.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax1.set_xlabel(u'Radiación $[W/m^{2}]$', fontproperties=prop_1)
ax1.legend()
ax2 = fig.add_subplot(1, 3, 2)
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.hist(Appended_data_desp_350['radiacion'], bins='auto', alpha = 0.5, color = 'orange', label = 'Desp')
ax2.hist(Appended_data_nuba_350['radiacion'], bins='auto', alpha = 0.5, color = 'blue', label = 'Nub')
ax2.set_title(u'Distribución de la radiación \n en dias despejados y nublados en CI', fontproperties=prop, fontsize = 8)
ax2.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax2.set_xlabel(u'Radiación $[W/m^{2}]$', fontproperties=prop_1)
ax2.legend()
ax3 = fig.add_subplot(1, 3, 3)
ax3.spines['top'].set_visible(False)
ax3.spines['right'].set_visible(False)
ax3.hist(Appended_data_desp_975['radiacion'], bins='auto', alpha = 0.5, color = 'orange', label = 'Desp')
ax3.hist(Appended_data_nuba_975['radiacion'], bins='auto', alpha = 0.5, color = 'blue', label = 'Nub')
ax3.set_title(u'Distribución de la radiación \n en dias despejados y nublados en TS', fontproperties=prop, fontsize = 8)
ax3.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax3.set_xlabel(u'Radiación $[W/m^{2}]$', fontproperties=prop_1)
ax3.legend()
plt.savefig('/home/nacorreasa/Escritorio/Figuras/HistoRadiacionNubaDespTotal.png')
plt.show()
#------------------PRUEBA DE KOLMOGOROV-SMIRNOV PARA LA BONDAD DE AJUSTE ----------------##
'The KOLMOGOROV-SMIRNOV goodness-of-fit test is applied to the data of the cloudy and'
'clear days against the overall data series, to evaluate whether they belong to the same'
'probability distribution function. A significance level of 5% is used. This test is'
'more sensitive to values close to the mean than to the extremes, so in general it can'
'be used to avoid the outliers. The null hypothesis is that the data of both series follow'
'the same distribution. The alternative hypothesis suggests that they do not follow the same distribution.'
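# Note: scipy.stats.ks_2samp returns (KS statistic, p-value); the null hypothesis of a common
# distribution is rejected whenever the p-value falls below the chosen significance level.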
Significancia = 0.05
SK_desp_348 = ks_2samp(Appended_data_desp_348['radiacion'].values,df_P348_h['radiacion'].values)
stat_348_desp = SK_desp_348[0]
pvalue_348_desp = SK_desp_348[1]
SK_nuba_348 = ks_2samp(Appended_data_nuba_348['radiacion'].values,df_P348_h['radiacion'].values)
stat_348_nuba = SK_nuba_348[0]
pvalue_348_nuba = SK_nuba_348[1]
if pvalue_348_nuba <= Significancia:
print ('los dias nublados en JV no pertenecen a la misma distribución')
else:
print ('los dias nublados en JV pertenecen a la misma distribución')
if pvalue_348_desp <= Significancia:
print ('los dias despejados en JV no pertenecen a la misma distribución')
else:
print ('los dias despejados en JV pertenecen a la misma distribución')
SK_desp_350 = ks_2samp(Appended_data_desp_350['radiacion'].values,df_P350_h['radiacion'].values)
stat_350_desp = SK_desp_350[0]
pvalue_350_desp = SK_desp_350[1]
SK_nuba_350 = ks_2samp(Appended_data_nuba_350['radiacion'].values,df_P350_h['radiacion'].values)
stat_350_nuba = SK_nuba_350[0]
pvalue_350_nuba = SK_nuba_350[1]
if pvalue_350_nuba <= Significancia:
print ('los dias nublados en CI no pertenecen a la misma distribución')
else:
print ('los dias nublados en CI pertenecen a la misma distribución')
if pvalue_350_desp <= Significancia:
print ('los dias despejados en CI no pertenecen a la misma distribución')
else:
print ('los dias despejados en CI pertenecen a la misma distribución')
SK_desp_975 = ks_2samp(Appended_data_desp_975['radiacion'].values,df_P975_h['radiacion'].values)
stat_975_desp = SK_desp_975[0]
pvalue_975_desp = SK_desp_975[1]
SK_nuba_975 = ks_2samp(Appended_data_nuba_975['radiacion'].values,df_P975_h['radiacion'].values)
stat_975_nuba = SK_nuba_975[0]
pvalue_975_nuba = SK_nuba_975[1]
if pvalue_975_nuba <= Significancia:
print ('los dias nublados en TS no pertenecen a la misma distribución')
else:
print ('los dias nublados en TS pertenecen a la misma distribución')
if pvalue_975_desp <= Significancia:
print ('los dias despejados en TS no pertenecen a la misma distribución')
else:
print ('los dias despejados en TS pertenecen a la misma distribución')
#------------------HISTOGRAMAS DE EFICIENCIA PARA CADA PUNTO EN LOS DOS CASOS----------------##
Desp_Efi_348 = []
for i in range(len(Desp_Pira_348.index.values)):
Desp_Efi_348.append(df_Rad_P348[df_Rad_P348.index.date == Desp_Pira_348.index.date[i]])
Desp_Efi_348 = pd.concat(Desp_Efi_348)
Desp_Efi_350 = []
for i in range(len(Desp_Pira_350.index.values)):
Desp_Efi_350.append(df_Rad_P350[df_Rad_P350.index.date == Desp_Pira_350.index.date[i]])
Desp_Efi_350 = pd.concat(Desp_Efi_350)
Desp_Efi_975 = []
for i in range(len(Desp_Pira_975.index.values)):
Desp_Efi_975.append(df_Rad_P975[df_Rad_P975.index.date == Desp_Pira_975.index.date[i]])
Desp_Efi_975 = pd.concat(Desp_Efi_975)
Nuba_Efi_348 = []
for i in range(len(Nuba_Pira_348.index.values)):
Nuba_Efi_348.append(df_Rad_P348[df_Rad_P348.index.date == Nuba_Pira_348.index.date[i]])
Nuba_Efi_348 = pd.concat(Nuba_Efi_348)
Nuba_Efi_350 = []
for i in range(len(Nuba_Pira_350.index.values)):
Nuba_Efi_350.append(df_Rad_P350[df_Rad_P350.index.date == Nuba_Pira_350.index.date[i]])
Nuba_Efi_350 = pd.concat(Nuba_Efi_350)
Nuba_Efi_975 = []
for i in range(len(Nuba_Pira_975.index.values)):
Nuba_Efi_975.append(df_Rad_P975[df_Rad_P975.index.date == Nuba_Pira_975.index.date[i]])
Nuba_Efi_975 = pd.concat(Nuba_Efi_975)
fig = plt.figure(figsize=[10, 6])
plt.rc('axes', edgecolor='gray')
ax1 = fig.add_subplot(1, 3, 1)
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.hist(Desp_Efi_348['Efi_Transp'], bins='auto', alpha = 0.5, color = 'orange', label = 'Desp')
ax1.hist(Nuba_Efi_348['Efi_Transp'], bins='auto', alpha = 0.5, color = 'blue', label = 'Nub')
ax1.set_title(u'Distribución de la eficiencia \n en dias despejados y nublados en JV', fontproperties=prop, fontsize = 8)
ax1.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax1.set_xlabel(u'Eficiencia', fontproperties=prop_1)
ax1.legend()
ax2 = fig.add_subplot(1, 3, 2)
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.hist(Desp_Efi_350['Efi_Transp'], bins='auto', alpha = 0.5, color = 'orange', label = 'Desp')
ax2.hist(Nuba_Efi_350['Efi_Transp'], bins='auto', alpha = 0.5, color = 'blue', label = 'Nub')
ax2.set_title(u'Distribución de la eficiencia \n en dias despejados y nublados en CI', fontproperties=prop, fontsize = 8)
ax2.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax2.set_xlabel(u'Eficiencia', fontproperties=prop_1)
ax2.legend()
ax3 = fig.add_subplot(1, 3, 3)
ax3.spines['top'].set_visible(False)
ax3.spines['right'].set_visible(False)
ax3.hist(Desp_Efi_975['Efi_Transp'], bins='auto', alpha = 0.5, color = 'orange', label = 'Desp')
ax3.hist(Nuba_Efi_975['Efi_Transp'], bins='auto', alpha = 0.5, color = 'blue', label = 'Nub')
ax3.set_title(u'Distribución de la eficiencia \n en dias despejados y nublados en TS', fontproperties=prop, fontsize = 8)
ax3.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax3.set_xlabel(u'Eficiencia', fontproperties=prop_1)
ax3.legend()
plt.savefig('/home/nacorreasa/Escritorio/Figuras/HistoEficiencianNubaDespTotal.png')
plt.show()
SK_desp_Efi_348 = ks_2samp(Desp_Efi_348['radiacion'].values,df_P348_h['radiacion'].values)
stat_Efi_348_desp = SK_desp_Efi_348[0]
pvalue_Efi_348_desp = SK_desp_Efi_348[1]
SK_nuba_Efi_348 = ks_2samp(Nuba_Efi_348['radiacion'].values,df_P348_h['radiacion'].values)
stat_Efi_348_nuba = SK_nuba_Efi_348[0]
pvalue_Efi_348_nuba = SK_nuba_Efi_348[1]
if pvalue_Efi_348_nuba <= Significancia:
    print ('los dias nublados en JV no pertenecen a la misma distribución')
else:
    print ('los dias nublados en JV pertenecen a la misma distribución')
if pvalue_Efi_348_desp <= Significancia:
    print ('los dias despejados en JV no pertenecen a la misma distribución')
else:
    print ('los dias despejados en JV pertenecen a la misma distribución')
SK_desp_Efi_350 = ks_2samp(Desp_Efi_350['radiacion'].values,df_P350_h['radiacion'].values)
stat_Efi_350_desp = SK_desp_Efi_350[0]
pvalue_Efi_350_desp = SK_desp_Efi_350[1]
SK_nuba_Efi_350 = ks_2samp(Nuba_Efi_350['radiacion'].values,df_P350_h['radiacion'].values)
stat_Efi_350_nuba = SK_nuba_Efi_350[0]
pvalue_Efi_350_nuba = SK_nuba_Efi_350[1]
if pvalue_Efi_350_nuba <= Significancia:
    print ('los dias nublados en CI no pertenecen a la misma distribución')
else:
    print ('los dias nublados en CI pertenecen a la misma distribución')
if pvalue_Efi_350_desp <= Significancia:
    print ('los dias despejados en CI no pertenecen a la misma distribución')
else:
    print ('los dias despejados en CI pertenecen a la misma distribución')
SK_desp_Efi_975 = ks_2samp(Desp_Efi_975['radiacion'].values,df_P975_h['radiacion'].values)
stat_Efi_975_desp = SK_desp_Efi_975[0]
pvalue_Efi_975_desp = SK_desp_Efi_975[1]
SK_nuba_Efi_975 = ks_2samp(Nuba_Efi_975['radiacion'].values,df_P975_h['radiacion'].values)
stat_Efi_975_nuba = SK_nuba_Efi_975[0]
pvalue_Efi_975_nuba = SK_nuba_Efi_975[1]
if pvalue_Efi_975_nuba <= Significancia:
    print ('los dias nublados en TS no pertenecen a la misma distribución')
else:
    print ('los dias nublados en TS pertenecen a la misma distribución')
if pvalue_Efi_975_desp <= Significancia:
    print ('los dias despejados en TS no pertenecen a la misma distribución')
else:
    print ('los dias despejados en TS pertenecen a la misma distribución')
#------------------ESTIMACIÓN DE LA AUTOCORRELACIÓN EN CADA PUNTO----------------##
def estimated_autocorrelation(x):
"""
http://stackoverflow.com/q/14297012/190597
http://en.wikipedia.org/wiki/Autocorrelation#Estimation
"""
n = len(x)
variance = x.var()
x = x-x.mean()
r = np.correlate(x, x, mode = 'full')[-n:]
assert np.allclose(r, np.array([(x[:n-k]*x[-(n-k):]).sum() for k in range(n)]))
result = r/(variance*(np.arange(n, 0, -1)))
return result
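# Illustrative example: estimated_autocorrelation(np.array([1., 2., 3., 4.])) returns the
# normalized autocorrelation for lags 0..3, with the lag-0 value equal to 1.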
Auto_corr_975 = estimated_autocorrelation(df_P975_h['radiacion'].values)
X = df_P975_h[df_P975_h['radiacion'].values>0]['radiacion'].values
lag = [1, 6, 12, 24]
AutoCorr_lag = []
for j in lag:
    print(j)
    # Autocorrelation at lag j: Pearson correlation between the series and itself shifted by j steps
    r_j, p_j = pearsonr(X[:-j], X[j:])
    AutoCorr_lag.append(r_j)
###############################################################################
##-------------------RADIACION TEORICA PARA UN AÑO DE DATOS------------------##
###############################################################################
'One year of theoretical radiation data is expected, in order to establish the'
'prediction scenarios and the theoretical yields. Intended for the 2018 data.'
## ---LECTURA DE DATOS DE PIRANÓMETRO --- ##
df1 = df1.set_index(["fecha_hora"])
df1.index = df1.index.tz_localize('UTC').tz_convert('America/Bogota')
df1.index = df1.index.tz_localize(None)
## ---AGRUPACION DE LOS DATOS HORARIOS A UN AÑO--- ##
df1_hora = df1.groupby(pd.Grouper(freq="H")).mean()
df1_hora = df1_hora[(df1_hora.index >= '2018-01-01 00:00:00') & (df1_hora.index <= '2018-12-31 23:59:00')]
df1_hora = df1_hora.between_time('06:00', '17:00') ##--> Seleccionar solo los datos de horas del dia
## ---CREACIÓN DE LA RADIACIÓN EN SUPERFICIE POR DIA Y AGRUPACION DE LOS DATOS DIARIOS A UN AÑO--- ##
Io_dia = Io.groupby(pd.Grouper(freq="D")).mean()
df1_dia = df1.groupby(pd.Grouper(freq="D")).mean()
df1_dia = df1_dia[(df1_dia.index >= '2018-01-01') & (df1_dia.index <= '2018-12-31')]
## ---CONDICIONANDO LA RESOLUCIÓN TEMPORAL CON LA QUE SE TRABAJARÁ--- ##
if resolucion == 'diaria':
Io = Io_dia
df1_rad = df1_dia
elif resolucion == 'horaria':
Io = Io_hora
df1_rad = df1_hora
## ---CREACIÓN DE LOS ESCENARIOS DE ANÁLISIS EFICIENCIA TEÓRICA--- ##
if len(Io)==len(df1_rad):
df1_rad['TAR'] = Io
df1_rad = df1_rad.drop([u'Unnamed: 0', u'idestacion'], axis=1)
df1_rad['Efi_Teorica'] = df1_rad[u'radiacion']/df1_rad[u'TAR']
else:
print (u'No hay un año de datos con el piranometro')
## --Máximo absosluto
df1_radr_max = df1_rad.loc[lambda df_hora: df_hora['Efi_Teorica'] == np.nanmax(df1_rad.Efi_Teorica)]
## -- Percentil 90 absoluto
df1_rad90 = df1_rad.quantile(0.90)
## -- Percentil 50 absoluto
df1_rad50 = df1_rad.quantile(0.50)
## -- Percentil 10 absoluto
df1_rad10 = df1_rad.quantile(0.10)
## -----MENSUAL----- ##
df1_hm_mean = df1_rad.Efi_Teorica.groupby(pd.Grouper(freq="M")).mean()
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
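# Used by locale-sensitive tests (e.g. month-name parsing with %b), which are only expected
# to pass under the default (C / en_US) locale.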
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
idx = DatetimeIndex(org, freq=f)
self.assertTrue(idx.equals(org))
        # unable to create tz-aware 'A' and 'C' freq
if _np_version_under1p7:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=f, tz='US/Pacific')
self.assertTrue(idx.equals(org))
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
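# Helper used by the tests below: compares two DatetimeIndex objects for equality and also
# checks that their freq and tz attributes match.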
class TestTimeSeries(tm.TestCase):
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEqual(v1, Timestamp('2/28/2005'))
self.assertEqual(v2, Timestamp('4/30/2005'))
self.assertEqual(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assertIsNone(dti2.freq)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
self.assertIsNotNone(expected.freq)
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
self.assertIsNone(masked.freq)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_series_box_timestamp(self):
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng)
tm.assert_isinstance(s[5], Timestamp)
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng, index=rng)
tm.assert_isinstance(s[5], Timestamp)
tm.assert_isinstance(s.iget_value(5), Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
self.assertRaises(ValueError, date_range, start, end,
freq='s', periods=10)
def test_timestamp_to_datetime(self):
_skip_if_no_pytz()
rng = date_range('20090415', '20090519',
tz='US/Eastern')
stamp = rng[0]
dtval = stamp.to_pydatetime()
self.assertEqual(stamp, dtval)
self.assertEqual(stamp.tzinfo, dtval.tzinfo)
def test_index_convert_to_datetime_array(self):
_skip_if_no_pytz()
def _check_rng(rng):
converted = rng.to_pydatetime()
tm.assert_isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
tm.assert_isinstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
self.assertEqual(rng[0].second, 1)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
self.assertIs(result.index, rng)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_setitem_timestamp(self):
# 2155
columns = DatetimeIndex(start='1/1/2012', end='2/1/2012',
freq=datetools.bday)
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
rng2 = rng[::2][::-1]
self.assertRaises(ValueError, rng2.get_indexer, rng,
method='pad')
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertEqual(df[unit].dtype, ns_dtype)
self.assertTrue((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertTrue((tmp['dates'].values == ex_vals).all())
def test_to_datetime_unit(self):
epoch = 1370745748
s = Series([ epoch + t for t in range(20) ])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = concat([Series([ epoch + t for t in range(20) ]).astype(float),Series([np.nan])],ignore_index=True)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
series = Series(dates)
self.assertTrue(np.issubdtype(series.dtype, np.dtype('M8[ns]')))
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')
idx = Index(arr)
self.assertTrue((idx.values == tslib.cast_to_nanoseconds(arr)).all())
def test_index_astype_datetime64(self):
idx = Index([datetime(2012, 1, 1)], dtype=object)
if not _np_version_under1p7:
raise nose.SkipTest("test only valid in numpy < 1.7")
casted = idx.astype(np.dtype('M8[D]'))
expected = DatetimeIndex(idx.values)
tm.assert_isinstance(casted, DatetimeIndex)
self.assertTrue(casted.equals(expected))
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
self.assertTrue(np.issubdtype(result.dtype, np.dtype('M8[ns]')))
mask = result.isnull()
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_reindex_frame_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
result = df.reindex(lrange(15))
self.assertTrue(np.issubdtype(result['B'].dtype, np.dtype('M8[ns]')))
mask = com.isnull(result)['B']
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_series_repr_nat(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
result = repr(series)
expected = ('0 1970-01-01 00:00:00\n'
'1 1970-01-01 00:00:00.000001\n'
'2 1970-01-01 00:00:00.000002\n'
'3 NaT\n'
'dtype: datetime64[ns]')
self.assertEqual(result, expected)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_string_na_nat_conversion(self):
# GH #999, #858
from pandas.compat import parse_date
strings = np.array(['1/1/2000', '1/2/2000', np.nan,
'1/4/2000, 12:34:56'], dtype=object)
expected = np.empty(4, dtype='M8[ns]')
for i, val in enumerate(strings):
if com.isnull(val):
expected[i] = iNaT
else:
expected[i] = parse_date(val)
result = tslib.array_to_datetime(strings)
assert_almost_equal(result, expected)
result2 = to_datetime(strings)
tm.assert_isinstance(result2, DatetimeIndex)
assert_almost_equal(result, result2)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
result = to_datetime(malformed)
assert_almost_equal(result, malformed)
self.assertRaises(ValueError, to_datetime, malformed,
errors='raise')
idx = ['a', 'b', 'c', 'd', 'e']
series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan,
'1/5/2000'], index=idx, name='foo')
dseries = Series([to_datetime('1/1/2000'), np.nan,
to_datetime('1/3/2000'), np.nan,
to_datetime('1/5/2000')], index=idx, name='foo')
result = to_datetime(series)
dresult = to_datetime(dseries)
expected = Series(np.empty(5, dtype='M8[ns]'), index=idx)
for i in range(5):
x = series[i]
if isnull(x):
expected[i] = iNaT
else:
expected[i] = to_datetime(x)
assert_series_equal(result, expected)
self.assertEqual(result.name, 'foo')
assert_series_equal(dresult, expected)
self.assertEqual(dresult.name, 'foo')
def test_to_datetime_iso8601(self):
result = to_datetime(["2012-01-01 00:00:00"])
exp = Timestamp("2012-01-01 00:00:00")
self.assertEqual(result[0], exp)
result = to_datetime(['20121001']) # bad iso 8601
exp = Timestamp('2012-10-01')
self.assertEqual(result[0], exp)
def test_to_datetime_default(self):
rs = to_datetime('2001')
xp = datetime(2001, 1, 1)
        self.assertEqual(rs, xp)
#### dayfirst is essentially broken
#### to_datetime('01-13-2012', dayfirst=True)
#### self.assertRaises(ValueError, to_datetime('01-13-2012', dayfirst=True))
def test_to_datetime_on_datetime64_series(self):
# #2699
s = Series(date_range('1/1/2000', periods=10))
result = to_datetime(s)
self.assertEqual(result[0], s[0])
def test_to_datetime_with_apply(self):
# this is only locale tested with US/None locales
_skip_if_has_locale()
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1,2,3])
expected = pd.to_datetime(td, format='%b %y')
result = td.apply(pd.to_datetime, format='%b %y')
assert_series_equal(result, expected)
td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3])
self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y'))
self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y'))
expected = pd.to_datetime(td, format='%b %y', coerce=True)
result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', coerce=True))
assert_series_equal(result, expected)
def test_nat_vector_field_access(self):
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(idx, field)
expected = [getattr(x, field) if x is not NaT else -1
for x in idx]
self.assert_numpy_array_equal(result, expected)
def test_nat_scalar_field_access(self):
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(NaT, field)
self.assertEqual(result, -1)
self.assertEqual(NaT.weekday(), -1)
def test_to_datetime_types(self):
# empty string
result = to_datetime('')
self.assertIs(result, NaT)
result = to_datetime(['', ''])
self.assertTrue(isnull(result).all())
# ints
result = Timestamp(0)
expected = to_datetime(0)
self.assertEqual(result, expected)
# GH 3888 (strings)
expected = to_datetime(['2012'])[0]
result = to_datetime('2012')
self.assertEqual(result, expected)
### array = ['2012','20120101','20120101 12:01:01']
array = ['20120101','20120101 12:01:01']
expected = list(to_datetime(array))
result = lmap(Timestamp,array)
tm.assert_almost_equal(result,expected)
### currently fails ###
### result = Timestamp('2012')
### expected = to_datetime('2012')
### self.assertEqual(result, expected)
def test_to_datetime_unprocessable_input(self):
# GH 4928
self.assert_numpy_array_equal(
to_datetime([1, '1']),
np.array([1, '1'], dtype='O')
)
self.assertRaises(TypeError, to_datetime, [1, '1'], errors='raise')
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view('M8[us]')
as_obj = scalar.astype('O')
index = DatetimeIndex([scalar])
self.assertEqual(index[0], scalar.astype('O'))
value = Timestamp(scalar)
self.assertEqual(value, as_obj)
def test_to_datetime_list_of_integers(self):
rng = date_range('1/1/2000', periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
self.assertTrue(rng.equals(result))
def test_to_datetime_dt64s(self):
in_bound_dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
for dt in in_bound_dts:
self.assertEqual(
pd.to_datetime(dt),
Timestamp(dt)
)
oob_dts = [
np.datetime64('1000-01-01'),
np.datetime64('5000-01-02'),
]
for dt in oob_dts:
self.assertRaises(ValueError, pd.to_datetime, dt, errors='raise')
self.assertRaises(ValueError, tslib.Timestamp, dt)
self.assertIs(pd.to_datetime(dt, coerce=True), NaT)
def test_to_datetime_array_of_dt64s(self):
dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
self.assert_numpy_array_equal(
pd.to_datetime(dts, box=False),
np.array([Timestamp(x).asm8 for x in dts])
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64('9999-01-01')]
self.assertRaises(
ValueError,
pd.to_datetime,
dts_with_oob,
coerce=False,
errors='raise'
)
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=True),
np.array(
[
Timestamp(dts_with_oob[0]).asm8,
Timestamp(dts_with_oob[1]).asm8,
iNaT,
],
dtype='M8'
)
)
# With coerce=False and errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=False),
np.array(
[dt.item() for dt in dts_with_oob],
dtype='O'
)
)
def test_index_to_datetime(self):
idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
result = idx.to_datetime()
expected = DatetimeIndex(datetools.to_datetime(idx.values))
self.assertTrue(result.equals(expected))
today = datetime.today()
idx = Index([today], dtype=object)
result = idx.to_datetime()
expected = DatetimeIndex([today])
self.assertTrue(result.equals(expected))
def test_to_datetime_freq(self):
xp = bdate_range('2000-1-1', periods=10, tz='UTC')
rs = xp.to_datetime()
self.assertEqual(xp.freq, rs.freq)
self.assertEqual(xp.tzinfo, rs.tzinfo)
def test_range_misspecified(self):
# GH #1095
self.assertRaises(ValueError, date_range, '1/1/2000')
self.assertRaises(ValueError, date_range, end='1/1/2000')
self.assertRaises(ValueError, date_range, periods=10)
self.assertRaises(ValueError, date_range, '1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, end='1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, periods=10, freq='H')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
self.assertIn('2000', str(e))
def test_reindex_with_datetimes(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
def test_promote_datetime_date(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# test asfreq
result = ts2.asfreq('4H', method='ffill')
expected = ts[5:].asfreq('4H', method='ffill')
assert_series_equal(result, expected)
result = rng.get_indexer(ts2.index)
expected = rng.get_indexer(ts_slice.index)
self.assert_numpy_array_equal(result, expected)
def test_asfreq_normalize(self):
rng = date_range('1/1/2000 09:30', periods=20)
norm = date_range('1/1/2000', periods=20)
vals = np.random.randn(20)
ts = Series(vals, index=rng)
result = ts.asfreq('D', normalize=True)
norm = date_range('1/1/2000', periods=20)
expected = Series(vals, index=norm)
assert_series_equal(result, expected)
vals = np.random.randn(20, 3)
ts = DataFrame(vals, index=rng)
result = ts.asfreq('D', normalize=True)
expected = DataFrame(vals, index=norm)
assert_frame_equal(result, expected)
def test_date_range_gen_error(self):
rng = date_range('1/1/2000 00:00', '1/1/2000 00:18', freq='5min')
self.assertEqual(len(rng), 4)
def test_first_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.first('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.first('10d')
self.assertEqual(len(result), 10)
result = ts.first('3M')
expected = ts[:'3/31/2000']
assert_series_equal(result, expected)
result = ts.first('21D')
expected = ts[:21]
assert_series_equal(result, expected)
result = ts[:0].first('3M')
assert_series_equal(result, ts[:0])
def test_last_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.last('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.last('10d')
self.assertEqual(len(result), 10)
result = ts.last('21D')
expected = ts['12/12/2009':]
assert_series_equal(result, expected)
result = ts.last('21D')
expected = ts[-21:]
assert_series_equal(result, expected)
result = ts[:0].last('3M')
assert_series_equal(result, ts[:0])
def test_add_offset(self):
rng = date_range('1/1/2000', '2/1/2000')
result = rng + offsets.Hour(2)
expected = date_range('1/1/2000 02:00', '2/1/2000 02:00')
self.assertTrue(result.equals(expected))
def test_format_pre_1900_dates(self):
rng = date_range('1/1/1850', '1/1/1950', freq='A-DEC')
rng.format()
ts = Series(1, index=rng)
repr(ts)
def test_repeat(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
def test_at_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts[time(9, 30)]
result_df = df.ix[time(9, 30)]
expected = ts[(rng.hour == 9) & (rng.minute == 30)]
exp_df = df[(rng.hour == 9) & (rng.minute == 30)]
# expected.index = date_range('1/1/2000', '1/4/2000')
assert_series_equal(result, expected)
tm.assert_frame_equal(result_df, exp_df)
chunk = df.ix['1/4/2000':]
result = chunk.ix[time(9, 30)]
expected = result_df[-1:]
tm.assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.at_time(time(0, 0))
assert_series_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = Series(np.random.randn(len(rng)), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_at_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_frame_equal(result, expected)
result = ts.ix[time(9, 30)]
expected = ts.ix[(rng.hour == 9) & (rng.minute == 30)]
assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts.at_time(time(0, 0))
assert_frame_equal(result, ts)
# time doesn't exist
        rng = date_range('1/1/2012', freq='23Min', periods=384)
# -*- coding: utf-8 -*-
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import numpy as np
import pandas as pd
import platform
import unittest
from itertools import combinations, combinations_with_replacement, product
from numba.core.config import IS_32BITS
from numba.core.errors import TypingError
from sdc.tests.test_base import TestCase
from sdc.tests.test_utils import (skip_numba_jit,
_make_func_from_text,
gen_frand_array)
def _make_func_use_binop1(operator):
func_text = "def test_impl(A, B):\n"
func_text += " return A {} B\n".format(operator)
return _make_func_from_text(func_text)
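# For example, _make_func_use_binop1('+') builds source equivalent to:
#     def test_impl(A, B):
#         return A + B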
def _make_func_use_binop2(operator):
func_text = "def test_impl(A, B):\n"
func_text += " A {} B\n".format(operator)
func_text += " return A\n"
return _make_func_from_text(func_text)
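# For example, _make_func_use_binop2('+=') builds source equivalent to:
#     def test_impl(A, B):
#         A += B
#         return A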
def _make_func_use_method_arg1(method):
func_text = "def test_impl(A, B):\n"
func_text += " return A.{}(B)\n".format(method)
return _make_func_from_text(func_text)
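# For example, _make_func_use_method_arg1('add') builds source equivalent to:
#     def test_impl(A, B):
#         return A.add(B)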
class TestSeries_ops(TestCase):
    def test_series_operators_int(self):
        """Verifies all the Series arithmetic binary operators on two integer Series with default indexes"""
n = 11
np.random.seed(0)
data_to_test = [np.arange(-5, -5 + n, dtype=np.int32),
np.ones(n + 3, dtype=np.int32),
np.random.randint(-5, 5, n + 7)]
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for data_left, data_right in combinations_with_replacement(data_to_test, 2):
# integers to negative powers are not allowed
if (operator == '**' and np.any(data_right < 0)):
data_right = np.abs(data_right)
with self.subTest(left=data_left, right=data_right, operator=operator):
S1 = pd.Series(data_left)
S2 = pd.Series(data_right)
# check_dtype=False because SDC implementation always returns float64 Series
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2), check_dtype=False)
    def test_series_operators_int_scalar(self):
        """Verifies all the Series arithmetic binary operators
        on an integer Series with a default index and a scalar value"""
n = 11
np.random.seed(0)
data_to_test = [np.arange(-5, -5 + n, dtype=np.int32),
np.ones(n + 3, dtype=np.int32),
np.random.randint(-5, 5, n + 7)]
scalar_values = [1, -1, 0, 3, 7, -5]
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for data, scalar, swap_operands in product(data_to_test, scalar_values, (False, True)):
S = pd.Series(data)
left, right = (S, scalar) if swap_operands else (scalar, S)
# integers to negative powers are not allowed
if (operator == '**' and np.any(right < 0)):
right = abs(right)
with self.subTest(left=left, right=right, operator=operator):
# check_dtype=False because SDC implementation always returns float64 Series
pd.testing.assert_series_equal(hpat_func(left, right), test_impl(left, right), check_dtype=False)
    def test_series_operators_float(self):
        """Verifies all the Series arithmetic binary operators on two float Series with default indexes"""
n = 11
np.random.seed(0)
data_to_test = [np.arange(-5, -5 + n, dtype=np.float32),
np.ones(n + 3, dtype=np.float32),
np.random.ranf(n + 7)]
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for data_left, data_right in combinations_with_replacement(data_to_test, 2):
with self.subTest(left=data_left, right=data_right, operator=operator):
S1 = pd.Series(data_left)
S2 = pd.Series(data_right)
# check_dtype=False because SDC implementation always returns float64 Series
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2), check_dtype=False)
    def test_series_operators_float_scalar(self):
        """Verifies all the Series arithmetic binary operators
        on a float Series with a default index and a scalar value"""
n = 11
np.random.seed(0)
data_to_test = [np.arange(-5, -5 + n, dtype=np.float32),
np.ones(n + 3, dtype=np.float32),
np.random.ranf(n + 7)]
scalar_values = [1., -1., 0., -0., 7., -5.]
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for data, scalar, swap_operands in product(data_to_test, scalar_values, (False, True)):
S = pd.Series(data)
left, right = (S, scalar) if swap_operands else (scalar, S)
with self.subTest(left=left, right=right, operator=operator):
                    pd.testing.assert_series_equal(hpat_func(left, right), test_impl(left, right), check_dtype=False)
@skip_numba_jit('Not implemented in new-pipeline yet')
def test_series_operators_inplace(self):
arithmetic_binops = ('+=', '-=', '*=', '/=', '//=', '%=', '**=')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop2(operator)
hpat_func = self.jit(test_impl)
# TODO: extend to test arithmetic operations between numeric Series of different dtypes
n = 11
A1 = pd.Series(np.arange(1, n, dtype=np.float64), name='A')
A2 = A1.copy(deep=True)
B = pd.Series(np.ones(n - 1), name='B')
hpat_func(A1, B)
test_impl(A2, B)
pd.testing.assert_series_equal(A1, A2)
@skip_numba_jit('Not implemented in new-pipeline yet')
def test_series_operators_inplace_scalar(self):
arithmetic_binops = ('+=', '-=', '*=', '/=', '//=', '%=', '**=')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop2(operator)
hpat_func = self.jit(test_impl)
# TODO: extend to test arithmetic operations between numeric Series of different dtypes
n = 11
S1 = pd.Series(np.arange(1, n, dtype=np.float64), name='A')
S2 = S1.copy(deep=True)
hpat_func(S1, 1)
test_impl(S2, 1)
pd.testing.assert_series_equal(S1, S2)
    @skip_numba_jit('operator.neg for SeriesType is not implemented yet')
def test_series_operator_neg(self):
def test_impl(A):
return -A
hpat_func = self.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(A), test_impl(A))
    def test_series_operators_comp_numeric(self):
        """Verifies all the Series comparison binary operators on two integer Series with various indexes"""
n = 11
data_left = [1, 2, -1, 3, 4, 2, -3, 5, 6, 6, 0]
data_right = [3, 2, -2, 1, 4, 1, -5, 6, 6, 3, -1]
dtype_to_index = {'None': None,
'int': np.arange(n, dtype='int'),
'float': np.arange(n, dtype='float'),
'string': ['aa', 'aa', '', '', 'b', 'b', 'cccc', None, 'dd', 'ddd', None]}
comparison_binops = ('<', '>', '<=', '>=', '!=', '==')
for operator in comparison_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for dtype, index_data in dtype_to_index.items():
with self.subTest(operator=operator, index_dtype=dtype, index=index_data):
A = pd.Series(data_left, index=index_data)
B = pd.Series(data_right, index=index_data)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
    def test_series_operators_comp_numeric_scalar(self):
        """Verifies all the Series comparison binary operators on an integer Series and scalar values"""
S = pd.Series([1, 2, -1, 3, 4, 2, -3, 5, 6, 6, 0])
scalar_values = [2, 2.0, -3, np.inf, -np.inf, np.PZERO, np.NZERO]
comparison_binops = ('<', '>', '<=', '>=', '!=', '==')
for operator in comparison_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for scalar in scalar_values:
with self.subTest(left=S, right=scalar, operator=operator):
pd.testing.assert_series_equal(hpat_func(S, scalar), test_impl(S, scalar))
    def test_series_operators_comp_str_scalar(self):
        """Verifies all the Series comparison binary operators on a string Series and scalar values"""
S = pd.Series(['aa', 'aa', '', '', 'b', 'b', 'cccc', None, 'dd', 'ddd', None])
scalar_values = ['a', 'aa', 'ab', 'ba', '']
comparison_binops = ('<', '>', '<=', '>=', '!=', '==')
for operator in comparison_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = self.jit(test_impl)
for scalar in scalar_values:
with self.subTest(left=S, right=scalar, operator=operator):
pd.testing.assert_series_equal(hpat_func(S, scalar), test_impl(S, scalar))
@skip_numba_jit
def test_series_operators_inplace_array(self):
def test_impl(A, B):
A += B
return A
hpat_func = self.jit(test_impl)
n = 11
A = np.arange(n)**2.0 # TODO: use 2 for test int casting
B = pd.Series(np.ones(n))
np.testing.assert_array_equal(hpat_func(A.copy(), B), test_impl(A, B))
@skip_numba_jit('Functionally test passes, but in old-style it checked fusion of parfors.\n'
'TODO: implement the same checks in new-pipeline')
def test_series_fusion1(self):
def test_impl(A, B):
return A + B + 1
hpat_func = self.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False)
# self.assertEqual(count_parfor_REPs(), 1)
@skip_numba_jit('Functionally test passes, but in old-style it checked fusion of parfors.\n'
'TODO: implement the same checks in new-pipeline')
def test_series_fusion2(self):
def test_impl(A, B):
S = B + 2
if A.iat[0] == 0:
S = A + 1
return S + B
hpat_func = self.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False)
# self.assertEqual(count_parfor_REPs(), 3)
def test_series_operator_add_numeric_scalar(self):
"""Verifies Series.operator.add implementation for numeric series and scalar second operand"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
dtype_to_index = {'None': None,
'int': np.arange(n, dtype='int'),
'float': np.arange(n, dtype='float'),
'string': ['aa', 'aa', 'b', 'b', 'cccc', 'dd', 'ddd']}
int_scalar = 24
for dtype, index_data in dtype_to_index.items():
with self.subTest(index_dtype=dtype, index=index_data):
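                # on 64-bit Windows numpy's default integer type is 32-bit, so int64 is forced
                # explicitly here to keep the result dtype consistent across platforms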
if platform.system() == 'Windows' and not IS_32BITS:
A = pd.Series(np.arange(n, dtype=np.int64), index=index_data)
else:
A = pd.Series(np.arange(n), index=index_data)
result = hpat_func(A, int_scalar)
result_ref = test_impl(A, int_scalar)
pd.testing.assert_series_equal(result, result_ref, check_dtype=False, check_names=False)
float_scalar = 24.0
for dtype, index_data in dtype_to_index.items():
with self.subTest(index_dtype=dtype, index=index_data):
if platform.system() == 'Windows' and not IS_32BITS:
A = pd.Series(np.arange(n, dtype=np.int64), index=index_data)
else:
A = pd.Series(np.arange(n), index=index_data)
ref_result = test_impl(A, float_scalar)
result = hpat_func(A, float_scalar)
pd.testing.assert_series_equal(result, ref_result, check_dtype=False, check_names=False)
def test_series_operator_add_numeric_same_index_default(self):
"""Verifies implementation of Series.operator.add between two numeric Series
with default indexes and same size"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
dtypes_to_test = (np.int32, np.int64, np.float32, np.float64)
for dtype_left, dtype_right in combinations(dtypes_to_test, 2):
with self.subTest(left_series_dtype=dtype_left, right_series_dtype=dtype_right):
A = pd.Series(np.arange(n), dtype=dtype_left)
B = pd.Series(np.arange(n)**2, dtype=dtype_right)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False)
@skip_numba_jit
def test_series_operator_add_numeric_same_index_numeric(self):
"""Verifies implementation of Series.operator.add between two numeric Series
with the same numeric indexes of different dtypes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
dtypes_to_test = (np.int32, np.int64, np.float32, np.float64)
for dtype_left, dtype_right in combinations(dtypes_to_test, 2):
with self.subTest(left_series_dtype=dtype_left, right_series_dtype=dtype_right):
A = pd.Series(np.arange(n), index=np.arange(n, dtype=dtype_left))
B = pd.Series(np.arange(n)**2, index=np.arange(n, dtype=dtype_right))
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False)
def test_series_operator_add_numeric_same_index_numeric_fixme(self):
""" Same as test_series_operator_add_same_index_numeric but with w/a for the problem.
Can be deleted when the latter is fixed """
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
index_dtypes_to_test = (np.int32, np.int64, np.float32, np.float64)
for dtype_left, dtype_right in combinations(index_dtypes_to_test, 2):
# FIXME: skip the sub-test if one of the dtypes is float and the other is integer
if not (np.issubdtype(dtype_left, np.integer) and np.issubdtype(dtype_right, np.integer)
                    or np.issubdtype(dtype_left, np.floating) and np.issubdtype(dtype_right, np.floating)):
continue
with self.subTest(left_series_dtype=dtype_left, right_series_dtype=dtype_right):
A = pd.Series(np.arange(n), index=np.arange(n, dtype=dtype_left))
B = pd.Series(np.arange(n)**2, index=np.arange(n, dtype=dtype_right))
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False)
def test_series_operator_add_numeric_same_index_str(self):
"""Verifies implementation of Series.operator.add between two numeric Series with the same string indexes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
A = pd.Series(np.arange(n), index=['a', 'c', 'e', 'c', 'b', 'a', 'o'])
B = pd.Series(np.arange(n)**2, index=['a', 'c', 'e', 'c', 'b', 'a', 'o'])
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_numeric_align_index_int(self):
"""Verifies implementation of Series.operator.add between two numeric Series with non-equal integer indexes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 11
index_A = [0, 1, 1, 2, 3, 3, 3, 4, 6, 8, 9]
index_B = [0, 1, 1, 3, 4, 4, 5, 5, 6, 6, 9]
np.random.shuffle(index_A)
np.random.shuffle(index_B)
A = pd.Series(np.arange(n), index=index_A)
B = pd.Series(np.arange(n)**2, index=index_B)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_numeric_align_index_str(self):
"""Verifies implementation of Series.operator.add between two numeric Series with non-equal string indexes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 11
index_A = ['', '', 'aa', 'aa', 'ae', 'ae', 'b', 'ccc', 'cccc', 'oo', 's']
index_B = ['', '', 'aa', 'aa', 'cc', 'cccc', 'e', 'f', 'h', 'oo', 's']
np.random.shuffle(index_A)
np.random.shuffle(index_B)
A = pd.Series(np.arange(n), index=index_A)
B = pd.Series(np.arange(n)**2, index=index_B)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
    @skip_numba_jit("TODO: fix Series.sort_values to handle both None and '' in string series")
def test_series_operator_add_numeric_align_index_str_fixme(self):
"""Same as test_series_operator_add_align_index_str but with None values in string indexes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 11
index_A = ['', '', 'aa', 'aa', 'ae', 'b', 'ccc', 'cccc', 'oo', None, None]
index_B = ['', '', 'aa', 'aa', 'cccc', 'f', 'h', 'oo', 's', None, None]
np.random.shuffle(index_A)
np.random.shuffle(index_B)
A = pd.Series(np.arange(n), index=index_A)
B = pd.Series(np.arange(n)**2, index=index_B)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_numeric_align_index_other_dtype(self):
"""Verifies implementation of Series.operator.add between two numeric Series
with non-equal integer indexes of different dtypes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
A = pd.Series(np.arange(3*n), index=np.arange(-n, 2*n, 1, dtype=np.int64))
B = pd.Series(np.arange(3*n)**2, index=np.arange(0, 3*n, 1, dtype=np.float64))
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_numeric_diff_series_sizes(self):
"""Verifies implementation of Series.operator.add between two numeric Series with different sizes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
size_A, size_B = 7, 25
A = pd.Series(np.arange(size_A))
B = pd.Series(np.arange(size_B)**2)
result = hpat_func(A, B)
result_ref = test_impl(A, B)
pd.testing.assert_series_equal(result, result_ref, check_dtype=False, check_names=False)
def test_series_operator_add_align_index_int_capacity(self):
"""Verifies implementation of Series.operator.add and alignment of numeric indexes of large size"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 20000
np.random.seed(0)
index1 = np.random.randint(-30, 30, n)
index2 = np.random.randint(-30, 30, n)
A = pd.Series(np.random.ranf(n), index=index1)
B = pd.Series(np.random.ranf(n), index=index2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_align_index_str_capacity(self):
"""Verifies implementation of Series.operator.add and alignment of string indexes of large size"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 2000
np.random.seed(0)
valid_ids = ['', 'aaa', 'a', 'b', 'ccc', 'ef', 'ff', 'fff', 'fa', 'dddd']
index1 = [valid_ids[i] for i in np.random.randint(0, len(valid_ids), n)]
index2 = [valid_ids[i] for i in np.random.randint(0, len(valid_ids), n)]
A = pd.Series(np.random.ranf(n), index=index1)
B = pd.Series(np.random.ranf(n), index=index2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_str_same_index_default(self):
"""Verifies implementation of Series.operator.add between two string Series
with default indexes and same size"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
A = pd.Series(['a', '', 'ae', 'b', 'cccc', 'oo', None])
B = pd.Series(['b', 'aa', '', 'b', 'o', None, 'oo'])
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_str_align_index_int(self):
"""Verifies implementation of Series.operator.add between two string Series with non-equal integer indexes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
np.random.seed(0)
index_A = [0, 1, 1, 2, 3, 3, 3, 4, 6, 8, 9]
index_B = [0, 1, 1, 3, 4, 4, 5, 5, 6, 6, 9]
np.random.shuffle(index_A)
np.random.shuffle(index_B)
data = ['', '', 'aa', 'aa', None, 'ae', 'b', 'ccc', 'cccc', None, 'oo']
A = pd.Series(data, index=index_A)
B = pd.Series(data, index=index_B)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_result_name1(self):
"""Verifies name of the Series resulting from appying Series.operator.add to different arguments"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
series_names = ['A', '', None, 'B']
for left_name, right_name in combinations(series_names, 2):
S1 = pd.Series(np.arange(n), name=left_name)
S2 = pd.Series(np.arange(n, 0, -1), name=right_name)
with self.subTest(left_series_name=left_name, right_series_name=right_name):
# check_dtype=False because SDC implementation always returns float64 Series
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2), check_dtype=False)
# also verify case when second operator is scalar
scalar = 3.0
with self.subTest(scalar=scalar):
S1 = pd.Series(np.arange(n), name='A')
pd.testing.assert_series_equal(hpat_func(S1, scalar), test_impl(S1, scalar), check_dtype=False)
@unittest.expectedFailure
def test_series_operator_add_result_name2(self):
"""Verifies implementation of Series.operator.add differs from Pandas
in returning unnamed Series when both operands are named Series with the same name"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
S1 = pd.Series(np.arange(n), name='A')
S2 = pd.Series(np.arange(n, 0, -1), name='A')
result = hpat_func(S1, S2)
result_ref = test_impl(S1, S2)
# check_dtype=False because SDC implementation always returns float64 Series
pd.testing.assert_series_equal(result, result_ref, check_dtype=False)
@unittest.expectedFailure
def test_series_operator_add_series_dtype_promotion(self):
"""Verifies implementation of Series.operator.add differs from Pandas
in dtype of resulting Series that is fixed to float64"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
dtypes_to_test = (np.int32, np.int64, np.float32, np.float64)
for dtype_left, dtype_right in combinations(dtypes_to_test, 2):
with self.subTest(left_series_dtype=dtype_left, right_series_dtype=dtype_right):
A = pd.Series(np.array(np.arange(n), dtype=dtype_left))
B = pd.Series(np.array(np.arange(n)**2, dtype=dtype_right))
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
def test_series_operator_add_str_scalar(self):
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
series_data = ['a', '', 'ae', 'b', 'cccc', 'oo', None]
S = pd.Series(series_data)
values_to_test = [' ', 'wq', '', '23']
for scalar in values_to_test:
with self.subTest(left=series_data, right=scalar):
result_ref = test_impl(S, scalar)
result = hpat_func(S, scalar)
pd.testing.assert_series_equal(result, result_ref)
with self.subTest(left=scalar, right=series_data):
result_ref = test_impl(scalar, S)
result = hpat_func(scalar, S)
pd.testing.assert_series_equal(result, result_ref)
def test_series_operator_add_str_unsupported(self):
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
series_data = ['a', '', 'ae', 'b', 'cccc', 'oo', None]
S = pd.Series(series_data)
other_operands = [
1,
3.0,
pd.Series(np.arange(n)),
pd.Series([True, False, False, True, False, True, True]),
]
for operand in other_operands:
with self.subTest(right=operand):
with self.assertRaises(TypingError) as raises:
hpat_func(S, operand)
expected_msg = 'Operator add(). Not supported for not-comparable operands.'
self.assertIn(expected_msg, str(raises.exception))
def test_series_operator_mul_str_scalar(self):
def test_impl(A, B):
return A * B
hpat_func = self.jit(test_impl)
series_data = ['a', '', 'ae', 'b', ' ', 'cccc', 'oo', None]
S = pd.Series(series_data)
values_to_test = [-1, 0, 2, 5]
for scalar in values_to_test:
with self.subTest(left=series_data, right=scalar):
result_ref = test_impl(S, scalar)
result = hpat_func(S, scalar)
pd.testing.assert_series_equal(result, result_ref)
with self.subTest(left=scalar, right=series_data):
result_ref = test_impl(scalar, S)
result = hpat_func(scalar, S)
pd.testing.assert_series_equal(result, result_ref)
def test_series_operator_mul_str_same_index_default(self):
"""Verifies implementation of Series.operator.add between two string Series
with default indexes and same size"""
def test_impl(A, B):
return A * B
hpat_func = self.jit(test_impl)
A = pd.Series(['a', '', 'ae', 'b', 'cccc', 'oo', None])
B = pd.Series([-1, 2, 0, 5, 3, -5, 4])
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
def test_series_operator_mul_str_align_index_int1(self):
""" Verifies implementation of Series.operator.add between two string Series
with integer indexes containg same unique values (so alignment doesn't produce NaNs) """
def test_impl(A, B):
return A * B
hpat_func = self.jit(test_impl)
n = 11
np.random.seed(0)
        shuffled_data = np.arange(n, dtype=np.int_)
np.random.shuffle(shuffled_data)
index_A = shuffled_data
np.random.shuffle(shuffled_data)
index_B = shuffled_data
str_series_values = ['', '', 'aa', 'aa', None, 'ae', 'b', 'ccc', 'cccc', None, 'oo']
int_series_values = np.random.randint(-5, 5, n)
A = pd.Series(str_series_values, index=index_A)
B = pd.Series(int_series_values, index=index_B)
for swap_operands in (False, True):
if swap_operands:
A, B = B, A
with self.subTest(left=A, right=B):
result = hpat_func(A, B)
result_ref = test_impl(A, B)
pd.testing.assert_series_equal(result, result_ref)
@unittest.expectedFailure # pandas can't calculate this due to adding NaNs to int series during alignment
def test_series_operator_mul_str_align_index_int2(self):
""" Verifies implementation of Series.operator.add between two string Series
with integer indexes that cannot be aligned without NaNs """
def test_impl(A, B):
return A * B
hpat_func = self.jit(test_impl)
n = 11
np.random.seed(0)
index_A = [0, 1, 1, 2, 3, 3, 3, 4, 6, 8, 9]
index_B = [0, 1, 1, 3, 4, 4, 5, 5, 6, 6, 9]
np.random.shuffle(index_A)
np.random.shuffle(index_B)
str_series_values = ['', '', 'aa', 'aa', None, 'ae', 'b', 'ccc', 'cccc', None, 'oo']
int_series_values = np.random.randint(-5, 5, n)
A = pd.Series(str_series_values, index=index_A)
B = pd.Series(int_series_values, index=index_B)
for swap_operands in (False, True):
if swap_operands:
A, B = B, A
with self.subTest(left=A, right=B):
result = hpat_func(A, B)
result_ref = test_impl(A, B)
pd.testing.assert_series_equal(result, result_ref)
def test_series_operator_mul_str_unsupported(self):
def test_impl(A, B):
return A * B
hpat_func = self.jit(test_impl)
series_data = ['a', '', 'ae', 'b', 'cccc', 'oo', None]
        S = pd.Series(series_data)
import numpy as np
import pandas as pd
import pytest
from rayml.objectives import SensitivityLowAlert
from rayml.tests.objective_tests.test_binary_classification_objective import (
TestBinaryObjective,
)
class TestSLA(TestBinaryObjective):
__test__ = True
def assign_objective(self, alert_rate):
self.objective = SensitivityLowAlert(alert_rate)
def test_sla_objective(self, X_y_binary):
self.assign_problem_type()
self.assign_objective(0.1)
self.run_pipeline(X_y_binary)
@pytest.mark.parametrize("alert_rate", [0.01, 0.99])
def test_valid_alert_rate(self, alert_rate):
obj = SensitivityLowAlert(alert_rate)
assert obj.alert_rate == alert_rate
@pytest.mark.parametrize("alert_rate", [-1, 1.5])
def test_invalid_alert_rate(self, alert_rate):
with pytest.raises(ValueError):
SensitivityLowAlert(alert_rate)
@pytest.mark.parametrize(
"alert_rate, ypred_proba, high_risk",
[
(0.1, pd.Series([0.5, 0.5, 0.5]), [True, True, True]),
(0.1, list(range(10)), [False if i != 9 else True for i in range(10)]),
],
)
def test_high_risk_output(self, alert_rate, ypred_proba, high_risk):
self.assign_objective(alert_rate)
assert self.objective.decision_function(ypred_proba).tolist() == high_risk
@pytest.mark.parametrize(
"y_true, y_predicted, expected_score",
[
(pd.Series([False, False, False]), pd.Series([True, True, False]), np.nan),
(
pd.Series([True, True, True, True]),
                pd.Series([True, True, False, False]),
import codecs
import datetime
import functools
import json
import os
import re
import shutil
import pandas as pd
from dateutil.relativedelta import relativedelta
from requests.exceptions import ConnectionError
from utils_pandas import add_data
from utils_pandas import cut_ages
from utils_pandas import export
from utils_pandas import fuzzy_join
from utils_pandas import import_csv
from utils_scraping import logger
from utils_scraping import s
from utils_scraping import web_files
from utils_thai import DISTRICT_RANGE
from utils_thai import join_provinces
from utils_thai import to_thaiyear
from utils_thai import today
#################################
# Cases Apis
#################################
def get_cases_old():
logger.info("========Covid19 Timeline==========")
# https://covid19.th-stat.com/json/covid19v2/getTimeline.json
# https://covid19.ddc.moph.go.th/api/Cases/round-1to2-all
# https://covid19.ddc.moph.go.th/api/Cases/timeline-cases-all
# {"Date":"01\/01\/2020","NewConfirmed":0,"NewRecovered":0,"NewHospitalized":0,"NewDeaths":0,"Confirmed":0,"Recovered":0,"Hospitalized":0,"Deaths":0}
# {"txn_date":"2021-03-31","new_case":42,"total_case":28863,"new_case_excludeabroad":24,"total_case_excludeabroad":25779,"new_death":0,"total_death":94,"new_recovered":47,"total_recovered":27645}
# "txn_date":"2021-04-01","new_case":26,"total_case":28889,"new_case_excludeabroad":21,"total_case_excludeabroad":25800,"new_death":0,"total_death":94,"new_recovered":122,"total_recovered":27767,"update_date":"2021-09-01 07:40:49"}
try:
file, text, url = next(
web_files("https://covid19.th-stat.com/json/covid19v2/getTimeline.json", dir="inputs/json", check=True))
except ConnectionError:
# I think we have all this data covered by other sources. It's a little unreliable.
return pd.DataFrame()
data = pd.DataFrame(json.loads(text)['Data'])
data['Date'] = pd.to_datetime(data['Date'])
data = data.set_index("Date")
cases = data[["NewConfirmed", "NewDeaths", "NewRecovered", "Hospitalized"]]
cases = cases.rename(columns=dict(NewConfirmed="Cases", NewDeaths="Deaths", NewRecovered="Recovered"))
cases["Source Cases"] = url
return cases
def get_cases():
logger.info("========Covid19 Timeline==========")
# https://covid19.th-stat.com/json/covid19v2/getTimeline.json
# https://covid19.ddc.moph.go.th/api/Cases/round-1to2-all
# https://covid19.ddc.moph.go.th/api/Cases/timeline-cases-all
# {"Date":"01\/01\/2020","NewConfirmed":0,"NewRecovered":0,"NewHospitalized":0,"NewDeaths":0,"Confirmed":0,"Recovered":0,"Hospitalized":0,"Deaths":0}
# {"txn_date":"2021-03-31","new_case":42,"total_case":28863,"new_case_excludeabroad":24,"total_case_excludeabroad":25779,"new_death":0,"total_death":94,"new_recovered":47,"total_recovered":27645}
# "txn_date":"2021-04-01","new_case":26,"total_case":28889,"new_case_excludeabroad":21,"total_case_excludeabroad":25800,"new_death":0,"total_death":94,"new_recovered":122,"total_recovered":27767,"update_date":"2021-09-01 07:40:49"}
url1 = "https://covid19.ddc.moph.go.th/api/Cases/round-1to2-all"
url2 = "https://covid19.ddc.moph.go.th/api/Cases/timeline-cases-all"
try:
_, json1, url = next(web_files(url1, dir="inputs/json", check=False))
_, json2, url = next(web_files(url2, dir="inputs/json", check=True))
except ConnectionError:
# I think we have all this data covered by other sources. It's a little unreliable.
return pd.DataFrame()
data = pd.read_json(json1).append(pd.read_json(json2))
data['Date'] = pd.to_datetime(data['txn_date'])
data = data.set_index("Date")
data = data.rename(columns=dict(new_case="Cases", new_death="Deaths", new_recovered="Recovered"))
cases = data[["Cases", "Deaths", "Recovered"]]
cases["Source Cases"] = url
# 2021-12-28 had duplicate because cases went up 4610 from 2305. Why? Google says 4610
cases = cases[~cases.index.duplicated(keep='first')]
return cases
@functools.lru_cache(maxsize=100, typed=False)
def get_case_details_csv():
if False:
return get_case_details_api()
cols = "No.,announce_date,Notified date,sex,age,Unit,nationality,province_of_isolation,risk,province_of_onset,district_of_onset".split(
",")
url = "https://data.go.th/dataset/covid-19-daily"
file, text, _ = next(web_files(url, dir="inputs/json", check=True))
data = re.search(r"packageApp\.value\('meta',([^;]+)\);", text.decode("utf8")).group(1)
apis = json.loads(data)
links = [api['url'] for api in apis if "รายงานจำนวนผู้ติดเชื้อ COVID-19 ประจำวัน" in api['name']]
# get earlier one first
links = sorted([link for link in links if '.php' not in link and '.xlsx' not in link], reverse=True)
# 'https://data.go.th/dataset/8a956917-436d-4afd-a2d4-59e4dd8e906e/resource/be19a8ad-ab48-4081-b04a-8035b5b2b8d6/download/confirmed-cases.csv'
cases = pd.DataFrame()
for link, check in zip(links, ([False] * len(links))[:-1] + [True]):
for file, _, _ in web_files(link, dir="inputs/json", check=check, strip_version=True, appending=True):
if file.endswith(".xlsx"):
continue
#cases = pd.read_excel(file)
elif file.endswith(".csv"):
confirmedcases = pd.read_csv(file)
if "risk" not in confirmedcases.columns:
confirmedcases.columns = cols
if '�' in confirmedcases.loc[0]['risk']:
# bad encoding
with codecs.open(file, encoding="tis-620") as fp:
confirmedcases = pd.read_csv(fp)
first, last, ldate = confirmedcases["No."].iloc[0], confirmedcases["No."].iloc[-1], confirmedcases["announce_date"].iloc[-1]
logger.info("Covid19daily: rows={} {}={} {} {}", len(confirmedcases), last - first, last - first, ldate, file)
cases = cases.combine_first(confirmedcases.set_index("No."))
else:
raise Exception(f"Unknown filetype for covid19daily {file}")
cases = cases.reset_index("No.")
cases['announce_date'] = pd.to_datetime(cases['announce_date'], dayfirst=True)
cases['Notified date'] = pd.to_datetime(cases['Notified date'], dayfirst=True, errors="coerce")
cases = cases.rename(columns=dict(announce_date="Date"))
cases['age'] = pd.to_numeric(cases['age'], downcast="integer", errors="coerce")
#assert cases.index.max() <
# Fix typos in Nationality columns
# This won't include every possible misspellings and need some further improvement
mapping = pd.DataFrame([['Thai', 'Thailand'],
['Thai', 'Thai'],
['Thai', 'India-Thailand'],
['Thai', 'ไทยใหญ่'],
['Lao', 'laotian / Lao'],
['Lao', 'Lao'],
['Lao', 'Laotian/Lao'],
['Lao', 'Laotian / Lao'],
['Lao', 'laos'],
['Lao', 'Laotian'],
['Lao', 'Laos'],
['Lao', 'ลาว'],
['Indian', 'Indian'],
['Indian', 'India'],
['Indian', 'indian'],
['Cambodian', 'Cambodian'],
['Cambodian', 'cambodian'],
['Cambodian', 'Cambodia'],
['South Korean', 'South Korean'],
['South Korean', 'Korea, South'],
['South Korean', 'Korean'],
['Burmese', 'Burmese'],
['Burmese', 'พม่า'],
['Burmese', 'burmese'],
['Burmese', 'Burma'],
['Chinese', 'Chinese'],
['Chinese', 'จีน'],
['Chinese', 'China'],
],
columns=['Nat Main', 'Nat Alt']).set_index('Nat Alt')
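    # fuzzy_join (from utils_pandas) is assumed to match the free-text 'nationality' values against
    # the 'Nat Alt' index above and attach the corresponding 'Nat Main' value; unmatched rows keep
    # their original spelling via the fillna below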
cases = fuzzy_join(cases, mapping, 'nationality')
cases['nationality'] = cases['Nat Main'].fillna(cases['nationality'])
return cases
def get_case_details_api():
rid = "67d43695-8626-45ad-9094-dabc374925ab"
chunk = 10000
url = f"https://data.go.th/api/3/action/datastore_search?resource_id={rid}&limit={chunk}&q=&offset="
records = []
cases = import_csv("covid-19", ["_id"], dir="inputs/json")
lastid = cases.last_valid_index() if cases.last_valid_index() else 0
data = None
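    # page through the datastore API in chunks of `chunk` records, starting from the last cached id;
    # a page shorter than `chunk` signals that all records have been fetched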
while data is None or len(data) == chunk:
r = s.get(f"{url}{lastid}")
data = json.loads(r.content)['result']['records']
df = pd.DataFrame(data)
df['announce_date'] = pd.to_datetime(df['announce_date'], dayfirst=True)
        df['Notified date'] = pd.to_datetime(df['Notified date'], dayfirst=True, errors="coerce")
"""
This code translates .mdl files and produces
- csv file: detailed descriptives of all variables
- doc file: file with information used later in the testing battery
- equi file: creates file for user input for equilibrium test
- py file: translated .mdl file using pysd
- model stats: model statistics of all files translated
- word analysis files: experimental code for later use
- other testing files: collecting functions, gathering errors, etc.
The descriptives add additional detail compared to pysd and
permit a quick review of the variables in an excel file
Some sections are closely mirrored from pysd
This needs to be drastically improved 30.07.18/sk
version 0.2
30.07.18/sk
"""
import os
import pysd
import pandas as pd
import re
from timeit import default_timer as timer
from collections import Counter
from tb.tb_backend import fileops as fops
def constant(expr):
"""
testing if an expression is numeric
:param expr: any expression to be tested
:return: true if numeric, false if not numeric
"""
try:
float(expr)
return True
except ValueError:
return False
def flow_split(expr):
"""
splitting the stock expressions to extract the flow namespace
expr is INTEG (inflow-outflow,init stock)
returns:
- flows: ['inflow', 'outflow']
- flow: inflow-outflow
- init: init stock)
:param expr: expr expression from translation routine (everything right of the equation sign)
:return: list of flows, flow expression and init expression
"""
# eliminating the INTEG ( part
expr = expr.split('(', 1)[-1]
# splitting the init off the expression
flow, init = expr.rsplit(',', 1)
# splitting the flows off, but avoiding splits between parentheses
flows = re.split('(".*?")|[+-/*]', flow)
# removing empty strings in flows
flows = [x for x in flows if x is not None]
flows = [x for x in flows if x != '']
flows = [x.strip() for x in flows]
# removing the closing parenthesis from the init expression
init = init.replace(')', '')
return flows, flow, init
def get_sections(textstring):
"""
splitting the text string and returning the equation string (uncleaned) for both builtin and elements
the text string is split in three parts (elements, builtins, garbage)
garbage is split off based on the string 'Sketch information'
builtins are split off by string '.Control'
however, sometimes elements are listed below the .Control split, thus this needs to be cleaned afterwards
input is:
{UTF-8}
exo BL1=
3
~ Month
~ |
exo BL2=
0.5
~ 1/Month
~ |
output is:
['exo BL1=3~Month~', 'exo BL2=0.5~1/Month~',
:param textstring: text version of a Vensim model
:return: list of equation strings (unclean)
"""
# removing charset info
basetext = textstring.replace('{UTF-8}', '')
# replace new line and tab characters
wtext = basetext.replace('\n', '').replace('\t', '')
# replace backslashes
text = wtext.replace('\\', '')
# split off garbage
text = text.split('Sketch information')[0]
# split elements and builtin sections
sections = text.split('.Control')
# split elements section into each element
elements = sections[0].split('|')
# remove the last element as it is empty
elements = elements[:-1]
# the same is done with builtins, but this is done in a try block because for some reason the
# .Control splitter could not exist (this case actually is in the sample)
try:
built_ins = sections[1].split('|')
built_ins = built_ins[1:-1]
except:
# if there is no section, the builtins are empty
built_ins = []
return elements, built_ins
def get_vars(varlist):
"""
splitting the equations and creating the varlist info
input is:
['exo BL1=3~Month~', 'exo BL2=0.5~1/Month~',
output is:
[{'eqn': 'exo BL1=3', 'unit': 'Month', 'comment': '', 'name': 'exo BL1', 'expr': '3'},
is run for both the elements and the builtins
the inner try except block ensures that table functions are also read in
:param varlist: list of equation strings (unclean)
:return: list of dicts with elements and builtins
"""
components = []
for element in varlist:
try:
eqn, unit, comment = element.split('~', 2)
eqn = eqn.strip()
unit = unit.strip()
comment = comment.strip()
# name is left of =, expr on the right
try:
name, expr = eqn.split('=', 1)
except ValueError:
name = eqn
expr = eqn
components.append({'eqn': eqn,
'unit': unit,
'comment': comment,
'name': name.strip(),
'expr': expr.strip()})
except ValueError:
pass
return components
def corr_lists(varlist, builtins):
"""
this corrects list in case the split with .Control didn't split correctly
this could be much easier if we hadn't split for sections before, but this has to be revisited at some other point,
right now I'm just happy this works
:param varlist: list of dict with variables
:param builtins: list of dict with builtins
:return: clean list of dict with variables and builtins (of which there are 4)
"""
# is used to ensure that there is no infinite loop, two iterations are enough
# it is for example possible that (BI = builtin):
# builtins = [var1] and varlist = [var2, var3, BI1, BI2, BI3, BI4]
# in that case the function will first move the builtins to the builtin list with the result that
# builtins = [var1, BI1, BI2, BI3, BI4] and varlist = [var2, var3]
# now in the second iteration the elif condition applies and the var1 is moved to the varlist, resulting
# builtins = [BI1, BI2, BI3, BI4] and varlist = [var1, var2, var3]
i = 0
while len(builtins) != 4 and i <= 2:
if len(builtins) < 4:
translist = [x for x in varlist if x['name'] in ['FINAL TIME', 'INITIAL TIME', 'TIME STEP', 'SAVEPER']]
varlist = [x for x in varlist if x['name'] not in ['FINAL TIME', 'INITIAL TIME', 'TIME STEP', 'SAVEPER']]
for item in translist:
builtins.append(item)
elif len(builtins) > 4:
translist = [x for x in builtins if x['name'] not in ['FINAL TIME', 'INITIAL TIME', 'TIME STEP', 'SAVEPER']]
builtins = [x for x in builtins if x['name'] in ['FINAL TIME', 'INITIAL TIME', 'TIME STEP', 'SAVEPER']]
for item in translist:
varlist.append(item)
i = i + 1
return varlist, builtins
def id_tables(var, tbl_functions):
"""
This identifies tables and replaces the table information with the string '(table_expr)' to
facilitate calculating statistics
input is:
{'eqn': 'exo BL1=3', 'unit': 'Month', 'comment': '', 'name': 'exo BL1', 'expr': '3', 'type': 'constant',
'flow expr': 'NA', 'init expr': 'NA', 'table expr': 'NA'}
:param var: dict for each variable
:param tbl_functions: list of tbl_functions
:return: corrected variable dict
"""
# function list are put in tuple to be used here
if var['expr'].startswith(tuple(tbl_functions)):
# this is the routine if tables are introduced with functions
var['type'] = 'table function'
        # in this case the table expression is everything after the first comma
c = re.split(',', var['expr'], 1)
c = [x for x in c if x != '']
var['table expr'] = c[-1]
var['expr'] = var['expr'].replace(c[-1], '(table_expr))')
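        # illustrative example: for 'WITH LOOKUP(Time, ((0,0),(10,5)))' everything after the first
        # comma is stored as the table expr and the expr becomes 'WITH LOOKUP(Time,(table_expr))'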
# if tables are introduced without a function it's a bit more complicated
# the identifiers are that the eqn string does not have an equal sign and
# that either no : exists (which would be for :AND:) OR the string ([( exists,
# which is the opening bracket of the table expression
elif len(var['eqn'].split('=')) == 1 and len(var['eqn'].split(':')) == 1 or len(var['eqn'].split('=')) == 1 \
and len(var['eqn'].split('([(')) > 1:
var['type'] = 'table function'
        # in this case the table expression is the span between the first '(' and the last ')'
c = re.split('(\(.*\))', var['expr'])
c = [x for x in c if x != '']
var['table expr'] = c[-1]
var['expr'] = var['expr'].replace(c[-1], '(table_expr)')
var['name'] = var['name'].replace(c[-1], '')
        # TODO: test with and without parentheses 30.07.18/sk
return var
def get_types(components, tbl_functions, c_functions, s_functions, t_functions):
"""
defining types for the variables
input is:
[{'eqn': 'exo BL1=3', 'unit': 'Month', 'comment': '', 'name': 'exo BL1', 'expr': '3'},...
[{'eqn': 'exo BL1=3', 'unit': 'Month', 'comment': '', 'name': 'exo BL1', 'expr': '3', 'type': 'constant',
'flow expr': 'NA', 'init expr': 'NA', 'table expr': 'NA', 'math type': 'regular', 'function': 'NA'},...
:param components: list of dict with the variables
:param tbl_functions: list of functions that are used for tables
:param c_functions: list of complicated functions
:param s_functions: list of simple functions
:param t_functions: list of testing functions
    :return: list of dict of the variables with type, math type and function information added
"""
flows_list = []
for entry in components:
# if the expression is a constant, the type is always constant
if constant(entry['expr']):
entry['type'] = 'constant'
entry['flow expr'] = 'NA'
entry['init expr'] = 'NA'
entry['table expr'] = 'NA'
# if the expression starts with INTEG, it's always a stock
elif entry['expr'].startswith('INTEG') and not entry['expr'].startswith('INTEGER'):
entry['type'] = 'stock'
            # flows differ from other auxiliaries in that they are the only ones that can impact a stock
# thus the flow names are saved in a list and changed later
flows, flow_expr, init_expr = flow_split(entry['expr'])
for flow in flows:
if flow not in flows_list and len(flow) > 0:
flows_list.append(flow)
            # an init variable list could be created here and init variables could be classified differently
# than constants 06.07.18/sk
entry['flow expr'] = flow_expr
entry['init expr'] = init_expr
entry['table expr'] = 'NA'
else:
# everything that is not a constant or a stock, is first typed as an auxiliary
entry['type'] = 'auxiliary'
entry['flow expr'] = 'NA'
entry['init expr'] = 'NA'
entry['table expr'] = 'NA'
for entry in components:
# if the name is in the flow list, it's a flow, the split on [ is just if there are subscripts
if entry['name'].split('[')[0] in flows_list or entry['name'] in flows_list:
entry['type'] = 'flow'
# then tables are identified with the ID tables function
entry = id_tables(entry, tbl_functions)
# subscripts need to be named subscripts in this one
for entry in components:
# split should be on elements, not on the entire equation
# this is to define the math types and separate the function types for statistics
# this only works for functions that are at the beginning of the expression
try:
func, expr = entry['expr'].split('(', 1)
except ValueError:
func = ''
expr = ''
func = func.strip()
# removing the closing bracket
expr = expr.rsplit(')', 1)[0]
# The code below splits first level commas, but not commas within brackets, such as in MAX ( X , Y )
# and returns 3 or 4 elements depending on which function
# This is separated in order to be able to replace the init element and put it back together
expr_split = re.split(',\s*(?![^()]*\))', expr)
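        # illustrative example: for an expr like 'SMOOTH3I(Demand, MAX(2, adj time), init level)'
        # the stripped expr 'Demand, MAX(2, adj time), init level' splits into
        # ['Demand', 'MAX(2, adj time)', 'init level'], so the init expr is the last element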
if func == 'A FUNCTION OF':
entry['math type'] = 'incomplete equation'
entry['function'] = func
elif func in c_functions:
entry['math type'] = 'complicated function'
entry['function'] = func
if func in regfunc_lst:
pos = -1
elif func in nfunc_lst:
pos = -2
else:
pos = None
if pos is not None:
entry['init expr'] = expr_split[pos]
elif func in s_functions:
entry['math type'] = 'simple function'
entry['function'] = func
elif func in t_functions:
entry['math type'] = 'testing function'
entry['function'] = func
else:
entry['math type'] = 'regular'
entry['function'] = 'NA'
return components
# handling the subscripts
def rem_subscripts(components):
"""
this removes the subscript brackets and adds them to the sub_expr and subs column in the doc
this function has to be reviewed and better documented
:param components: list of dict of the variables
:return: list of dict of the variables with subscript elements added
"""
sublist = []
subdict = {}
for entry in components:
if len(entry['eqn'].split('=')) == 1 and len(entry['eqn'].split(':')) > 1:
subs = re.split(':', entry['expr'])
entry['expr'] = subs[0]
entry['sub_expr'] = subs[-1]
sub_ins = re.split(',', subs[-1])
ins = len(sub_ins)
if ins == 1:
ex = subs[-1].replace('(', '').replace(')', '')
bounds = re.split('([0-9]*)', ex)
bounds = [x for x in bounds if constant(x)]
try:
ins = int(bounds[-1]) - int(bounds[0]) + 1
except:
ins = 1
entry['no of sub_ins'] = ins
entry['no of subs'] = 1
entry['type'] = 'subscript list'
if subs[0] not in sublist:
sublist.append(subs[0])
subdict[subs[0]] = len(sub_ins)
else:
entry['subs'] = 'NA'
entry['no of subs'] = 'NA'
entry['no of sub_ins'] = 1
entry['sub_expr'] = 'NA'
for entry in components:
if len(re.split('(\[.*?\])', entry['name'])) > 1:
subexpr = re.split('(\[.*?\])', entry['name'])[1]
subexpr = subexpr.replace('[', '').replace(']', '')
subs = subexpr.split(',')
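            # illustrative example: a name like 'population[region,age]' yields
            # subexpr 'region,age' and subs ['region', 'age']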
ins = 1
if set(subs).issubset(sublist):
entry['subs'] = subs
entry['no of subs'] = len(subs)
for sub in subs:
ins = ins * subdict.get(sub)
entry['no of sub_ins'] = ins
temp_expr = entry['expr'].replace(';', ',')
temp_els = re.split(',', temp_expr)
temp_els = [x for x in temp_els if x != '']
i = 0
for el in temp_els:
if constant(el):
i = i + 1
if len(temp_els) == ins and ins == i:
entry['type'] = 'subscripted constant'
for entry in components:
if entry['no of subs'] != 'NA':
# should probably remove emtpies here
if entry['no of subs'] == 1 and entry['math type'] == 'regular' and len(entry['expr'].split(',')) > 1:
entry['det_sub_ins'] = len(entry['expr'].split(','))
elif entry['no of subs'] > 1 and entry['math type'] == 'regular' and len(entry['expr'].split(';')) > 1:
entry['det_sub_ins'] = len(entry['expr'].split(';'))
else:
entry['det_sub_ins'] = 'NA'
else:
entry['det_sub_ins'] = 'NA'
return components
def collect_functions(components, collection, missing):
"""
collects additional functions that haven't been sorted
also collects garbage, but it's good enough
this could potentially be eliminated as the collection happens better in equation split
:param components: list of dict with variables
:param collection: list of functions that have been sorted
:param missing: list of functions that haven't been sorted from previous iterations
:return: list of functions updated with new functions
"""
for entry in components:
func = entry['expr'].split('(')[0].strip()
func = re.split("[+-/*]", func)[-1].strip()
if func.isupper():
if len(func) < len(entry['expr'].strip()) and func not in collection and func not in missing:
missing.append(func)
return missing
def equation_split(varlist, funclist, missing, t_function):
"""
this splits the equation into its elements and counts them and saves some information based on types to the varlist
:param varlist: list of dict of the variables
:param funclist: list, combined with all functions
:param missing: list, missing functions
:param t_function: list, testing functions
:return: list of dict with updated information
"""
for var in varlist:
e = re.split('(".*?")|\+|-|\*|/|\(|\)|\^|,|>|<', var['expr'])
e = [x for x in e if x is not None]
e = [x.strip() for x in e]
e = [x for x in e if x != '']
# m collects the missing functions (statements in upper case) that are not at the beginning of the expression
m = [x for x in e if x.isupper()]
# f collects functions even if they are not upper case
f = [x for x in e if x.upper() in funclist]
e = [x for x in e if x.upper() not in funclist]
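        # illustrative example: for 'IF THEN ELSE(switch>0, demand*fraction, 0)' the split yields
        # f == ['IF THEN ELSE'] and e == ['switch', '0', 'demand', 'fraction', '0']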
for func in m:
# the functions that are not already in the list or already collected in missing are added
if func not in funclist and func not in missing:
missing.append(func)
# types and subscripted constants don't need further information
if var['type'] == 'constant' or var['type'] == 'subscripted constant':
nbr, e, hasinit = 0, [], 'NA'
# stocks have additional information here (number of elements and hasinit)
elif var['type'] == 'stock':
e = [x for x in e if x != 'INTEG']
nbr = len(e) - 1
if constant(var['init expr']):
hasinit = 'no'
else:
hasinit = 'yes'
else:
nbr = len(e)
hasinit = 'NA'
var['Number of elements'] = nbr
var['INIT'] = hasinit
var['elements'] = e
var['function list'] = f
funcs = len(f)
if funcs > 0 and var['math type'] == 'regular':
var['function'] = f[0]
if f[0] in t_function:
var['math type'] = 'testing function'
else:
var['math type'] = 'simple function'
var['no of functions'] = funcs
# splitting the elements in the init expression because we need them for the loop recognition
if var['init expr'] != 'NA':
ie = re.split('(".*?")|\+|-|\*|/|\(|\)|\^|,|>|<', var['init expr'])
ie = [x for x in ie if x is not None]
ie = [x.strip() for x in ie]
ie = [x for x in ie if x != '']
var['init elements'] = ie
else:
var['init elements'] = []
return varlist
def add_builtin(varlist, builtin, model_name):
"""
this adds the builtin information to the variables for statistical purposes
:param varlist: list of dicts with variables
:param builtin: list with builtins
:param model_name: str with model name
:return: list of dicts with builtin information added
"""
# bunit: base unit is interesting when other time units are in the model
# SAVEPER is irrelevant and thus ignored
ftime, bunit, itime, tstep = '', '', '', ''
for item in builtin:
if item['name'] == 'FINAL TIME':
ftime = item['expr']
bunit = item['unit']
elif item['name'] == 'INITIAL TIME':
itime = item['expr']
elif item['name'] == 'TIME STEP':
tstep = item['expr']
for var in varlist:
var['FINAL TIME'] = ftime
var['Base Unit'] = bunit
var['INITIAL TIME'] = itime
var['TIME STEP'] = tstep
var['Model name'] = model_name
return varlist
def corr_units(varlist):
"""
The unit correction is necessary as in the case when there are subscripts,
the units are not associated correctly with all the subscript instances
therefore the same unit needs to be passed down to other instances
unit_dict is not used anywhere else, but could be:
{'exo BL1': 'Month', 'exo BL2': '1/Month', 'exo RL1': 'Month', ...
:param varlist: list of dict of the variables
:return: list of dict of the variables with corrected units
"""
unit_dict = {}
for var in varlist:
if var['unit'] != '':
unit_dict[var['name'].split('[')[0]] = var['unit']
for var in varlist:
if var['type'] != 'subscript list' and var['unit'] == '':
var['unit'] = unit_dict.get(var['name'].split('[')[0])
return varlist
def calc_avg(varlist):
"""
Collecting the statistics for descriptives, including number of elements, number of functions,
number of variables, number of constants
:param varlist: list of dict of the variables
:return: total variables, average elements per equation, number of functions and average, constants, empty units
"""
tot, els, avg, funcs, cons, e_unit, sl_tot = 0, 0, 0, 0, 0, 0, 0
# two different count types, once with subscript and once without (i.e. number of elements with
# and without subscripts)
for var in varlist:
if var['type'] != 'constant' and var['type'] != 'subscripted constant':
tot = tot + 1 * var['no of sub_ins']
els = els + var['Number of elements'] * var['no of sub_ins']
funcs = funcs + var['no of functions'] * var['no of sub_ins']
if var['type'] == 'constant' or var['type'] == 'subscripted constant':
cons = cons + 1 * var['no of sub_ins']
if var['type'] != 'subscript list':
sl_tot = sl_tot + 1
if var['unit'] is None:
e_unit = e_unit + 1
try:
avg = els / tot
f_avg = funcs / tot
unit_per = e_unit / sl_tot
except ZeroDivisionError:
avg = 0
f_avg = 0
unit_per = 1
return tot, avg, funcs, f_avg, cons, unit_per
def word_analysis(varlist, worddict, wordlist):
"""
Function to collect word use in models, collects a stream of words and a dictionary with number of uses
:param varlist: list of dict of the variables
:param worddict: dictionary with the word counts
:param wordlist: used words in the models
:return: word data
"""
for var in varlist:
w_name = re.sub('\[.*?\]', ' ', var['name'])
w_name = re.sub('\"', ' ', w_name)
w_name = re.sub(':', ' ', w_name)
w_name = re.sub('\(table_expr\)', ' ', w_name)
w_list = w_name.split()
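        # illustrative example: a name like '"effect of price"[region]' reduces to
        # the word list ['effect', 'of', 'price'] before the upper-casing below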
for w in w_list:
w = w.strip()
# all words are capitalized to make sure no double counting happens
w = w.upper()
if w in worddict:
worddict[w] = worddict[w] + 1
else:
worddict[w] = 1
if var['unit'] is not None:
# btu = base time unit, this is to indicate that when a time unit is used, it's not the base
# and multiple time units are used in the model
w_unit = var['unit'].replace(var['Base Unit'], 'btu')
else:
w_unit = None
wordlist.append({'unit': w_unit,
'words': w_list})
return worddict, wordlist
def flagging(type_counter, func_counter, empty_stocks, varlist, builtins):
"""
This function flags models with obvious errors and codifies the error
:param type_counter: collections.counter object with the type information of the model
:param func_counter: collections.counter object with the function information of the model
:param empty_stocks: int, count of empty stocks from collect stats
:param varlist: list of dict of the variables
:param builtins: list of dict of the builtins
    :return: flag and code for the flagged model
"""
flag = 'No'
code = 'None'
# if the length of the varlist is 0, the model is empty
if len(varlist) == 0:
flag = 'Yes'
code = 'Empty'
# if the length of the builtin is not 4, there is a problem with the builtin
elif len(builtins) != 4:
flag = 'Yes'
code = 'Builtin'
# if the count of functions with 'A FUNCTION OF' is higher than 0, there is a problem with some equations
elif func_counter['A FUNCTION OF'] > 0:
flag = 'Yes'
code = 'incomplete equations'
# if there are empty stocks (i.e. stocks with a constant in it and nothing else), there is a problem
elif empty_stocks > 0:
flag = 'Yes'
code = 'constant stocks'
try:
# if the number of flows is smaller than 0.5, the model is obviously wrong
# (there needs to be at least 0.5 flows per stock)
# could be replaced with fs ratio from collect stats, but would require a
# division by zero check anyway for next flag, so why bother?
if type_counter['flow'] / type_counter['stock'] < 0.5:
flag = 'Yes'
code = 'flow recognition'
# if the previous test returns a zerodivision error, then there are no stocks in the model
except ZeroDivisionError:
flag = 'Yes'
code = 'No Stocks'
return flag, code
def doc(doc_name, doc_vars, model_doc):
"""
this creates the doc folder with all the information that is used in further tests and
is a selection of the descriptives
The doc file uses the following columns:
- Base Unit: for the x axis of plots
- flow expr: for the equilibrium function
- type: for the different type df (switches are not a type here)
- Real Name: For output lists
- Py Name: For input dicts
- elements: for distance calculations
- TIME STEP: for the integration test
- function: used for init replacement in doc file
- Unit: used for plots
:param doc_name: name of the doc file
:param doc_vars: full descriptive database
:param model_doc: doc from pysd
:return: saved doc file
"""
def fill_blanks(row):
"""
Function to fill the blanks for the builtin variables coming from model.doc()
:param row: row to fill with NA where empty
:return: row: NA filled row
"""
# the builtins and init variables need to be added back to the list now for the doc,
# they are coming from the model.doc() from pysd because init variables are only created there
builtin_list = ['FINAL TIME', 'INITIAL TIME', 'TIME STEP', 'SAVEPER']
if pd.isnull(row['type']):
if row['Real Name'] in builtin_list:
row['type'] = 'builtin'
elif row['Real Name'].startswith('init'):
row['type'] = 'constant'
else:
# if there is an undef type, something is wrong
row['type'] = 'undef'
            # here we just fill the remaining columns as they are irrelevant for both the builtins and the init variables
row['flow expr'] = 'NA'
row['elements'] = []
row['init elements'] = []
row['function list'] = []
row['expr'] = 'NA'
row['table expr'] = 'NA'
row['Base Unit'] = doc_vars.iloc[0]['Base Unit']
return row
# these are the dropped columns because they are not used from descriptives
# last line is used for testing the _doc columns
drop_cols = ['INIT', 'eqn', 'unit', 'comment', 'init expr', 'math type',
'subs', 'no of subs', 'no of sub_ins', 'sub_expr', 'det_sub_ins', 'Number of elements',
'no of functions', 'FINAL TIME', 'INITIAL TIME', 'Model name',
'TIME STEP', 'function']
doc_vars.drop(drop_cols, axis=1, inplace=True)
# merge with model.doc() from pysd to get the py names in the doc
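    # note: this assumes pysd's model.doc() returns the 'Real Name' and 'Py Name' columns used below
    # (true for the pysd versions this script was written against)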
    doc_vars = pd.merge(left=doc_vars, right=model_doc, how='outer', left_on='name', right_on='Real Name')
# drop columns that are not used from model.doc()
drop_cols = ['Type', 'Comment', 'name']
doc_vars.drop(drop_cols, axis=1, inplace=True)
    doc_vars = doc_vars.apply(fill_blanks, axis=1)
fops.save_csv(doc_vars, doc_name, test)
return doc_vars
def collect_stats(model_vars):
"""
Collecting the model stats for current model
This function creates counters for:
- type: type of variable (i.e. constant, auxiliary, etc.)
- math type: math type of variable (i.e. regular, simple function, etc.)
- INIT: whether or not a stock has an init variable
- function: which function is used (only first in the equation)
it also checks for empty stocks, i.e. when the flow expression of a stock is a constant
then it also collects the number of subscript instances, i.e. if a subscript has 10 instances,
an auxiliary with that subscript counts for 10 variables
also checks the flow stock ratio
:param model_vars: list of dict of the variables
:return: counters, number of empty stocks, subscript counts for stocks, auxiliaries and flows, flow stock ratio
"""
# counter for types
c = Counter()
# counter for math types
d = Counter()
# counter for INIT
e = Counter()
# counter for functions
f = Counter()
emstocks = 0
for var in model_vars:
c[var['type']] += 1
d[var['math type']] += 1
e[var['INIT']] += 1
f[var['function']] += 1
if constant(var['flow expr']):
emstocks += 1
s_stocks = sum(x['no of sub_ins'] for x in model_vars if x['type'] == 'stock')
s_aux = sum(x['no of sub_ins'] for x in model_vars if x['type'] == 'auxiliary')
s_flow = sum(x['no of sub_ins'] for x in model_vars if x['type'] == 'flow')
try:
fs_ratio = c['flow'] / c['stock']
except ZeroDivisionError:
fs_ratio = 0
return c, d, e, f, emstocks, s_stocks, s_aux, s_flow, fs_ratio
def list_combine():
"""
combining the lists of functions for general function list checks
:return: combined list of functions
"""
for f in tbl_func_list:
comb_func_list.append(f)
for f in comp_func_list:
comb_func_list.append(f)
for f in simp_func_list:
comb_func_list.append(f)
for f in test_func_list:
comb_func_list.append(f)
def init(folder):
"""
Creates the globals for descriptives and handles preparatory file operations in the base and report folder
:param folder: string, path to the base folder coming from the testingbattery
:return:
"""
fops.init(folder)
# defining the target folder
# current directories are test and models
global test
test = 'doc'
global source_folder
source_folder = folder
# clear and create structure of the report folder, happens in fileops
fops.initiate_report_folder()
# then we remove the .py files from the source folder
# could potentially be an append function, but this is going to be run after hopefully extensive model changes,
# so why bother?
fops.clear_files_type('source', '.py')
# then we remove the .csv files, mainly time tracking files
fops.clear_files_type('source', '.csv')
# combine the lists
list_combine()
def descriptives(mdl_file, flag_count, word_dict, word_list, mis_func_list, rep_type='work'):
"""
This function runs the descriptives for an individual file and is called from the full translate
outputs are:
- descriptive file
- list of dict with variables
- flag related information
- word analysis information
- missing function collection
    :param rep_type: string, 'orig' for the original model report, otherwise the working model report
:param mdl_file: string, .mdl file to be run through descriptives
:param flag_count: int, number of flagged models previous to this one
:param word_dict: dict, word dictionary with words previous to this one
:param word_list: list, words used previous to this one
:param mis_func_list: list, missing functions previous to this one
:return: list of dict with vars, flag count, word files, missing functions list
"""
model_name = mdl_file.split('.')[0]
# code specific to the ISDC sample, returns garbage in other cases
year = re.split('_', model_name)[0]
ind_start = timer()
with open(os.path.join(source_folder, mdl_file), 'r', encoding='utf8', errors='replace') as in_file:
text = in_file.read()
# extracting the sections from the text string and get the variables in two sets, model vars and builtins
# builtins may fail or contain also model vars
var_list, built_ins = get_sections(text)
model_vars = get_vars(var_list)
built_ins = get_vars(built_ins)
# correct the lists, builtins should end up being 4 variables
model_vars, built_ins = corr_lists(model_vars, built_ins)
# determining the typology of the variables
model_vars = get_types(model_vars, tbl_func_list, comp_func_list, simp_func_list, test_func_list)
# removing subscripts from the expr
model_vars = rem_subscripts(model_vars)
# split the equations to get the elements of the equation
model_vars = equation_split(model_vars, comb_func_list, mis_func_list, test_func_list)
# collect missing functions to be added to the function list
# currently collects lots of garbage and misses functions that are not in first place of the eqn string
mis_func_list = collect_functions(model_vars, comb_func_list, mis_func_list)
# add builtins to model vars to save space (builtins only relevant with values, not name)
model_vars = add_builtin(model_vars, built_ins, model_name)
# correcting units if subscripts are involved
model_vars = corr_units(model_vars)
# word analysis
word_dict, word_list = word_analysis(model_vars, word_dict, word_list)
# model_vars operations need to be done here
tot, avg, funcs, f_avg, cons, unit_per = calc_avg(model_vars)
c, d, e, f, emstocks, s_stocks, s_aux, s_flow, fs_ratio = collect_stats(model_vars)
# finishing the documentation of current model
# csv file contains all descriptive information about the variables in the model
fops.save_lst_csv(model_vars, model_name, test, append=False)
# adding the current variables to the global variable collection
for var in model_vars:
vardb.append(var)
# flagging for data integrity
# parking for debug in debug folder
flag, code = flagging(c, f, emstocks, model_vars, built_ins)
if flag == 'Yes':
flag_count += 1
fops.move_mdl_debug(mdl_file, 'flag')
# individual operation of model is done here
ind_end = timer()
# adding the model stats to the stats file
model_stat_vars.append({'Name': model_name,
'Variables': len(model_vars),
'sub_Variables': tot + cons,
'Constants': c['constant'],
'sub_Constants': cons,
'Auxiliaries': c['auxiliary'],
'sub_Auxiliaries': s_aux,
'Flows': c['flow'],
'sub_Flows': s_flow,
'Stocks': c['stock'],
'sub_Stocks': s_stocks,
'Flow/Stock Ratio': fs_ratio,
'Empty units percentage': unit_per,
'Table Functions': c['table function'],
'Subscripts': c['subscript list'],
'math_Simple Functions': d['simple function'],
'math_Complicated functions': d['complicated function'],
'math_Testing functions': d['testing function'],
'math_Incomplete equations': d['incomplete equation'],
'Non-constant variables': tot,
'Number of functions': funcs,
'Elements per equation': avg,
'Functions per equation': f_avg,
'Built-ins': len(built_ins),
'Stocks with INIT': e['yes'],
'Stocks without INIT': e['no'],
'Year': year,
'Flag': flag,
'Code': code,
'Time': ind_end - ind_start,
'Timestamp': ind_start})
# doing the reporting for the html file
if rep_type == 'orig':
title = 'Original Model'
else:
title = 'Working Model'
orig_df = pd.DataFrame(model_vars)
sel = orig_df[orig_df['Number of elements'] == 1].count()['Number of elements']
base_lst = [orig_df['Base Unit'][0], orig_df['INITIAL TIME'][0], orig_df['FINAL TIME'][0], orig_df['TIME STEP'][0]]
cnt_lst = [len(model_vars), c['auxiliary'], c['constant'], c['flow'], c['stock'], c['table function']]
ind_lst = [avg, unit_per, fs_ratio, f_avg, e['no'] / (e['no'] + e['yes']), sel]
rep_tpl = (title, base_lst, cnt_lst, ind_lst)
return model_vars, flag_count, word_dict, word_list, mis_func_list, rep_tpl
# Functions are above
# initializing the output files
dbname = 'Var_DB'
model_stat_file = 'Model_Stats'
# These are for word analysis which is currently not further developed
word_dict_file = 'word_dict'
word_list_file = 'word_list'
# These are more for testing
mis_name = 'missing_functions'
track_name = 'tracking'
err_name_adj = 'translation_errors_adjusted.txt'
err_name_std = 'translation_errors_standard.txt'
# initializing the collection lists for output files
vardb = []
track_lst = []
model_stat_vars = []
comb_func_list = []
# initializing the function list
# list is static and needs to be completed manually
# level of detail to be discussed
# currently no clear separation
# table function list
tbl_func_list = ['WITH LOOKUP', 'LOOKUP INVERT']
# complicated function list, roughly functions that add structure
comp_func_list = ['DELAY N', 'DELAY3', 'DELAY3I', 'DELAY FIXED', 'DELAY1', 'DELAY1I', 'DELAY MATERIAL', 'IF THEN ELSE',
'SAMPLE IF TRUE', 'SMOOTH', 'SMOOTHI', 'SMOOTH3', 'SMOOTH3I', 'SMOOTH N',
'FORECAST', 'TREND', 'NPV']
# simple functions that roughly don't add structure
simp_func_list = ['MIN', 'VMIN', 'MAX', 'VMAX', 'SIN', 'INITIAL', 'SQRT', 'ACTIVE INITIAL', 'INITIAL TIME',
'ZIDZ', 'XIDZ', 'SUM', 'MODULO', 'ABS', 'LN', 'SIMULTANEOUS', ':AND:', ':IMPLIES:', ':OR:',
'INTEGER']
# functions that are/should only be used for model testing, should probably be ignored in the testing suite
# (currently is not ignored)
test_func_list = ['RANDOM UNIFORM', 'RANDOM 0 1', 'RANDOM NORMAL', 'STEP', 'PULSE', 'PULSE TRAIN', 'RAMP', 'RND']
# documenting the init expr for all functions that have init variables
regfunc_lst = ['INTEG', 'DELAY1I', 'DELAY3I', 'SMOOTHI', 'SMOOTH3I']
nfunc_lst = ['DELAY N', 'SMOOTH N']
def full_translate(first_file=None, last_file=None):
"""
This function runs the descriptives and translation part for all the models in the testing folder
Output is:
- descriptives
- .py file of the models
- doc file
- statistic collections
:param first_file: first file to be tested (index on list), defaults to None, meaning the first file in the list
:param last_file: last file to be tested (index on list), defaults to None, meaning the last file in the list
:return: time elapsed for the entire test as float
"""
# initializing the counts
model_count = 0
flag_count = 0
err_adj = 0
err_std = 0
# explorative collection for word analysis
word_dict = {}
word_list = []
# missing function list
mis_func_list = []
total_start = timer()
# selecting the files
files = fops.load_files('mdl')
for mdl_file in files[first_file:last_file]:
print('Translating', mdl_file)
model_count += 1
model_name = mdl_file.split('.')[0]
fops.output_folder(model_name, active='doc')
# doc file contains all the information needed for further steps in the testing battery
doc_name = mdl_file.replace('.mdl', '_doc')
# ind_track is to keep an overview of the created documents that are necessary for next steps
ind_track = [mdl_file, 'no', 'no', 'no']
err_rep_adj = True
err_rep_std = True
# adjusted translate for descriptives
if err_rep_adj:
try:
model_vars, flag_count, word_dict, word_list, mis_func_list, rep_tpl = \
descriptives(mdl_file, flag_count, word_dict, word_list, mis_func_list)
ind_track[1] = 'yes'
doc_vars = pd.DataFrame(model_vars)
except Exception as e:
fops.write_error_file(model_name, e, err_name_adj)
err_adj += 1
rep_tpl = ('Working Model', [], [], [])
model_vars = []
doc_vars = pd.DataFrame(model_vars)
else:
model_vars, flag_count, word_dict, word_list, mis_func_list, rep_tpl = \
descriptives(mdl_file, flag_count, word_dict, word_list, mis_func_list)
ind_track[1] = 'yes'
doc_vars = | pd.DataFrame(model_vars) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import re
import numpy as np
import pytest
from pandas.core.dtypes.common import (
is_bool_dtype, is_categorical, is_categorical_dtype,
is_datetime64_any_dtype, is_datetime64_dtype, is_datetime64_ns_dtype,
is_datetime64tz_dtype, is_datetimetz, is_dtype_equal, is_interval_dtype,
is_period, is_period_dtype, is_string_dtype)
from pandas.core.dtypes.dtypes import (
CategoricalDtype, DatetimeTZDtype, IntervalDtype, PeriodDtype, registry)
import pandas as pd
from pandas import (
Categorical, CategoricalIndex, IntervalIndex, Series, date_range)
from pandas.core.sparse.api import SparseDtype
import pandas.util.testing as tm
@pytest.fixture(params=[True, False, None])
def ordered(request):
return request.param
class Base(object):
def setup_method(self, method):
self.dtype = self.create()
def test_hash(self):
hash(self.dtype)
def test_equality_invalid(self):
assert not self.dtype == 'foo'
assert not is_dtype_equal(self.dtype, np.int64)
def test_numpy_informed(self):
pytest.raises(TypeError, np.dtype, self.dtype)
assert not self.dtype == np.str_
assert not np.str_ == self.dtype
def test_pickle(self):
# make sure our cache is NOT pickled
# clear the cache
type(self.dtype).reset_cache()
assert not len(self.dtype._cache)
# force back to the cache
result = tm.round_trip_pickle(self.dtype)
assert not len(self.dtype._cache)
assert result == self.dtype
class TestCategoricalDtype(Base):
def create(self):
return CategoricalDtype()
def test_pickle(self):
# make sure our cache is NOT pickled
# clear the cache
type(self.dtype).reset_cache()
assert not len(self.dtype._cache)
# force back to the cache
result = tm.round_trip_pickle(self.dtype)
assert result == self.dtype
def test_hash_vs_equality(self):
dtype = self.dtype
dtype2 = CategoricalDtype()
assert dtype == dtype2
assert dtype2 == dtype
assert hash(dtype) == hash(dtype2)
def test_equality(self):
assert is_dtype_equal(self.dtype, 'category')
assert is_dtype_equal(self.dtype, CategoricalDtype())
assert not is_dtype_equal(self.dtype, 'foo')
def test_construction_from_string(self):
result = CategoricalDtype.construct_from_string('category')
assert is_dtype_equal(self.dtype, result)
pytest.raises(
TypeError, lambda: CategoricalDtype.construct_from_string('foo'))
def test_constructor_invalid(self):
msg = "Parameter 'categories' must be list-like"
with pytest.raises(TypeError, match=msg):
CategoricalDtype("category")
dtype1 = CategoricalDtype(['a', 'b'], ordered=True)
dtype2 = CategoricalDtype(['x', 'y'], ordered=False)
c = Categorical([0, 1], dtype=dtype1, fastpath=True)
@pytest.mark.parametrize('values, categories, ordered, dtype, expected',
[
[None, None, None, None,
CategoricalDtype()],
[None, ['a', 'b'], True, None, dtype1],
[c, None, None, dtype2, dtype2],
[c, ['x', 'y'], False, None, dtype2],
])
def test_from_values_or_dtype(
self, values, categories, ordered, dtype, expected):
result = CategoricalDtype._from_values_or_dtype(values, categories,
ordered, dtype)
assert result == expected
@pytest.mark.parametrize('values, categories, ordered, dtype', [
[None, ['a', 'b'], True, dtype2],
[None, ['a', 'b'], None, dtype2],
[None, None, True, dtype2],
])
def test_from_values_or_dtype_raises(self, values, categories,
ordered, dtype):
msg = "Cannot specify `categories` or `ordered` together with `dtype`."
with pytest.raises(ValueError, match=msg):
CategoricalDtype._from_values_or_dtype(values, categories,
ordered, dtype)
def test_is_dtype(self):
assert CategoricalDtype.is_dtype(self.dtype)
assert CategoricalDtype.is_dtype('category')
assert CategoricalDtype.is_dtype(CategoricalDtype())
assert not CategoricalDtype.is_dtype('foo')
assert not CategoricalDtype.is_dtype(np.float64)
def test_basic(self):
assert is_categorical_dtype(self.dtype)
factor = Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
s = Series(factor, name='A')
# dtypes
assert is_categorical_dtype(s.dtype)
assert is_categorical_dtype(s)
assert not is_categorical_dtype(np.dtype('float64'))
assert is_categorical(s.dtype)
assert is_categorical(s)
assert not is_categorical(np.dtype('float64'))
assert not is_categorical(1.0)
def test_tuple_categories(self):
categories = [(1, 'a'), (2, 'b'), (3, 'c')]
result = CategoricalDtype(categories)
assert all(result.categories == categories)
@pytest.mark.parametrize("categories, expected", [
([True, False], True),
([True, False, None], True),
([True, False, "a", "b'"], False),
([0, 1], False),
])
def test_is_boolean(self, categories, expected):
cat = Categorical(categories)
assert cat.dtype._is_boolean is expected
assert is_bool_dtype(cat) is expected
assert is_bool_dtype(cat.dtype) is expected
class TestDatetimeTZDtype(Base):
def create(self):
return DatetimeTZDtype('ns', 'US/Eastern')
def test_alias_to_unit_raises(self):
# 23990
with tm.assert_produces_warning(FutureWarning):
DatetimeTZDtype('datetime64[ns, US/Central]')
def test_alias_to_unit_bad_alias_raises(self):
# 23990
with pytest.raises(TypeError, match=''):
DatetimeTZDtype('this is a bad string')
with pytest.raises(TypeError, match=''):
DatetimeTZDtype('datetime64[ns, US/NotATZ]')
def test_hash_vs_equality(self):
# make sure that we satisfy is semantics
dtype = self.dtype
dtype2 = DatetimeTZDtype('ns', 'US/Eastern')
dtype3 = DatetimeTZDtype(dtype2)
assert dtype == dtype2
assert dtype2 == dtype
assert dtype3 == dtype
assert hash(dtype) == hash(dtype2)
assert hash(dtype) == hash(dtype3)
dtype4 = DatetimeTZDtype("ns", "US/Central")
assert dtype2 != dtype4
assert hash(dtype2) != hash(dtype4)
def test_construction(self):
pytest.raises(ValueError,
lambda: DatetimeTZDtype('ms', 'US/Eastern'))
def test_subclass(self):
a = DatetimeTZDtype.construct_from_string('datetime64[ns, US/Eastern]')
b = DatetimeTZDtype.construct_from_string('datetime64[ns, CET]')
assert issubclass(type(a), type(a))
assert issubclass(type(a), type(b))
def test_compat(self):
assert is_datetime64tz_dtype(self.dtype)
assert is_datetime64tz_dtype('datetime64[ns, US/Eastern]')
assert is_datetime64_any_dtype(self.dtype)
assert is_datetime64_any_dtype('datetime64[ns, US/Eastern]')
assert is_datetime64_ns_dtype(self.dtype)
assert is_datetime64_ns_dtype('datetime64[ns, US/Eastern]')
assert not is_datetime64_dtype(self.dtype)
assert not is_datetime64_dtype('datetime64[ns, US/Eastern]')
def test_construction_from_string(self):
result = DatetimeTZDtype.construct_from_string(
'datetime64[ns, US/Eastern]')
assert is_dtype_equal(self.dtype, result)
pytest.raises(TypeError,
lambda: DatetimeTZDtype.construct_from_string('foo'))
def test_construct_from_string_raises(self):
with pytest.raises(TypeError, match="notatz"):
DatetimeTZDtype.construct_from_string('datetime64[ns, notatz]')
with pytest.raises(TypeError,
match="^Could not construct DatetimeTZDtype$"):
DatetimeTZDtype.construct_from_string(['datetime64[ns, notatz]'])
def test_is_dtype(self):
assert not DatetimeTZDtype.is_dtype(None)
assert DatetimeTZDtype.is_dtype(self.dtype)
assert DatetimeTZDtype.is_dtype('datetime64[ns, US/Eastern]')
assert not DatetimeTZDtype.is_dtype('foo')
assert DatetimeTZDtype.is_dtype(DatetimeTZDtype('ns', 'US/Pacific'))
assert not DatetimeTZDtype.is_dtype(np.float64)
def test_equality(self):
assert is_dtype_equal(self.dtype, 'datetime64[ns, US/Eastern]')
assert is_dtype_equal(self.dtype, DatetimeTZDtype('ns', 'US/Eastern'))
assert not is_dtype_equal(self.dtype, 'foo')
assert not is_dtype_equal(self.dtype, DatetimeTZDtype('ns', 'CET'))
assert not is_dtype_equal(DatetimeTZDtype('ns', 'US/Eastern'),
DatetimeTZDtype('ns', 'US/Pacific'))
# numpy compat
assert is_dtype_equal(np.dtype("M8[ns]"), "datetime64[ns]")
def test_basic(self):
assert is_datetime64tz_dtype(self.dtype)
dr = date_range('20130101', periods=3, tz='US/Eastern')
s = Series(dr, name='A')
# dtypes
assert is_datetime64tz_dtype(s.dtype)
assert is_datetime64tz_dtype(s)
assert not is_datetime64tz_dtype(np.dtype('float64'))
assert not is_datetime64tz_dtype(1.0)
with tm.assert_produces_warning(FutureWarning):
assert is_datetimetz(s)
assert is_datetimetz(s.dtype)
assert not is_datetimetz(np.dtype('float64'))
assert not is_datetimetz(1.0)
def test_dst(self):
dr1 = date_range('2013-01-01', periods=3, tz='US/Eastern')
s1 = Series(dr1, name='A')
assert is_datetime64tz_dtype(s1)
with tm.assert_produces_warning(FutureWarning):
assert is_datetimetz(s1)
dr2 = date_range('2013-08-01', periods=3, tz='US/Eastern')
s2 = Series(dr2, name='A')
assert is_datetime64tz_dtype(s2)
with tm.assert_produces_warning(FutureWarning):
assert is_datetimetz(s2)
assert s1.dtype == s2.dtype
@pytest.mark.parametrize('tz', ['UTC', 'US/Eastern'])
@pytest.mark.parametrize('constructor', ['M8', 'datetime64'])
def test_parser(self, tz, constructor):
# pr #11245
dtz_str = '{con}[ns, {tz}]'.format(con=constructor, tz=tz)
result = DatetimeTZDtype.construct_from_string(dtz_str)
expected = DatetimeTZDtype('ns', tz)
assert result == expected
def test_empty(self):
with pytest.raises(TypeError, match="A 'tz' is required."):
DatetimeTZDtype()
class TestPeriodDtype(Base):
def create(self):
return PeriodDtype('D')
def test_hash_vs_equality(self):
# make sure that we satisfy is semantics
dtype = self.dtype
dtype2 = PeriodDtype('D')
dtype3 = PeriodDtype(dtype2)
assert dtype == dtype2
assert dtype2 == dtype
assert dtype3 == dtype
assert dtype is dtype2
assert dtype2 is dtype
assert dtype3 is dtype
assert hash(dtype) == hash(dtype2)
assert hash(dtype) == hash(dtype3)
def test_construction(self):
with pytest.raises(ValueError):
PeriodDtype('xx')
for s in ['period[D]', 'Period[D]', 'D']:
dt = PeriodDtype(s)
assert dt.freq == pd.tseries.offsets.Day()
assert is_period_dtype(dt)
for s in ['period[3D]', 'Period[3D]', '3D']:
dt = PeriodDtype(s)
assert dt.freq == pd.tseries.offsets.Day(3)
assert is_period_dtype(dt)
for s in ['period[26H]', 'Period[26H]', '26H',
'period[1D2H]', 'Period[1D2H]', '1D2H']:
dt = PeriodDtype(s)
assert dt.freq == pd.tseries.offsets.Hour(26)
assert is_period_dtype(dt)
def test_subclass(self):
a = PeriodDtype('period[D]')
b = PeriodDtype('period[3D]')
assert issubclass(type(a), type(a))
assert issubclass(type(a), type(b))
def test_identity(self):
assert PeriodDtype('period[D]') == PeriodDtype('period[D]')
assert PeriodDtype('period[D]') is PeriodDtype('period[D]')
assert PeriodDtype('period[3D]') == PeriodDtype('period[3D]')
assert PeriodDtype('period[3D]') is PeriodDtype('period[3D]')
assert PeriodDtype('period[1S1U]') == PeriodDtype('period[1000001U]')
assert PeriodDtype('period[1S1U]') is PeriodDtype('period[1000001U]')
def test_compat(self):
assert not is_datetime64_ns_dtype(self.dtype)
assert not is_datetime64_ns_dtype('period[D]')
assert not is_datetime64_dtype(self.dtype)
assert not is_datetime64_dtype('period[D]')
def test_construction_from_string(self):
result = PeriodDtype('period[D]')
assert is_dtype_equal(self.dtype, result)
result = PeriodDtype.construct_from_string('period[D]')
assert is_dtype_equal(self.dtype, result)
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('foo')
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('period[foo]')
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('foo[D]')
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('datetime64[ns]')
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('datetime64[ns, US/Eastern]')
def test_is_dtype(self):
assert PeriodDtype.is_dtype(self.dtype)
assert PeriodDtype.is_dtype('period[D]')
assert PeriodDtype.is_dtype('period[3D]')
assert PeriodDtype.is_dtype(PeriodDtype('3D'))
assert PeriodDtype.is_dtype('period[U]')
assert PeriodDtype.is_dtype('period[S]')
assert PeriodDtype.is_dtype(PeriodDtype('U'))
assert PeriodDtype.is_dtype(PeriodDtype('S'))
assert not PeriodDtype.is_dtype('D')
assert not PeriodDtype.is_dtype('3D')
assert not PeriodDtype.is_dtype('U')
assert not PeriodDtype.is_dtype('S')
assert not PeriodDtype.is_dtype('foo')
assert not PeriodDtype.is_dtype(np.object_)
assert not PeriodDtype.is_dtype(np.int64)
assert not PeriodDtype.is_dtype(np.float64)
def test_equality(self):
assert is_dtype_equal(self.dtype, 'period[D]')
assert is_dtype_equal(self.dtype, PeriodDtype('D'))
assert is_dtype_equal(self.dtype, PeriodDtype('D'))
assert is_dtype_equal(PeriodDtype('D'), PeriodDtype('D'))
assert not is_dtype_equal(self.dtype, 'D')
assert not is_dtype_equal(PeriodDtype('D'), PeriodDtype('2D'))
def test_basic(self):
assert is_period_dtype(self.dtype)
pidx = pd.period_range('2013-01-01 09:00', periods=5, freq='H')
assert is_period_dtype(pidx.dtype)
assert is_period_dtype(pidx)
with tm.assert_produces_warning(FutureWarning):
assert is_period(pidx)
s = Series(pidx, name='A')
assert is_period_dtype(s.dtype)
assert is_period_dtype(s)
with tm.assert_produces_warning(FutureWarning):
assert is_period(s)
assert not is_period_dtype(np.dtype('float64'))
assert not is_period_dtype(1.0)
with tm.assert_produces_warning(FutureWarning):
assert not is_period(np.dtype('float64'))
with tm.assert_produces_warning(FutureWarning):
assert not is_period(1.0)
def test_empty(self):
dt = PeriodDtype()
with pytest.raises(AttributeError):
str(dt)
def test_not_string(self):
# though PeriodDtype has object kind, it cannot be string
assert not is_string_dtype(PeriodDtype('D'))
class TestIntervalDtype(Base):
def create(self):
return IntervalDtype('int64')
def test_hash_vs_equality(self):
# make sure that we satisfy is semantics
dtype = self.dtype
dtype2 = IntervalDtype('int64')
dtype3 = IntervalDtype(dtype2)
assert dtype == dtype2
assert dtype2 == dtype
assert dtype3 == dtype
assert dtype is dtype2
assert dtype2 is dtype3
assert dtype3 is dtype
assert hash(dtype) == hash(dtype2)
assert hash(dtype) == hash(dtype3)
dtype1 = IntervalDtype('interval')
dtype2 = IntervalDtype(dtype1)
dtype3 = IntervalDtype('interval')
assert dtype2 == dtype1
assert dtype2 == dtype2
assert dtype2 == dtype3
assert dtype2 is dtype1
assert dtype2 is dtype2
assert dtype2 is dtype3
assert hash(dtype2) == hash(dtype1)
assert hash(dtype2) == hash(dtype2)
assert hash(dtype2) == hash(dtype3)
@pytest.mark.parametrize('subtype', [
'interval[int64]', 'Interval[int64]', 'int64', np.dtype('int64')])
def test_construction(self, subtype):
i = IntervalDtype(subtype)
assert i.subtype == np.dtype('int64')
assert is_interval_dtype(i)
@pytest.mark.parametrize('subtype', [None, 'interval', 'Interval'])
def test_construction_generic(self, subtype):
# generic
i = IntervalDtype(subtype)
assert i.subtype is None
assert is_interval_dtype(i)
@pytest.mark.parametrize('subtype', [
CategoricalDtype(list('abc'), False),
CategoricalDtype(list('wxyz'), True),
object, str, '<U10', 'interval[category]', 'interval[object]'])
def test_construction_not_supported(self, subtype):
# GH 19016
msg = ('category, object, and string subtypes are not supported '
'for IntervalDtype')
with pytest.raises(TypeError, match=msg):
IntervalDtype(subtype)
@pytest.mark.parametrize('subtype', ['xx', 'IntervalA', 'Interval[foo]'])
def test_construction_errors(self, subtype):
msg = 'could not construct IntervalDtype'
with pytest.raises(TypeError, match=msg):
IntervalDtype(subtype)
def test_construction_from_string(self):
result = IntervalDtype('interval[int64]')
assert is_dtype_equal(self.dtype, result)
result = IntervalDtype.construct_from_string('interval[int64]')
assert is_dtype_equal(self.dtype, result)
@pytest.mark.parametrize('string', [
0, 3.14, ('a', 'b'), None])
def test_construction_from_string_errors(self, string):
# these are invalid entirely
msg = 'a string needs to be passed, got type'
with pytest.raises(TypeError, match=msg):
IntervalDtype.construct_from_string(string)
@pytest.mark.parametrize('string', [
'foo', 'foo[int64]', 'IntervalA'])
def test_construction_from_string_error_subtype(self, string):
# this is an invalid subtype
msg = ("Incorrectly formatted string passed to constructor. "
r"Valid formats include Interval or Interval\[dtype\] "
"where dtype is numeric, datetime, or timedelta")
with pytest.raises(TypeError, match=msg):
IntervalDtype.construct_from_string(string)
def test_subclass(self):
a = IntervalDtype('interval[int64]')
b = IntervalDtype('interval[int64]')
assert issubclass(type(a), type(a))
assert issubclass(type(a), type(b))
def test_is_dtype(self):
assert IntervalDtype.is_dtype(self.dtype)
assert IntervalDtype.is_dtype('interval')
assert IntervalDtype.is_dtype(IntervalDtype('float64'))
assert IntervalDtype.is_dtype(IntervalDtype('int64'))
assert IntervalDtype.is_dtype(IntervalDtype(np.int64))
assert not IntervalDtype.is_dtype('D')
assert not IntervalDtype.is_dtype('3D')
assert not IntervalDtype.is_dtype('U')
assert not IntervalDtype.is_dtype('S')
assert not IntervalDtype.is_dtype('foo')
assert not IntervalDtype.is_dtype('IntervalA')
assert not IntervalDtype.is_dtype(np.object_)
assert not IntervalDtype.is_dtype(np.int64)
assert not IntervalDtype.is_dtype(np.float64)
def test_equality(self):
assert is_dtype_equal(self.dtype, 'interval[int64]')
assert is_dtype_equal(self.dtype, IntervalDtype('int64'))
assert is_dtype_equal(IntervalDtype('int64'), IntervalDtype('int64'))
assert not is_dtype_equal(self.dtype, 'int64')
assert not is_dtype_equal(IntervalDtype('int64'),
IntervalDtype('float64'))
# invalid subtype comparisons do not raise when directly compared
dtype1 = IntervalDtype('float64')
dtype2 = IntervalDtype('datetime64[ns, US/Eastern]')
assert dtype1 != dtype2
assert dtype2 != dtype1
@pytest.mark.parametrize('subtype', [
None, 'interval', 'Interval', 'int64', 'uint64', 'float64',
'complex128', 'datetime64', 'timedelta64', PeriodDtype('Q')])
def test_equality_generic(self, subtype):
# GH 18980
dtype = IntervalDtype(subtype)
assert is_dtype_equal(dtype, 'interval')
assert is_dtype_equal(dtype, IntervalDtype())
@pytest.mark.parametrize('subtype', [
'int64', 'uint64', 'float64', 'complex128', 'datetime64',
'timedelta64', PeriodDtype('Q')])
def test_name_repr(self, subtype):
# GH 18980
dtype = IntervalDtype(subtype)
expected = 'interval[{subtype}]'.format(subtype=subtype)
assert str(dtype) == expected
assert dtype.name == 'interval'
@pytest.mark.parametrize('subtype', [None, 'interval', 'Interval'])
def test_name_repr_generic(self, subtype):
# GH 18980
dtype = IntervalDtype(subtype)
assert str(dtype) == 'interval'
assert dtype.name == 'interval'
def test_basic(self):
assert is_interval_dtype(self.dtype)
ii = IntervalIndex.from_breaks(range(3))
assert is_interval_dtype(ii.dtype)
assert is_interval_dtype(ii)
s = Series(ii, name='A')
assert is_interval_dtype(s.dtype)
assert is_interval_dtype(s)
def test_basic_dtype(self):
assert is_interval_dtype('interval[int64]')
assert is_interval_dtype(IntervalIndex.from_tuples([(0, 1)]))
assert is_interval_dtype(IntervalIndex.from_breaks(np.arange(4)))
assert is_interval_dtype(IntervalIndex.from_breaks(
date_range('20130101', periods=3)))
assert not is_interval_dtype('U')
assert not is_interval_dtype('S')
assert not is_interval_dtype('foo')
assert not is_interval_dtype(np.object_)
assert not is_interval_dtype(np.int64)
assert not is_interval_dtype(np.float64)
def test_caching(self):
IntervalDtype.reset_cache()
dtype = IntervalDtype("int64")
assert len(IntervalDtype._cache) == 1
IntervalDtype("interval")
assert len(IntervalDtype._cache) == 2
IntervalDtype.reset_cache()
tm.round_trip_pickle(dtype)
assert len(IntervalDtype._cache) == 0
class TestCategoricalDtypeParametrized(object):
@pytest.mark.parametrize('categories', [
list('abcd'),
np.arange(1000),
['a', 'b', 10, 2, 1.3, True],
[True, False],
pd.date_range('2017', periods=4)])
def test_basic(self, categories, ordered):
c1 = CategoricalDtype(categories, ordered=ordered)
tm.assert_index_equal(c1.categories, pd.Index(categories))
assert c1.ordered is ordered
def test_order_matters(self):
categories = ['a', 'b']
c1 = CategoricalDtype(categories, ordered=True)
c2 = CategoricalDtype(categories, ordered=False)
c3 = CategoricalDtype(categories, ordered=None)
assert c1 is not c2
assert c1 is not c3
@pytest.mark.parametrize('ordered', [False, None])
def test_unordered_same(self, ordered):
c1 = CategoricalDtype(['a', 'b'], ordered=ordered)
c2 = CategoricalDtype(['b', 'a'], ordered=ordered)
assert hash(c1) == hash(c2)
def test_categories(self):
result = CategoricalDtype(['a', 'b', 'c'])
tm.assert_index_equal(result.categories, pd.Index(['a', 'b', 'c']))
assert result.ordered is None
def test_equal_but_different(self, ordered):
c1 = CategoricalDtype([1, 2, 3])
c2 = CategoricalDtype([1., 2., 3.])
assert c1 is not c2
assert c1 != c2
@pytest.mark.parametrize('v1, v2', [
([1, 2, 3], [1, 2, 3]),
([1, 2, 3], [3, 2, 1]),
])
def test_order_hashes_different(self, v1, v2):
c1 = CategoricalDtype(v1, ordered=False)
c2 = CategoricalDtype(v2, ordered=True)
c3 = CategoricalDtype(v1, ordered=None)
assert c1 is not c2
assert c1 is not c3
def test_nan_invalid(self):
with pytest.raises(ValueError):
CategoricalDtype([1, 2, np.nan])
def test_non_unique_invalid(self):
with pytest.raises(ValueError):
CategoricalDtype([1, 2, 1])
def test_same_categories_different_order(self):
c1 = CategoricalDtype(['a', 'b'], ordered=True)
c2 = CategoricalDtype(['b', 'a'], ordered=True)
assert c1 is not c2
@pytest.mark.parametrize('ordered1', [True, False, None])
@pytest.mark.parametrize('ordered2', [True, False, None])
def test_categorical_equality(self, ordered1, ordered2):
# same categories, same order
# any combination of None/False are equal
# True/True is the only combination with True that are equal
c1 = CategoricalDtype(list('abc'), ordered1)
c2 = CategoricalDtype(list('abc'), ordered2)
result = c1 == c2
expected = bool(ordered1) is bool(ordered2)
assert result is expected
# same categories, different order
# any combination of None/False are equal (order doesn't matter)
# any combination with True are not equal (different order of cats)
c1 = CategoricalDtype(list('abc'), ordered1)
c2 = CategoricalDtype(list('cab'), ordered2)
result = c1 == c2
expected = (bool(ordered1) is False) and (bool(ordered2) is False)
assert result is expected
# different categories
c2 = CategoricalDtype([1, 2, 3], ordered2)
assert c1 != c2
# none categories
c1 = CategoricalDtype(list('abc'), ordered1)
c2 = CategoricalDtype(None, ordered2)
c3 = CategoricalDtype(None, ordered1)
assert c1 == c2
assert c2 == c1
assert c2 == c3
@pytest.mark.parametrize('categories', [list('abc'), None])
@pytest.mark.parametrize('other', ['category', 'not a category'])
def test_categorical_equality_strings(self, categories, ordered, other):
c1 = CategoricalDtype(categories, ordered)
result = c1 == other
expected = other == 'category'
assert result is expected
def test_invalid_raises(self):
with pytest.raises(TypeError, match='ordered'):
CategoricalDtype(['a', 'b'], ordered='foo')
with pytest.raises(TypeError, match="'categories' must be list-like"):
CategoricalDtype('category')
def test_mixed(self):
a = CategoricalDtype(['a', 'b', 1, 2])
b = CategoricalDtype(['a', 'b', '1', '2'])
assert hash(a) != hash(b)
def test_from_categorical_dtype_identity(self):
c1 = Categorical([1, 2], categories=[1, 2, 3], ordered=True)
# Identity test for no changes
c2 = CategoricalDtype._from_categorical_dtype(c1)
assert c2 is c1
def test_from_categorical_dtype_categories(self):
c1 = Categorical([1, 2], categories=[1, 2, 3], ordered=True)
# override categories
result = CategoricalDtype._from_categorical_dtype(
c1, categories=[2, 3])
assert result == CategoricalDtype([2, 3], ordered=True)
def test_from_categorical_dtype_ordered(self):
c1 = Categorical([1, 2], categories=[1, 2, 3], ordered=True)
# override ordered
result = CategoricalDtype._from_categorical_dtype(
c1, ordered=False)
assert result == CategoricalDtype([1, 2, 3], ordered=False)
def test_from_categorical_dtype_both(self):
c1 = | Categorical([1, 2], categories=[1, 2, 3], ordered=True) | pandas.Categorical |
from copy import deepcopy
import numpy as np
import pandas as pd
import pytest
from Bio import Alphabet
from Bio.Seq import reverse_complement, Seq
from Bio.SeqRecord import SeqRecord
from pandas.util.testing import assert_series_equal, assert_index_equal
from sklearn.pipeline import Pipeline
from crseek import estimators
from crseek import evaluators
from crseek import preprocessing
from crseek import utils
from test.test_preprocessing import make_random_seq
def build_estimator():
mod = Pipeline(steps=[('transform', preprocessing.MatchingTransformer()),
('predict', estimators.MismatchEstimator())])
return mod
def do_rev_comp(seqs):
ndata = []
for seqR in seqs:
ndata.append(deepcopy(seqR))
ndata[-1].seq = reverse_complement(ndata[-1].seq)
ndata[-1].id = ndata[-1].id + '-R'
return ndata
@pytest.mark.skipif(utils._missing_casoffinder(), reason="Need CasOff installed")
class TestCheckgRNA(object):
def get_basic_info(self):
spacer = Seq('A' * 20, alphabet=Alphabet.generic_rna)
loci = [SeqRecord(Seq('T' * 10 + str(spacer) + 'TGG' + 'T' * 5,
alphabet=Alphabet.generic_dna),
id='Seq1'),
SeqRecord(Seq('T' * 15 + str(spacer) + 'TGG' + 'T' * 5,
alphabet=Alphabet.generic_dna),
id='Seq2'),
SeqRecord(Seq('C' * 30,
alphabet=Alphabet.generic_dna),
id='Seq3')
]
corr = pd.Series([True, True, np.nan],
index=[s.id + ' ' + s.description for s in loci])
return spacer, loci, corr
def test_basic(self):
spacer, loci, corr = self.get_basic_info()
est = build_estimator()
res = evaluators.check_spacer_across_loci(spacer, loci, est)
assert list(res.columns) == ['left', 'strand', 'spacer', 'target', 'score']
assert_series_equal(corr, res['score'], check_names=False)
assert_series_equal(pd.Series([10, 15], index=corr.index[:2]),
res['left'].iloc[:2],
check_dtype=False,
check_names=False)
assert_series_equal(pd.Series([1, 1], index=corr.index[:2]),
res['strand'].iloc[:2],
check_dtype=False,
check_names=False)
def test_basic_RC(self):
spacer, loci, _ = self.get_basic_info()
loci += do_rev_comp(loci)
corr = pd.Series([True, True, np.nan] * 2,
[s.id + ' ' + s.description for s in loci])
est = build_estimator()
res = evaluators.check_spacer_across_loci(spacer, loci, est)
assert_series_equal(corr, res['score'],
check_names=False,
check_dtype=False)
def test_accepts_short_seqs(self):
spacer, loci, _ = self.get_basic_info()
loci.append(SeqRecord(Seq('G' * 12,
alphabet=Alphabet.generic_dna),
id='Seq4'))
loci += do_rev_comp(loci)
corr = pd.Series([True, True, np.nan, np.nan] * 2,
index=[s.id + ' ' + s.description for s in loci])
est = build_estimator()
res = evaluators.check_spacer_across_loci(spacer, loci, est)
assert_series_equal(corr, res['score'],
check_dtype=False,
check_names=False)
def test_carries_series_index(self):
spacer, loci, _ = self.get_basic_info()
loci.append(SeqRecord(Seq('G' * 12,
alphabet=Alphabet.generic_dna),
id='Seq4'))
loci += do_rev_comp(loci)
index = pd.Index(['Seq%i' % i for i in range(len(loci))], name='SeqIndex')
loci = pd.Series(loci, index=index)
corr = pd.Series([True, True, np.nan, np.nan] * 2,
index=index)
est = build_estimator()
res = evaluators.check_spacer_across_loci(spacer, loci, est)
assert_series_equal(corr, res['score'],
check_names=False,
check_dtype=False)
assert_index_equal(corr.index, res.index)
def test_accepts_index(self):
spacer, loci, _ = self.get_basic_info()
loci.append(SeqRecord(Seq('G' * 12,
alphabet=Alphabet.generic_dna),
id='Seq4'))
loci += do_rev_comp(loci)
index = pd.Index(['Seq%i' % i for i in range(len(loci))], name='SeqIndex')
loci = | pd.Series(loci, index=index) | pandas.Series |
""" This module provides the functionality to calculate ephemeris for two bodies problem
also in the case of perturbed methods. More advance pertubed methods will be handled
in other module
"""
# Standard library imports
import logging
from math import isclose
from typing import ForwardRef
# Third party imports
import pandas as pd
import numpy as np
from numpy.linalg import norm
from toolz import pipe
# Local application imports
from myorbit.util.general import my_range, NoConvergenceError, my_isclose
import myorbit.data_catalog as dc
from myorbit.util.timeut import mjd2str_date
from myorbit.planets import g_xyz_equat_sun_j2000
from myorbit.kepler.keplerian import KeplerianStateSolver, ParabolicalStateSolver, EllipticalStateSolver
from myorbit.kepler.ellipitical import calc_rv_for_elliptic_orbit, calc_M
from myorbit.lagrange.lagrange_coeff import calc_rv_from_r0v0
from myorbit.util.general import mu_Sun, calc_eccentricity_vector, angle_between_vectors
from myorbit.pert_cowels import calc_eph_by_cowells
from myorbit.two_body import calc_eph_planet
from myorbit.util.timeut import EQX_B1950, EQX_J2000
from myorbit.ephemeris_input import EphemrisInput
from myorbit.pert_enckes import calc_eph_by_enckes
from myorbit.two_body import calc_eph_twobody
from myorbit.util.constants import *
logger = logging.getLogger(__name__)
def calc_tp(M0, a, epoch):
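    """Return the time of the next perihelion passage after `epoch`, computed from
    the mean anomaly M0 at epoch (assumed here to be in radians) and the semi-major
    axis a, using the Keplerian period 2*pi*sqrt(a^3/GM).
    """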
deltaT = TWOPI*np.sqrt(pow(a,3)/GM)*(1-M0/TWOPI)
return deltaT + epoch
def calc_comets_that_no_converge(delta_days):
"""The orbit of all comets is studied around the perihelion [-days, +days]
Parameters
----------
delta_days : int
[description]
"""
df = dc.DF_COMETS
not_converged=[]
for idx, name in enumerate(df['Name']):
obj = dc.read_comet_elms_for(name,df)
msg = f'Testing Object: {obj.name}'
print (msg)
logger.info(msg)
if hasattr(obj,'M0') :
M_at_epoch = obj.M0
else :
M_at_epoch = None
# from 20 days before perihelion passage to 20 days after 20 days perihelion passage
solver = KeplerianStateSolver.make(e=obj.e, a=obj.a, tp_mjd=obj.tp_mjd, q=obj.q, epoch=obj.epoch_mjd, M_at_epoch=M_at_epoch)
T0_MJD = obj.tp_mjd-delta_days
r0_xyz, rdot0_xyz, r0, h0_xyz, _ , f0 = solver.calc_rv(T0_MJD)
hs = []
es = []
for dt in range(2,delta_days*2,2):
clock_mjd = T0_MJD + dt
try :
r_xyz, rdot_xyz, h_xyz, f = calc_rv_from_r0v0(mu_Sun, r0_xyz, rdot0_xyz, dt, f0)
hs.append(np.linalg.norm(h_xyz))
es.append(np.linalg.norm(calc_eccentricity_vector(r_xyz, rdot_xyz,h_xyz)))
except NoConvergenceError :
print (f"===== Object {name} doest not converged at {clock_mjd} MJD")
not_converged.append(name)
if not all(isclose(h, hs[0], abs_tol=1e-12) for h in hs):
msg = f'The angular momentum is NOT constant in the orbit'
print (msg)
logger.error(msg)
if not all(isclose(ec, es[0], abs_tol=1e-12) for ec in es):
msg = f'The eccentric vector is NOT constant in the orbit'
print (msg)
logger.error(msg)
print (not_converged)
def test_all_bodies(delta_days):
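    """Sweep every body in the catalog over [tp - delta_days, tp + delta_days] around
    its computed perihelion passage, check that the angular momentum stays constant
    along the orbit and collect the objects where the solver does not converge.
    """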
df = dc.DF_BODIES
not_converged=[]
for idx, name in enumerate(df['Name']):
body = dc.read_body_elms_for(name,df)
msg = f'Testing Object: {body.name}'
solver = KeplerianStateSolver.make(e=body.e, a=body.a, epoch=body.epoch_mjd, M_at_epoch=body.M0)
tp = calc_tp(body.M0, body.a, body.epoch_mjd)
hs = []
try :
for clock_mjd in my_range(tp-delta_days, tp+delta_days, 2):
r_xyz, rdot_xyz, r, h = solver.calc_rv(clock_mjd)
hs.append(h)
if not all(isclose(h, hs[0], abs_tol=1e-12) for h in hs):
msg = f'The angular momentum is NOT constant in the orbit'
print (msg)
logger.error(msg)
except NoConvergenceError :
print (f"===========> NOT converged for object {name}")
not_converged.append(name)
if idx % 1000 == 0 :
print (f"================================================>> {idx}")
print (not_converged)
def test_almost_parabolical(delta_days):
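    """Check a fixed list of nearly parabolic comets around their perihelion passage
    with the elliptical state solver, verifying that the angular momentum vector
    stays constant along the orbit.
    """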
df = dc.DF_COMETS
not_converged=[]
names = ['C/1680 V1', 'C/1843 D1 (Great March comet)', 'C/1882 R1-A (Great September comet)', 'C/1882 R1-B (Great September comet)', 'C/1882 R1-C (Great September comet)', 'C/1882 R1-D (Great September comet)', 'C/1963 R1 (Pereyra)', 'C/1965 S1-A (Ikeya-Seki)', 'C/1965 S1-B (Ikeya-Seki)', 'C/1967 C1 (Seki)', 'C/1970 K1 (White-Ortiz-Bolelli)', 'C/2004 V13 (SWAN)', 'C/2011 W3 (Lovejoy)', 'C/2013 G5 (Catalina)', 'C/2020 U5 (PANSTARRS)']
#names = ['C/2020 U5 (PANSTARRS)']
df = df[df.Name.isin(names)]
for idx, name in enumerate(df['Name']):
if name not in names :
continue
obj = dc.read_comet_elms_for(name,df)
msg = f'Testing Object: {obj.name} with Tp:{mjd2str_date(obj.tp_mjd)}'
print (msg)
logger.info(msg)
if hasattr(obj,'M0') :
M_at_epoch = obj.M0
else :
M_at_epoch = None
# from 20 days before perihelion passage to 20 days after 20 days perihelion passage
#solver = ParabolicalStateSolver(obj.tp_mjd, obj.q, obj.e)
solver = EllipticalStateSolver(q=obj.q, a=obj.a, e=obj.e, tp_mjd=obj.tp_mjd, epoch_mjd=obj.epoch_mjd)
hs = []
for clock_mjd in my_range(obj.tp_mjd-delta_days, obj.tp_mjd+delta_days, 2):
r_xyz, rdot_xyz, r, h_xyz, *others = solver.calc_rv(clock_mjd)
hs.append(h_xyz)
print(mjd2str_date(clock_mjd))
if not all(np.allclose(h_xyz, hs[0], atol=1e-12) for h_xyz in hs):
msg = f'The angular momentum is NOT constant in the orbit'
print (msg)
logger.error(msg)
print (not_converged)
def test_comets_convergence(delta_days=50):
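    """For every comet, propagate from delta_days before perihelion onwards and count
    the non-convergences of the Keplerian solver versus the universal-variable
    propagation (calc_rv_from_r0v0); comets with problems are written to
    convergence_problems.csv.
    """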
df = dc.DF_COMETS
#FILTERED_OBJS = ['C/1680 V1', 'C/1843 D1 (Great March comet)', 'C/1882 R1-A (Great September comet)', 'C/1882 R1-B (Great September comet)', 'C/1882 R1-C (Great September comet)', 'C/1882 R1-D (Great September comet)', 'C/1963 R1 (Pereyra)', 'C/1965 S1-A (Ikeya-Seki)', 'C/1965 S1-B (Ikeya-Seki)', 'C/1967 C1 (Seki)', 'C/1970 K1 (White-Ortiz-Bolelli)', 'C/2004 V13 (SWAN)', 'C/2011 W3 (Lovejoy)', 'C/2013 G5 (Catalina)', 'C/2020 U5 (PANSTARRS)']
#FILTERED_OBJS=['C/1827 P1 (Pons)']
FILTERED_OBJS=[]
if len(FILTERED_OBJS) != 0:
df = df[df.Name.isin(FILTERED_OBJS)]
result = []
df = df.sort_values('e', ascending=False)
for idx, name in enumerate(df['Name']):
obj = dc.read_comet_elms_for(name,df)
solver = KeplerianStateSolver.make(e=obj.e, a=obj.a, tp_mjd=obj.tp_mjd, q=obj.q, epoch=obj.epoch_mjd)
T0_MJD = obj.tp_mjd-delta_days
r0_xyz, rdot0_xyz, r0, h0_xyz, _ , f0 = solver.calc_rv(T0_MJD)
kep_nc = uni_nc = 0
#print (f"Object {name} with e={obj.e}")
for dt in range(2,delta_days*2,2):
r1_xyz = rdot1_xyz = f1 = None
try :
r1_xyz, rdot1_xyz, r1, h1_xyz, _ , f1 = solver.calc_rv(T0_MJD+dt)
except NoConvergenceError :
kep_nc += 1
r2_xyz = rdot2_xyz = f2 = None
try :
r2_xyz, rdot2_xyz, h_xyz, f2 = calc_rv_from_r0v0(mu_Sun, r0_xyz, rdot0_xyz, dt, f0)
except NoConvergenceError :
uni_nc += 1
print (f"The noconvergence was with e: {obj.e}")
if (kep_nc >0) or (uni_nc > 0) :
row = {}
row['name'] = name
row['e'] = obj.e
row['kep_nc'] = kep_nc
row['uni_nc'] = uni_nc
result.append(row)
df_out = pd.DataFrame(result)
if len(df_out) > 0:
print (f'There are {len(df_out)} comets with convergence problems')
df_out = df_out.sort_values(by=['uni_nc','kep_nc'],ascending=False)
df_out.to_csv('convergence_problems.csv',index=False,header=True)
else :
print ("Undetected no-convergences")
def test_universal_kepler(delta_days=50):
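    """Cross-check the Keplerian solver against the universal-variable propagation
    for every comet: compare the resulting position, velocity and true anomaly,
    logging discrepancies above the tolerances, and collect the per-comet failure
    and non-convergence counts.
    """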
df = dc.DF_COMETS
FILTERED_OBJS=[]
#FILTERED_OBJS=['C/1933 D1 (Peltier)','C/1989 R1 (Helin-Roman)','C/2007 M5 (SOHO)','C/1988 M1 (SMM)','C/2008 C5 (SOHO)']
#FILTERED_OBJS=['C/2007 M5 (SOHO)']
# C/2000 O1 (Koehn)
    # This one has high non-convergence with 500: C/2000 O1 (Koehn)
if len(FILTERED_OBJS) != 0:
df = df[df.Name.isin(FILTERED_OBJS)]
df = df.sort_values('e', ascending=False)
result = []
for idx, name in enumerate(df['Name']):
obj = dc.read_comet_elms_for(name,df)
#print (name)
solver = KeplerianStateSolver.make(e=obj.e, a=obj.a, tp_mjd=obj.tp_mjd, q=obj.q, epoch=obj.epoch_mjd)
T0_MJD = obj.tp_mjd-delta_days
r0_xyz, rdot0_xyz, r0, h0_xyz, _ , f0 = solver.calc_rv(T0_MJD)
r_failed = v_failed = f_failed = nc_failed= 0
for dt in range(2,delta_days*2,2):
try :
r1_xyz, rdot1_xyz, r1, h1_xyz, _ , f1 = solver.calc_rv(T0_MJD+dt)
r2_xyz, rdot2_xyz, h2_xyz, f2 = calc_rv_from_r0v0(mu_Sun, r0_xyz, rdot0_xyz, dt, f0)
e_xyz = calc_eccentricity_vector(r1_xyz, rdot1_xyz, h1_xyz)
f3 = angle_between_vectors(e_xyz, r1_xyz)
if not isclose(f1,f2,rel_tol=0, abs_tol=1e-03):
f_failed += 1
msg=f"name: {obj.name}, TWOPI - f univ: {TWOPI-f2} f Universal: {f2} f Kepler: {f1} e:{obj.e} f Excentricity: {f3} f Excentricity: {TWOPI-f3}"
logger.error(msg)
if not my_isclose(r1_xyz, r2_xyz, abs_tol=1e-03):
msg = f"name: {obj.name}, e: {obj.e}, diff_rxyz ={np.linalg.norm(r1_xyz- r2_xyz)} diff_rdotxyz: {np.linalg.norm(rdot1_xyz- rdot2_xyz)}"
logger.error(msg)
r_failed += 1
if not my_isclose (rdot1_xyz, rdot2_xyz, abs_tol=1e-03) :
v_failed += 1
except NoConvergenceError :
nc_failed += 1
if (f_failed >0) or (r_failed > 0) or (v_failed > 0) or (nc_failed > 0):
row = {}
row['name'] = name
row['e'] = obj.e
row['f_failed'] = f_failed
row['r_failed'] = r_failed
row['v_failed'] = v_failed
row['nc_failed'] = nc_failed
result.append(row)
df_out = | pd.DataFrame(result) | pandas.DataFrame |
#! /usr/bin/env python3
import argparse
import re,sys,os,math,gc
import numpy as np
import pandas as pd
import matplotlib as mpl
import copy
import math
from math import pi
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from scipy import sparse
import seaborn as sns
#from scipy.interpolate import BSpline, make_interp_spline
plt.rcParams.update({'figure.max_open_warning': 100000})
plt.style.use('seaborn-colorblind')
mpl.rcParams['ytick.direction'] = 'out'
mpl.rcParams['savefig.dpi'] = 300  # image resolution (dpi)
mpl.rcParams['figure.dpi'] = 300
mpl.rcParams['pdf.fonttype']=42
mpl.rcParams['ps.fonttype']=42
__author__ ='赵玥'
__mail__ ='<EMAIL>'
__date__ ='20191101'
def draw_boundaries(ax,Boundary_dict,start,end,samplelist,str_x,sam_x):
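    """Draw TAD boundary midpoints of two samples as short vertical ticks on a single
    track: the first sample in red on the upper half, the second in green on the
    lower half, with track and sample labels placed at str_x / sam_x.
    """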
ax.tick_params(top='off',bottom='off',left='on',right='off')
for loc in ['top','left','right','bottom']:
ax.spines[loc].set_visible(False)
#ax.spines['left'].set_color('k')
#ax.spines['left'].set_linewidth(2)
#ax.spines['left'].set_smart_bounds(True)
#ax.spines['left'].set_linewidth(1)
#ax.spines['right'].set_visible(False)
#ax.spines['bottom'].set_visible(False)
ax.set_axis_bgcolor('w')
ax.set(xticks=[])
ax.set(yticks=[])
sample1 = samplelist[0]
sample2 = samplelist[1]
boundary_mid1 = Boundary_dict[sample1]['mid'].tolist()
boundary_mid2 = Boundary_dict[sample2]['mid'].tolist()
bound_y1min = [1.25 for i in boundary_mid1]
bound_y1max = [1.75 for i in boundary_mid1]
bound_y2min = [0.25 for i in boundary_mid2]
bound_y2max = [0.75 for i in boundary_mid2]
ax.set_ylim(0,2)
ax.vlines(boundary_mid1,bound_y1min,bound_y1max,lw=2,color='red')
ax.vlines(boundary_mid2,bound_y2min,bound_y2max,lw=2,color='green')
ax.set_xlim(start,end)
ax.text(str_x,0.5,'bound',horizontalalignment='right',verticalalignment='center',rotation='vertical',transform=ax.transAxes,fontsize=8)
ax.text(sam_x,0.75,sample1,horizontalalignment='right',verticalalignment='center',rotation='horizontal',transform=ax.transAxes,color="red",fontsize=8)
ax.text(sam_x,0.25,sample2,horizontalalignment='right',verticalalignment='center',rotation='horizontal',transform=ax.transAxes,color="green",fontsize=8)
def cut_boundaries(Boundary_dict,sample,boundaryPath,chrom,start,end):
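    """Read a boundary table, keep the boundaries whose midpoint falls inside
    [start, end] and store the resulting dataframe in Boundary_dict under the sample
    name (note that the chrom argument is not used for filtering here).
    """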
Boundary_df = pd.read_table(boundaryPath,header=0,index_col=None,encoding='utf-8')
Boundary_df = Boundary_df.fillna(0)
Boundary_df = Boundary_df[['start','end']]
Boundary_df['mid'] = (Boundary_df['start'] + Boundary_df['end'])/2
Boundary_df = Boundary_df[Boundary_df['mid']>=start]
Boundary_df = Boundary_df[Boundary_df['mid']<=end]
Boundary_df.reset_index(drop=True)
Boundary_dict[sample] = Boundary_df
return Boundary_dict
def draw_insulation(ax,insu,chrs,start,end,color):
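    """Plot an insulation-score track (tab-separated chrs/start/end/insu file) as a
    line, restricting the x-axis to [start, end] and stripping ticks and spines.
    """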
#df_insu=cut_insulation(insu,chrs,start,end)
df_insu=pd.read_table(insu,sep='\t',names=['chrs','start','end','insu'])
ax.tick_params(top='off',bottom='off',left='on',right='off')
line=ax.plot(df_insu['start'],df_insu['insu'], color=color, linewidth=0.8, label="insulation")
ax.set_xlim(start,end)
ax.set_xticks([])
ax.set_ylim(df_insu['insu'].min(),df_insu['insu'].max())
#ax.set_yticks([df_insu['insu'].min(),df_insu['insu'].max()])
for loc in ['left','top','bottom']:
ax.spines[loc].set_linewidth(0)
ax.spines[loc].set_color('black')
ax.spines['right'].set_linewidth(0)
ax.spines[loc].set_color('black')
def draw_SV(files,ax,chrom,start,end,sample,color,types,type='top'):  # 'type' kwarg is an added assumption so the bottom/top branch below is selectable, as in the other draw_* functions
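    """Draw structural variants of the requested `types` for one sample as unit-height
    bars over [start, end]; type='bottom' draws the x-axis scale and chromosome label,
    otherwise the x-axis is hidden.
    """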
markdf=pd.read_table(files,sep='\t')
markdf=markdf[markdf['types']==types]
markdf=markdf[markdf['chrs']==chrom]
markdf=markdf[markdf['start']>start]
markdf=markdf[markdf['end']<end]
ax.tick_params(left='on',right='off',top='off',bottom='on')
markdf['width'] = markdf['end'] - markdf['start']
markdf['sign']=[1]*len(markdf)
#vectorf = np.vectorize(np.float)
#vectori = np.vectorize(np.int)
#starts=list(markdf['start'])
#hight=list(markdf['sign'])
#width=(markdf['width'])
ax.bar(x=list(markdf['start']),height=list(markdf['sign']),bottom=0, width = list(markdf['width']),color=color,linewidth=0,align='edge')
ax.set_xlim([start,end])
ax.set_ylim([0,1])
xts = np.linspace(start,end,2)
yts = np.linspace(0,1,2)
xtkls = ['{:,}'.format(int(i)) for i in xts]
ytkls = ['{:,}'.format(int(j)) for j in yts]
ax.tick_params(direction='out',pad=1)
ax.set_yticks([])
#ax.set_yticklabels(ytkls,fontsize=5)
ax.text(-0.11,0.0,sample,fontsize=12,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='vertical',transform=ax.transAxes)
#ax.set_title("{}_{}_{}_{}".format(sample,chrom,start,end),fontsize=10)
ax.spines['bottom'].set_linewidth(0)
ax.spines['left'].set_linewidth(0)
ax.spines['right'].set_linewidth(0)
ax.spines['top'].set_linewidth(0)
if type =='bottom':
ax.set_xticks(xts)
ax.set_xticklabels(xtkls,fontsize=12)
ax.spines['bottom'].set_linewidth(0.5)
ax.spines['bottom'].set_color('k')
ax.text(-0.11,-0.7,chrom,fontsize=12,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='horizontal',transform=ax.transAxes)
else:
ax.set_xticks([])
ax.set_xticklabels('')
markdf = pd.DataFrame()
gc.collect()
def cut_insulation(insu,chrs,start,end):
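    """From a file listing per-chromosome insulation bedGraph paths (one path per
    line, with the chromosome name as the parent directory), load the table for
    `chrs` and return the rows overlapping [start, end] with a `mid` column added.
    """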
file=open(insu)
file_list=[]
for i in file:
i=i.strip()
file_list.append(i)
insu_list=[]
for i in range(len(file_list)):
x=file_list[i].split('/')
insu_list.append([x[-2],file_list[i]])
list_df=pd.DataFrame(insu_list,columns=['chrs','insu'])
list_df=list_df[list_df['chrs']==chrs]
list_df=list_df.reset_index(drop=True)
df_insu=pd.read_table(list_df['insu'][0],sep='\t',names=['chrs','start','end','insu'],comment='t')
df_insu['mid']=(df_insu['start']+df_insu['end'])/2
df_insu=df_insu.fillna(0)
df_insu=df_insu[(df_insu['start']>start)&(df_insu['end']<end)]
return df_insu
def draw_AB(files,res,chrom,start,end,sample,ax):
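    """Draw an A/B compartment track from an eigenvector bedGraph: bins with positive
    eigen1 are drawn as red bars above zero (A) and negative bins as blue bars below
    zero (B), with the y-axis fixed to [-0.1, 0.1].
    """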
compartdf = pd.read_table(files,sep='\t',names=['chrom','start','end','eigen1'])
compartdf = compartdf[compartdf['chrom']==chrom]
compartdf = compartdf.reset_index(drop=True)
df = compartdf
df=df[df['end']>=start]
df=df[df['start']<=end]
df=df.reset_index(drop=True)
ax.tick_params(top='off',bottom='on',left='off',right='off')
for loc in ['left','right','top','bottom']:
ax.spines[loc].set_visible(False)
df['width']=df['end']-df['start']
#ax.axis([start, end, min,max])
for i in range(len(df)):
if df['eigen1'][i]>0:
ax.bar(x=df['start'][i],height=df['eigen1'][i],bottom=0, width = df['width'][i],color='#E7605B',linewidth=0,align='edge')
else:
ax.bar(x=df['start'][i],height=df['eigen1'][i],bottom=0, width = df['width'][i],color='#3B679E',linewidth=0,align='edge')
ax.set_ylim(-0.1,0.1)
ax.set_ylabel(sample)
ax.set_yticks([])
ax.set_xticks([])
def Express_Swith(Epipath,chrom,start,end):
Expressdf = pd.read_table(Epipath,header=None,index_col=False,sep='\t')
Expressdf.columns = ['chrom','start','end','sign']
Expressdf = Expressdf[Expressdf['chrom']==chrom]
Expressdf = Expressdf[Expressdf['start']>=int(start)]
Expressdf = Expressdf[Expressdf['end']<=int(end)]
Expressdf = Expressdf.reset_index(drop=True)
return Expressdf
def draw_epigenetic(file,ax,chrom,start,end,sample,color,MaxYlim,type,mins):
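    """Draw a bedGraph signal (chrs/start/end/sign) as a bar track for one sample;
    MaxYlim/mins fix the y range (the string 'None' means autoscale from the data)
    and type='bottom' adds the x-axis scale and chromosome label.
    """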
markdf=pd.read_table(file,sep='\t',names=['chrs','start','end','sign'])
markdf=markdf[markdf['chrs']==chrom]
markdf=markdf[markdf['start']>start]
markdf=markdf[markdf['end']<end]
ax.tick_params(left='on',right='off',top='off',bottom='on')
markdf['width'] = markdf['end'] - markdf['start']
recs = ax.bar(x=list(markdf['start']),height=list(markdf['sign']),bottom=0, width = list(markdf['width']),color=color,linewidth=0,align='edge')
if MaxYlim == 'None':
ymaxlim = markdf['sign'].max()
yminlim = markdf['sign'].min()
else:
ymaxlim = float(MaxYlim)
yminlim = float(mins)
ax.set_xlim([start,end])
ax.set_ylim([yminlim,ymaxlim])
xts = np.linspace(start,end,5)
yts = np.linspace(yminlim,ymaxlim,2)
xtkls = ['{:,}'.format(int(i)) for i in xts]
ytkls = ['{:,}'.format(float(j)) for j in yts]
ax.tick_params(direction='out',pad=1)
ax.set_yticks(yts)
ax.set_yticklabels(ytkls,fontsize=5)
ax.text(-0.11,0.4,sample,fontsize=6,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='horizontal',transform=ax.transAxes)
ax.spines['bottom'].set_linewidth(1)
ax.spines['left'].set_linewidth(1)
ax.spines['right'].set_linewidth(0)
ax.spines['top'].set_linewidth(0)
#ax.set_title("{}_{}_{}_{}".format(sample,chrom,start,end),fontsize=10)
if type =='bottom':
ax.set_xticks(xts)
ax.set_xticklabels(xtkls,fontsize=8)
ax.spines['bottom'].set_linewidth(0.5)
ax.spines['bottom'].set_color('k')
ax.text(-0.11,-0.7,chrom,fontsize=8,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='horizontal',transform=ax.transAxes)
else:
ax.set_xticks([])
ax.set_xticklabels('')
markdf = pd.DataFrame()
gc.collect()
def draw_epigenetic2(file,ax,chrom,start,end,sample,color,MaxYlim,type,mins):
markdf=pd.read_table(file,sep='\t',names=['chrs','start','end','sign'])
#print (markdf.head())
markdf=markdf[markdf['chrs']==chrom]
markdf=markdf[markdf['start']>start]
markdf=markdf[markdf['end']<end]
ax.tick_params(left='on',right='off',top='off',bottom='on')
    markdf['width'] = markdf['end'] - markdf['start']
x = np.linspace(start,end,int(len(markdf)/8))
a_BSpline=make_interp_spline(markdf['start'],markdf['sign'],k=3)
y_new=a_BSpline(x)
ax.plot(x, y_new, color=color,linewidth=2)
ax.fill_between(x,y_new ,0,facecolor=color,linewidth=0,label=sample)
if MaxYlim == 'None':
ymaxlim = markdf['sign'].max()
yminlim = markdf['sign'].min()
else:
ymaxlim = float(MaxYlim)
yminlim = float(mins)
ax.set_xlim([start,end])
ax.set_ylim([yminlim,ymaxlim])
xts = np.linspace(start,end,4)
yts = np.linspace(yminlim,ymaxlim,2)
xtkls = ['{:,}'.format(int(i)) for i in xts]
ytkls = ['{:,}'.format(int(j)) for j in yts]
ax.spines['bottom'].set_linewidth(1)
ax.spines['left'].set_linewidth(1)
ax.spines['right'].set_linewidth(0)
ax.spines['top'].set_linewidth(0)
ax.tick_params(top=False,right=False,width=1,colors='black',direction='out')
ax.set_yticks(yts)
ax.set_yticklabels(ytkls,fontsize=12)
ax.text(-0.11,0.0,sample,fontsize=12,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='vertical',transform=ax.transAxes)
#ax.set_title("{}_{}_{}_{}".format(sample,chrom,start,end),fontsize=10)
if type =='bottom':
ax.set_xticks(xts)
ax.set_xticklabels(xtkls,fontsize=12)
ax.spines['bottom'].set_linewidth(0.5)
ax.spines['bottom'].set_color('k')
ax.text(-0.11,-0.7,chrom,fontsize=8,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='horizontal',transform=ax.transAxes)
else:
ax.set_xticks([])
ax.set_xticklabels('')
markdf = pd.DataFrame()
gc.collect()
def draw_RNA(file,ax,chrom,start,end,sample,color,MaxYlim,type,mins):
markdf=pd.read_table(file,sep='\t',names=['chrs','start','end','sign'])
#print (markdf.head())
markdf=markdf[markdf['chrs']==chrom]
markdf=markdf[markdf['start']>start]
markdf=markdf[markdf['end']<end]
ax.tick_params(left='on',right='off',top='off',bottom='on')
markdf['width'] = markdf['end'] - markdf['start']
    vectorf = np.vectorize(float)  # np.float/np.int were removed from recent numpy; the builtins behave the same here
    vectori = np.vectorize(int)
starts=vectori(markdf['start'])
hight=vectorf(markdf['sign'])
width=vectori(markdf['width'])
ax.bar(x=starts,height=hight,bottom=0,width=width,color=color,linewidth=0,align='edge')
if MaxYlim == 'None':
ymaxlim = markdf['sign'].max()
yminlim = markdf['sign'].min()
else:
ymaxlim = float(MaxYlim)
yminlim = float(mins)
ax.set_xlim([start,end])
ax.set_ylim([yminlim,ymaxlim])
xts = np.linspace(start,end,5)
yts = np.linspace(yminlim,ymaxlim,2)
xtkls = ['{:,}'.format(int(i)) for i in xts]
ytkls = ['{:,}'.format(int(j)) for j in yts]
ax.tick_params(direction='out',pad=1)
ax.spines['bottom'].set_linewidth(1)
ax.spines['left'].set_linewidth(1)
ax.spines['right'].set_linewidth(0)
ax.spines['top'].set_linewidth(0)
ax.set_yticks(yts)
ax.set_yticklabels(ytkls,fontsize=12)
ax.text(-0.11,0.4,sample,fontsize=12,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='vertical',transform=ax.transAxes)
#ax.set_title("{}_{}_{}_{}".format(sample,chrom,start,end),fontsize=10)
if type =='bottom':
ax.set_xticks(xts)
ax.set_xticklabels(xtkls,fontsize=12)
ax.spines['bottom'].set_linewidth(0.5)
ax.spines['bottom'].set_color('k')
ax.text(-0.11,-0.7,chrom,fontsize=12,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='horizontal',transform=ax.transAxes)
else:
ax.set_xticks([])
ax.set_xticklabels('')
markdf = pd.DataFrame()
gc.collect()
def Express_Swith(Epipath,chrs,start,end):
Expressdf = pd.read_table(Epipath,header=None,index_col=False,sep='\t')
Expressdf.columns = ['chrs','start','end','sign']
Expressdf = Expressdf[Expressdf['chrs']==chrs]
Expressdf = Expressdf[Expressdf['start']>=int(start)]
Expressdf = Expressdf[Expressdf['end']<=int(end)]
Expressdf = Expressdf.reset_index(drop=True)
return Expressdf
def draw_diff_epigenetic(file1,file2,ax,chrs,start,end,color,MaxYlim,MinYlim,type):
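    """Draw the per-bin log2 ratio of two bedGraph signals (file1 over file2) as a
    bar track, with the y range taken from MaxYlim/MinYlim unless MaxYlim is the
    string 'None'.
    """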
df1=Express_Swith(file1,chrs,start,end)
df2=Express_Swith(file2,chrs,start,end)
markdf = pd.merge(df1,df2,on='start',how='inner')
markdf['sign'] = np.log2(markdf['sign_x']) - np.log2(markdf['sign_y'])
markdf = markdf[['chrs_x','start','end_x','sign']]
markdf.columns = ['chrs','start','end','sign']
markdf = markdf.reset_index(drop=True)
ax.tick_params(left='on',right='off',top='off',bottom='on')
markdf['width'] = markdf['end'] - markdf['start']
recs = ax.bar(markdf['start'],markdf['sign'],bottom=0, width = markdf['width'],color=color,linewidth=0)
if MaxYlim == 'None':
ymaxlim = markdf['sign'].max()
yminlim = markdf['sign'].min()
else:
ymaxlim = float(MaxYlim)
yminlim = float(MinYlim)
ax.set_xlim([start,end])
ax.set_ylim([yminlim,ymaxlim])
xts = np.linspace(start,end,5)
yts = np.linspace(yminlim,ymaxlim,2)
xtkls = ['{:,}'.format(int(i)) for i in xts]
ytkls = ['{:,}'.format(int(j)) for j in yts]
ax.tick_params(direction='out',pad=1)
ax.set_yticks(yts)
ax.set_yticklabels(ytkls,fontsize=5)
#ax.text(-0.11,0.4,sample,fontsize=6,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='horizontal',transform=ax.transAxes)
#ax.set_title("{}_{}_{}_{}".format(sample,chrom,start,end),fontsize=10)
if type =='bottom':
ax.set_xticks(xts)
ax.set_xticklabels(xtkls,fontsize=8)
ax.spines['bottom'].set_linewidth(0.5)
ax.spines['bottom'].set_color('k')
ax.text(-0.11,-0.7,chrs,fontsize=8,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='horizontal',transform=ax.transAxes)
else:
ax.set_xticks([])
ax.set_xticklabels('')
markdf = pd.DataFrame()
ax.spines['bottom'].set_linewidth(0)
ax.spines['left'].set_linewidth(1)
ax.spines['top'].set_linewidth(0)
ax.spines['right'].set_linewidth(0)
gc.collect()
def draw_bar(ax,file,chrom,start,end,max,min):
df=pd.read_table(file,sep='\t',names=['chrs','start','end','sign'])
df=df[df['chrs']==chrom]
df=df[df['start']>start]
df=df[df['end']<end]
df=df.reset_index(drop=True)
ax.tick_params(top='off',bottom='on',left='off',right='off')
for loc in ['left','right','top']:
ax.spines[loc].set_visible(False)
df['width']=df['end']-df['start']
#ax.axis([start, end, min,max])
for i in range(len(df)):
if df['sign'][i]>0:
ax.bar(df['start'][i],df['sign'][i],bottom=0, width = df['width'][i],color='#E7605B',linewidth=0)
else:
ax.bar(df['start'][i],df['sign'][i],bottom=0, width = df['width'][i],color='#3B679E',linewidth=0)
ax.set_ylim(min,max)
ax.set_yticks([])
ax.set_xticks([])
def get_4C_data(matrix,tstart,tend,binsize,start,end):
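    """Extract a virtual-4C profile: return the matrix column of the bin containing
    tstart, i.e. the interaction of every bin in the plotted region with the
    viewpoint bin.
    """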
print (binsize)
t=int((tstart-start)/int(binsize))
print ('t',t,'matrix',len(matrix))
datalist=matrix.loc[:,[t]]
return datalist
from statsmodels.nonparametric.smoothers_lowess import lowess
def draw_4C_module(ax,df_list,chrs,start,end,color_list,ymin,ymax,sample_list):
ax.tick_params(top='off',bottom='off',left='on',right='off')
i=0
for df in df_list:
x = np.linspace(start,end,len(df))
df['width']=df['end']-df['start']
df_loess = pd.DataFrame(lowess(df['sign'], np.arange(len(df['sign'])), frac=0.05)[:, 1], index=df.index, columns=['sign'])
ax.plot(x,df_loess['sign'], color=color_list[i], linewidth=2,label=sample_list[i],alpha=0.3)
i+=1
#ax.legend(handles2, labels2)
ax.set_xlim(start,end)
ax.set_ylim(ymin,ymax)
ax.set_yticks([ymin,ymax])
ax.legend(loc='right',bbox_to_anchor=(1.05,0.3),handlelength=1,handleheight=0.618,fontsize=6,frameon=False)
for loc in ['left']:
ax.spines[loc].set_linewidth(0.6)
ax.spines[loc].set_color('gray')
#ax.tick_params(top=False,right=False,width=1,colors='black',direction='out')
ax.spines['bottom'].set_linewidth(1)
ax.spines['left'].set_linewidth(1)
ax.spines['right'].set_linewidth(0)
ax.spines['top'].set_linewidth(0)
ax.tick_params(top=False,right=False,bottom=False,width=1,colors='black',direction='out')
def draw_4C(ax,chrs,start,end,matrix_list,samples,binsize,tstart,tend,colors,ymax):
sample_list=samples.split(',')
bed_list=[]
for sample in sample_list:
matrix,min=extract_raw_matrix(matrix_list,sample,chrs,start,end,binsize)
datalist=get_4C_data(matrix,int(tstart),int(tend),binsize,int(start),int(end))
bed_list.append(datalist)
starts=[]
for i in range(start,end,int(binsize)):
starts.append(i)
df_list=[]
for i in bed_list:
df=pd.DataFrame({'start':starts})
df['chrs']=[chrs]*len(df)
df['end']=df['start']+int(binsize)
df['sign']=i
df_list.append(df)
color_list=colors.split(',')
draw_4C_module(ax,df_list,chrs,start,end,color_list,0,int(ymax),sample_list)
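# Example usage (hedged; extract_raw_matrix is defined elsewhere in this script,
# and every value below is a placeholder):
# draw_4C(ax, 'chr1', 1000000, 3000000, matrix_list, 'WT,KO',
#         10000, 1500000, 1510000, '#E7605B,#3B679E', 50)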
def draw_compartment(ax,sample,compmergedf,chrom,start,end,type='top'):
ax.tick_params(top='off',bottom='on',left='off',right='off')
for loc in ['left','right','top']:
ax.spines[loc].set_visible(False)
mat = compmergedf[sample]
#print(mat)
s = compmergedf['start']
colors = ['red','blue','#458B00','#B9BBF9','black']
ax.set_xlim(start,end)
if sample == 'Merge':
ax.fill_between(s, 0, 0.25,where=mat==1, facecolor=colors[0],linewidth=0,label='CompartmentA')
ax.fill_between(s, 0.25, 0.5,where=mat==2, facecolor=colors[2],linewidth=0,label='A Switch B')
ax.fill_between(s, 0, -0.25,where=mat==-1,facecolor=colors[1],linewidth=0,label='CompartmentB')
ax.fill_between(s, -0.25,-0.5,where=mat==-2,facecolor=colors[3],linewidth=0,label='B Switch A')
legend = ax.legend(bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.,prop={'size':4},ncol=1)
legend.get_frame().set_facecolor('white')
else:
ax.fill_between(s, 0, mat,where=mat>= 0, facecolor=colors[0],linewidth=0,label='CompartmentA')
ax.fill_between(s, 0, mat,where=mat< 0, facecolor=colors[1],linewidth=0,label='CompartmentB')
#ax.text(max(mat)/4,-5,'A');ax.text(max(mat)/2,-5,'B')
ax.text(-0.11,0.4,sample,fontsize=6,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='horizontal',transform=ax.transAxes)
ymax = mat.max()+0.005
ymin = mat.min()-0.005
xts = np.linspace(start,end,5)
xtkls = ['{:,}'.format(int(i)) for i in xts]
ax.set_ylim(ymin,ymax)
ax.set_yticks([])
#ax.set_ylabel(sample,rotation='vertical',fontsize='small')
#compmergedf = pd.DataFrame()
if type =='bottom':
ax.set_xticks(xts)
ax.set_xticklabels(xtkls,fontsize=8)
ax.spines['bottom'].set_linewidth(1)
ax.spines['bottom'].set_color('k')
ax.text(-0.11,-0.7,chrom,fontsize=8,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='horizontal',transform=ax.transAxes)
else:
ax.set_xticks([])
ax.spines['bottom'].set_visible(False)
gc.collect()
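# Example usage (sketch): compmergedf is assumed to hold a 'start' column plus one
# PC1 column per sample, or a 'Merge' column coded 1/2/-1/-2 for A/B switches.
# draw_compartment(ax, 'WT', compmergedf, 'chr1', 0, 50000000, type='bottom')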
def colorbar(ax,im,vmin,vmax):
axins1 = inset_axes(ax, width=0.1,height=0.6,loc=3, bbox_to_anchor=(0, 0.2, 0.5, 1), bbox_transform=ax.transAxes,borderpad=0)
print (vmin,vmax)
cbar=plt.colorbar(im, cax=axins1, orientation='vertical',ticks=[math.ceil(vmin),int(vmax)])
axins1.tick_params(left='on',right='off',top='off',bottom='off',labelsize=12)
axins1.yaxis.set_ticks_position('left')
return cbar
import math
from matplotlib import pyplot as plt
plt.style.use('seaborn-colorblind')
pd.set_option('display.precision',2)
from scipy import sparse
import matplotlib.gridspec as gridspec
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
#from annotation_GenePred import Gene_annotation
def cut_mat(mat,start,end,resolution,min):
start = int(int(start)/resolution)
end = math.ceil(int(end)/resolution)
start = int(start - min)
end = int(end - min)
mat = mat.fillna(0)
mat = mat.iloc[start:end+1,start:end+1]
gc.collect()
return mat,start,end
def self_zscore(df):
dsc = pd.DataFrame(np.ravel(df)).describe(include=[np.number])
df = (df - dsc.loc['mean',0])/dsc.loc['std',0]
return df
from scipy.ndimage import gaussian_filter
def get_matrix(mat_path,binsize,start,end):
binsize=int(binsize)
mat=pd.read_table(mat_path,names=['b1','b2','contacts'])
mat=mat[(mat['b1']>=start-3000000) & (mat['b2']>=start-3000000)]
mat=mat[(mat['b1']<=end+3000000) & (mat['b2']<=end+3000000)]
#-----------xlim genome start genome end-------------------------------
min=mat['b1'].min()
max=mat['b1'].max()
min=math.ceil(int(min)/binsize)*binsize
max=int(int(max)/binsize)*binsize
N=int(max/binsize)-math.ceil(min/binsize)+1
mat['b1']=mat['b1'].apply(lambda x: (x-min-1)/binsize)
mat['b2']=mat['b2'].apply(lambda x: (x-min-1)/binsize)
#-----------coo matrix-----------------------------------------------
counts=sparse.coo_matrix((mat['contacts'],(mat['b1'],mat['b2'])),shape=(N, N),dtype=float).toarray()
diag_matrix=np.diag(np.diag(counts))
counts=counts.T + counts
#counts=counts-diag_matrix
counts=counts-diag_matrix-diag_matrix
df=pd.DataFrame(counts)
#----------zscore minus ---------------------------------
df=self_zscore(df)
min=int(min/binsize)
df,min,max=cut_mat(df,start,end,binsize,min)
np.fill_diagonal(df.values, 0)
return df,min
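# Example usage (hedged): the matrix file is assumed to be a three-column
# bin1/bin2/contacts text file; the path, bin size and region are placeholders.
# zscore_mat, start_bin = get_matrix('chr1_10kb.matrix', 10000, 1000000, 3000000)
# print(zscore_mat.shape, start_bin)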
def get_matrix_df(lists,sample,chrs):
    df=pd.read_table(lists,sep='\t',names=['sample','chrs','matrix'])
# coding: utf-8
# NIDEM LiDAR tidal tagging
#
# This script imports multiple xyz .csv files for each LiDAR validation site, converts GPS timestamps to UTC, then
# uses these to compute tide heights at the exact moment each point was acquired during the LiDAR survey.
# Non-inundated points are then identified by selecting points located above the water's surface at the time
# each point was acquired. These non-inundated points (representing intertidal and terrestrial locations)
# are then exported as a single .csv.
#
# Date: January 2019
# Author: <NAME>, <NAME>, <NAME>
import glob
import os
import pandas as pd
import numpy as np
import datetime as dt
from otps.predict_wrapper import predict_tide
from otps import TimePoint
from pytz import timezone
def gps_week(input_datetime):
"""
Computes GPS week number since start of GPS time epoch (6 Jan 1980).
Currently does not account for leap seconds (only affects 10-15 second
window around midnight Saturday night/Sunday morning each week)
:param input_datetime: Datetime object used to identify GPS week
:return: GPS weeks since start of GPS epoch (6 Jan 1980)
"""
# Identify GPS week from GPS epoch using floor division
gps_epoch = dt.datetime(1980, 1, 6)
delta = input_datetime - gps_epoch
gps_week_num = int(np.floor(delta.total_seconds() / 86400 / 7))
return gps_week_num
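# Example: 2019-01-15 falls in GPS week 2036.
# print(gps_week(dt.datetime(2019, 1, 15)))  # -> 2036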
def gps_adj_utc(gps_adj, leap_seconds=10):
"""
Converts between adjusted GPS time and UTC, returning a datetime object.
This assumes adjusted GPS time has already had - 1 billion subtracted from it;
if you have unadjusted GPS time instead, subtract 1 billion before inputting
it into this function.
:param gps_adj: Adjusted GPS time
:param leap_seconds: Leap seconds since start of GPS epoch; default 10
:return: Datetime object with converted time in UTC
"""
# Identify UTC and GPS epochs and compute offset between them
utc_epoch = dt.datetime(1970, 1, 1)
gps_epoch = dt.datetime(1980, 1, 6)
utc_offset = (gps_epoch - utc_epoch).total_seconds() - leap_seconds
# Convert to unix time then UTC by adding 1 billion + UTC offset to GPS time
unix_timestamp = utc_offset + (int(gps_adj) + 1000000000)
utc_time = dt.datetime.utcfromtimestamp(unix_timestamp)
# Set UTC timezone info
utc_time = utc_time.replace(tzinfo=timezone('UTC'))
return utc_time
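# Example: adjusted GPS time 0 (i.e. 1e9 GPS seconds after the 1980-01-06 epoch)
# converts to 2011-09-14 01:46:30 UTC with the default 10 leap seconds.
# print(gps_adj_utc(0))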
def gps_sotw_utc(gps_sotw, reference_date, leap_seconds=10):
"""
Computes UTC time from GPS Seconds of Week format time
:param gps_sotw: GPS seconds-of-the-week value
:param reference_date: Date used to compute current GPS week number
:param leap_seconds: Leap seconds since start of GPS epoch; default 10
:return: Datetime object with converted time in UTC
"""
# First test if GPS seconds-of-week fall within 0 and 604800 seconds
if 0 <= int(gps_sotw) <= dt.timedelta(days=7).total_seconds():
# Identify UTC and GPS epochs and compute offset between them
utc_epoch = dt.datetime(1970, 1, 1)
gps_epoch = dt.datetime(1980, 1, 6)
utc_offset = (gps_epoch - utc_epoch).total_seconds() - leap_seconds
# Identify GPS week
gps_week_num = gps_week(reference_date)
# Compute difference between UTC epoch and GPS time, then add GPS week days
unix_timestamp = utc_offset + int(gps_sotw)
utc_basetime = dt.datetime.utcfromtimestamp(unix_timestamp)
utc_time = utc_basetime + dt.timedelta(days=gps_week_num * 7)
# Set UTC timezone info
utc_time = utc_time.replace(tzinfo=timezone('UTC'))
return utc_time
else:
print("GPS seconds-of-week must be between 0 and 604800 seconds")
return None
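# Example: 3600 seconds into the GPS week containing 2019-01-15 (week 2036)
# corresponds to 2019-01-13 00:59:50 UTC with the default 10 leap seconds.
# print(gps_sotw_utc(3600, dt.datetime(2019, 1, 15)))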
#########
# Setup #
#########
# User input to read in setup parameters from file
os.chdir('/g/data/r78/rt1527/nidem')
# Dict of study areas and files to process
study_areas_df = pd.read_csv('lidar_study_areas.csv', index_col=0)
study_areas = study_areas_df.to_dict('index')
# Iterate through each study area
# for name in study_areas.keys():
for name in ['SAcoastal']:
# Read in study area details
input_location = study_areas[name]['input_loc']
# Test if tidal tagging is required for area
if not pd.isnull(input_location):
print(name)
###############
# Import data #
###############
# Iterate through each file and merge list of dataframes into single dataframe
point_files = glob.glob('raw_data/validation/*{}*.csv'.format(name))
df_list = [pd.read_csv(input_file, sep=",") for input_file in point_files]
        points_df = pd.concat(df_list)
from __future__ import absolute_import, division, unicode_literals
import unittest
import jsonpickle
from helper import SkippableTest
try:
import pandas as pd
import numpy as np
from pandas.testing import assert_series_equal
from pandas.testing import assert_frame_equal
from pandas.testing import assert_index_equal
except ImportError:
np = None
class PandasTestCase(SkippableTest):
def setUp(self):
if np is None:
self.should_skip = True
return
self.should_skip = False
import jsonpickle.ext.pandas
jsonpickle.ext.pandas.register_handlers()
def tearDown(self):
if self.should_skip:
return
import jsonpickle.ext.pandas
jsonpickle.ext.pandas.unregister_handlers()
def roundtrip(self, obj):
return jsonpickle.decode(jsonpickle.encode(obj))
def test_series_roundtrip(self):
if self.should_skip:
return self.skip('pandas is not importable')
ser = pd.Series(
{
'an_int': np.int_(1),
'a_float': np.float_(2.5),
'a_nan': np.nan,
'a_minus_inf': -np.inf,
'an_inf': np.inf,
'a_str': np.str_('foo'),
'a_unicode': np.unicode_('bar'),
'date': np.datetime64('2014-01-01'),
'complex': np.complex_(1 - 2j),
# TODO: the following dtypes are not currently supported.
# 'object': np.object_({'a': 'b'}),
}
)
decoded_ser = self.roundtrip(ser)
assert_series_equal(decoded_ser, ser)
def test_dataframe_roundtrip(self):
if self.should_skip:
return self.skip('pandas is not importable')
df = pd.DataFrame(
{
'an_int': np.int_([1, 2, 3]),
'a_float': np.float_([2.5, 3.5, 4.5]),
'a_nan': np.array([np.nan] * 3),
'a_minus_inf': np.array([-np.inf] * 3),
'an_inf': np.array([np.inf] * 3),
'a_str': np.str_('foo'),
'a_unicode': np.unicode_('bar'),
'date': np.array([np.datetime64('2014-01-01')] * 3),
'complex': np.complex_([1 - 2j, 2 - 1.2j, 3 - 1.3j]),
# TODO: the following dtypes are not currently supported.
# 'object': np.object_([{'a': 'b'}]*3),
}
)
decoded_df = self.roundtrip(df)
assert_frame_equal(decoded_df, df)
def test_multindex_dataframe_roundtrip(self):
if self.should_skip:
return self.skip('pandas is not importable')
df = pd.DataFrame(
{
'idx_lvl0': ['a', 'b', 'c'],
'idx_lvl1': np.int_([1, 1, 2]),
'an_int': np.int_([1, 2, 3]),
'a_float': np.float_([2.5, 3.5, 4.5]),
'a_nan': np.array([np.nan] * 3),
'a_minus_inf': np.array([-np.inf] * 3),
'an_inf': np.array([np.inf] * 3),
'a_str': np.str_('foo'),
'a_unicode': np.unicode_('bar'),
}
)
df = df.set_index(['idx_lvl0', 'idx_lvl1'])
decoded_df = self.roundtrip(df)
assert_frame_equal(decoded_df, df)
def test_dataframe_with_interval_index_roundtrip(self):
if self.should_skip:
return self.skip('pandas is not importable')
df = pd.DataFrame(
{'a': [1, 2], 'b': [3, 4]}, index=pd.IntervalIndex.from_breaks([1, 2, 4])
)
decoded_df = self.roundtrip(df)
assert_frame_equal(decoded_df, df)
def test_index_roundtrip(self):
if self.should_skip:
return self.skip('pandas is not importable')
idx = pd.Index(range(5, 10))
decoded_idx = self.roundtrip(idx)
assert_index_equal(decoded_idx, idx)
def test_datetime_index_roundtrip(self):
if self.should_skip:
return self.skip('pandas is not importable')
idx = pd.date_range(start='2019-01-01', end='2019-02-01', freq='D')
decoded_idx = self.roundtrip(idx)
assert_index_equal(decoded_idx, idx)
def test_ragged_datetime_index_roundtrip(self):
if self.should_skip:
return self.skip('pandas is not importable')
idx = pd.DatetimeIndex(['2019-01-01', '2019-01-02', '2019-01-05'])
decoded_idx = self.roundtrip(idx)
assert_index_equal(decoded_idx, idx)
def test_timedelta_index_roundtrip(self):
if self.should_skip:
return self.skip('pandas is not importable')
idx = pd.timedelta_range(start='1 day', periods=4, closed='right')
decoded_idx = self.roundtrip(idx)
assert_index_equal(decoded_idx, idx)
def test_period_index_roundtrip(self):
if self.should_skip:
return self.skip('pandas is not importable')
        idx = pd.period_range(start='2017-01-01', end='2018-01-01', freq='M')
        decoded_idx = self.roundtrip(idx)
        assert_index_equal(decoded_idx, idx)
## TECHNICHAL ANALYSIS
import pandas as pd
import numpy as np
# import talib
from plotly.graph_objs import Figure
from .utils import make_list
class StudyError(Exception):
pass
def _ohlc_dict(df_or_figure,open='',high='',low='',close='',volume='',
validate='',**kwargs):
"""
Returns a dictionary with the actual column names that
correspond to each of the OHLCV values.
df_or_figure : DataFrame or Figure
open : string
Column name to be used for OPEN values
high : string
Column name to be used for HIGH values
low : string
Column name to be used for LOW values
close : string
Column name to be used for CLOSE values
volume : string
Column name to be used for VOLUME values
validate : string
Validates that the stated column exists
Example:
validate='ohv' | Will ensure Open, High
            and Volume values exist.
"""
c_dir={}
ohlcv=['open','high','low','close','volume']
if type(df_or_figure)==pd.DataFrame:
cnames=df_or_figure.columns
elif type(df_or_figure)==Figure or type(df_or_figure) == dict:
cnames=df_or_figure.axis['ref'].keys()
elif type(df_or_figure)==pd.Series:
cnames=[df_or_figure.name]
c_min=dict([(v.lower(),v) for v in cnames])
for _ in ohlcv:
if _ in c_min.keys():
c_dir[_]=c_min[_]
else:
for c in cnames:
if _ in c.lower():
c_dir[_]=c
if open:
c_dir['open']=open
if high:
c_dir['high']=high
if low:
c_dir['low']=low
if close:
c_dir['close']=close
if volume:
c_dir['volume']=volume
for v in list(c_dir.values()):
if v not in cnames:
raise StudyError('{0} is not a valid column name'.format(v))
if validate:
errs=[]
val=validate.lower()
s_names=dict([(_[0],_) for _ in ohlcv])
cols=[_[0] for _ in c_dir.keys()]
for _ in val:
if _ not in cols:
errs.append(s_names[_])
if errs:
raise StudyError('Missing Columns: {0}'.format(', '.join(errs)))
return c_dir
def get_column_name(name,study=None,str=None,period=None,column=None,period_dict=None):
if str:
if period_dict:
if name in period_dict:
period=period_dict[name]
study='' if name.lower()==study.lower() else study
return str.format(study=study,period=period,column=column,name=name)
else:
return name
def validate(df,column=None):
if isinstance(df,pd.DataFrame):
if column is not None:
df=pd.DataFrame(df[column])
_df=pd.DataFrame()
elif len(df.columns)>1:
raise StudyError("DataFrame needs to be a single column \n"
"Or the column name needs to be specified")
else:
df=df.copy()
_df=pd.DataFrame()
column=df.columns[0]
else:
df=pd.DataFrame(df)
_df=pd.DataFrame()
column=df.columns[0]
return df,_df,column
def rename(df,_df,study,periods,column,include,str,detail,output=None,period_dict=None):
d_name=dict([(i,get_column_name(i,study=study,str=str,
period=periods,column=column,period_dict=period_dict)) for i in _df.columns])
_df=_df.rename(columns=d_name)
if detail:
__df=_df
elif output:
__df=_df[[d_name[_] for _ in output]]
else:
__df=_df[d_name[study]]
if include:
        return pd.concat([df,__df],axis=1)
import os
import fnmatch
import calendar
import numpy as np
import pandas as pd
import xarray as xr
from itertools import product
from util import month_num_to_string
import xesmf as xe
"""
Module contains several functions for preprocessing S2S hindcasts.
Author: <NAME>, NCAR (<EMAIL>)
Contributions from <NAME>, NCAR
"""
def regrid_mask(ds, variable, reuse_weights=False):
"""
Function to regrid mcs obs mask onto coarser ERA5 grid (0.25-degree).
Args:
ds (xarray dataset): Mask file.
variable (str): variable.
reuse_weights (boolean): Whether to use precomputed weights to speed up calculation.
Defaults to ``False``.
Returns:
Regridded mask file for use with machine learning model.
"""
ds_out = xe.util.grid_2d(lon0_b=0-0.5, lon1_b=360-0.5, d_lon=1.,
lat0_b=-90-0.5, lat1_b=90, d_lat=1.)
regridder = xe.Regridder(ds, ds_out, method='nearest_s2d', reuse_weights=reuse_weights)
return regridder(ds[variable])
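# Example usage (hedged sketch; the path and variable name are placeholders):
# mask_ds = xr.open_dataset('/glade/scratch/$USER/s2s/mcs_mask.nc')
# mask_1deg = regrid_mask(mask_ds, 'mcs_mask', reuse_weights=False)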
def create_cesm2_folders(variable, parent_directory, start='1999-01-01', end='2019-12-31', freq='W-MON'):
"""
Create folders to place new variable files that were not preprocessed p1 (or other SubX priority).
Args:
variable (str): Name of variable (e.g., 'sst').
parent_directory (str): Directory to place files (e.g., '/glade/scratch/$USER/s2s/').
start (str): Start of hindcasts. Defaults to '1999-01-01' for CESM2.
end (str): End of hindcasts. Defaults to '2019-12-31' for CESM2.
freq (str): Frequency of hindcast starts. Defaults to 'W-MON' for CESM2.
"""
d1 = pd.date_range(start=start, end=end, freq=freq)
if os.path.exists(parent_directory):
for yr, mo in product(np.unique(d1.strftime("%Y")), np.unique(d1.strftime("%m"))):
new_directory = 'CESM2/'+variable+'/'+yr+'/'+mo
path = os.path.join(parent_directory, new_directory)
try:
os.makedirs(path, exist_ok = True)
print("Directory '%s' created successfully" % new_directory)
except OSError as error:
print("Directory '%s' cannot be created" % new_directory)
if not os.path.exists(parent_directory):
print('Parent directory does not exist.')
return
def create_cesm2_files(variable, parent_directory, ensemble, start='1999-01-01', end='2019-12-31', freq='W-MON'):
"""
    Create CESM2 variable files that were not preprocessed as p1 (or other SubX priority) variables.
    Here we extract the variable from daily files containing many variables to reduce memory usage.
    Daily files are expected in the ``/temp/`` sub-folder.
Args:
variable (str): Name of variable in lower case (e.g., 'sst').
parent_directory (str): Directory where files are located (e.g., '/glade/scratch/$USER/s2s/').
ensemble (str): Two digit ensemble member of hindcast (e.g., '09').
start (str): Start of hindcasts. Defaults to '1999-01-01' for CESM2.
end (str): End of hindcasts. Defaults to '2019-12-31' for CESM2.
freq (str): Frequency of hindcast starts. Defaults to 'W-MON' for CESM2.
"""
d1 = pd.date_range(start=start, end=end, freq=freq)
for root, dirnames, filenames in os.walk(f'{parent_directory}CESM2/temp/'):
for num, (yr, mo, dy) in enumerate(zip(d1.strftime("%Y"), d1.strftime("%m"), d1.strftime("%d"))):
if yr == '2016' and mo == '02' and dy == '29':
dy = '28'
for filename in fnmatch.filter(filenames, f'cesm2cam6v2*{yr}-{mo}-{dy}.{ensemble}.cam.h2.{yr}-{mo}-{dy}-00000.nc'):
ds = xr.open_dataset(root+filename)[variable.upper()]
ds.to_dataset(name=variable.upper()).to_netcdf(
f'{parent_directory}CESM2/{variable}/{yr}/{mo}/{variable}_cesm2cam6v2_{dy}{month_num_to_string(mo)}{yr}_00z_d01_d46_m{ensemble}.nc')
return
def create_cesm2_pressure_files(filelist, variable, pressure=300.):
"""
Create CESM2 variable files that were not preprocessed p1 (or other SubX priority) variables.
Here we extract variables on a pressure level from files containing many pressure levels
to reduce memory usage.
Args:
filelist (list of str): List of file names and directory locations.
variable (str): Name of variable in lower case (e.g., 'sst').
pressure (float): Pressure level. Defaults to ``300.``
"""
for fil in filelist:
ds = xr.open_dataset(fil).sel(lev_p=pressure).drop('lev_p')
ds.to_netcdf(f"{fil.split(variable)[0]}{variable}_temp{fil.split(variable)[1]}{fil.split('/')[-1]}")
return
def gpcp_filelist(parent_directory, variable='precip', start='1999-01-01', end='2019-12-31', freq='D'):
"""
Create list of daily GPCP Version 2.3 Combined Precipitation Data Set files.
https://www.ncei.noaa.gov/data/global-precipitation-climatology-project-gpcp-daily/access/
Args:
parent_directory (str): Directory where files are located (e.g., '/glade/scratch/$USER/s2s/').
variable (str): Name of GPCP variable (e.g., 'precip').
start (str): Start of hindcasts. Defaults to '1999-01-01' for CESM2.
end (str): End of hindcasts. Defaults to '2019-12-31' for CESM2.
freq (str): Frequency of hindcast starts. Defaults to 'D' for daily.
"""
d1 = pd.date_range(start=start, end=end, freq=freq)
matches = []
for num, (yr, mo, dy) in enumerate(zip(d1.strftime("%Y"), d1.strftime("%m"), d1.strftime("%d"))):
if mo == '02' and dy == '29':
continue # skip leap years
for root, dirnames, filenames in os.walk(f'{parent_directory}/'):
for filename in fnmatch.filter(filenames, f'*_daily_d{yr}{mo}{dy}_c*.nc'):
thefile = os.path.join(root, filename)
if os.access(thefile, os.R_OK):
matches.append(thefile)
if not os.access(thefile, os.R_OK):
matches.append(np.nan)
return matches
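# Example usage (sketch; the directory is a placeholder):
# gpcp_files = gpcp_filelist('/glade/work/$USER/GPCP', start='1999-01-01', end='1999-12-31')
# print(len(gpcp_files))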
def cesm2_filelist(variable, parent_directory, ensemble, start='1999-01-01', end='2019-12-31', freq='W-MON'):
"""
Create list of variable files.
Args:
variable (str): Name of variable (e.g., 'zg_200').
parent_directory (str): Directory where files are located (e.g., '/glade/scratch/$USER/s2s/').
ensemble (str or list of str): Two digit ensemble member of hindcast (e.g., '09') or list (e.g., ['00', '01']).
start (str): Start of hindcasts. Defaults to '1999-01-01' for CESM2.
end (str): End of hindcasts. Defaults to '2019-12-31' for CESM2.
freq (str): Frequency of hindcast starts. Defaults to 'W-MON' for CESM2.
"""
d1 = pd.date_range(start=start, end=end, freq=freq)
matches = []
for num, (yr, mo, dy) in enumerate(zip(d1.strftime("%Y"), d1.strftime("%m"), d1.strftime("%d"))):
if mo == '02' and dy == '29':
dy = '28'
for root, dirnames, filenames in os.walk(f'{parent_directory}CESM2/{variable}/{yr}/{mo}/'):
if isinstance(ensemble, str):
for filename in fnmatch.filter(filenames, f'*_cesm2cam6v2_{dy}*_m{ensemble}.nc'):
thefile = os.path.join(root, filename)
if os.access(thefile, os.R_OK):
matches.append(thefile)
if not os.access(thefile, os.R_OK):
matches.append(np.nan)
if isinstance(ensemble, list):
for ens in ensemble:
for filename in fnmatch.filter(filenames, f'*_cesm2cam6v2_{dy}*_m{ens}.nc'):
thefile = os.path.join(root, filename)
if os.access(thefile, os.R_OK):
matches.append(thefile)
if not os.access(thefile, os.R_OK):
matches.append(np.nan)
return matches
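# Example usage (sketch; the parent directory and ensemble list are placeholders):
# cesm_files = cesm2_filelist('zg_200', '/glade/scratch/$USER/s2s/', ensemble=['00', '01'],
#                             start='1999-01-01', end='1999-02-01')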
def gpcp_climatology(filelist, variable='precip', save=False, author=None, parent_directory=None):
"""
Create GPCP Version 2.3 Combined Precipitation Data Set climatology.
Args:
filelist (list of str): List of file names and directory locations.
save (boolean): Set to True if want to save climatology as netCDF. Defaults to False.
author (str): Author of file. Defaults to None.
parent_directory (str): Directory where files are located (e.g., '/glade/scratch/$USER/s2s/').
Defaults to None.
"""
if save:
assert isinstance(author, str), "Please set author for file saving."
assert isinstance(parent_directory, str), "Please set parent_directory to save file to."
clim = np.zeros((int(len(filelist)/365), 365, 180, 360))
doy = 0
yr = 0
dates = []
years = []
for num, file in enumerate(filelist):
ds = xr.open_dataset(file)
ds = ds[variable].isel(time=0)
dates.append(pd.Timestamp(ds.time.values))
ds = ds.where(ds>=0.,0.) # valid range: [0.,100.]
ds = ds.where(ds<=100.,100.)
if num == 0:
lats = ds.latitude.values
lons = ds.longitude.values
clim[yr,doy,:,:] = ds.values
doy += 1
if doy == 365:
doy = 0
yr += 1
years.append(int(ds.time.dt.strftime('%Y').values))
data_assemble = xr.Dataset({
'clim': (['time','lat','lon'], np.nanmean(clim, axis=0)),
},
coords =
{'date_range': (['date_range'], pd.to_datetime(dates)),
'time': (['time'], np.arange(1,365 + 1,1)),
'lat' : (['lat'], lats),
'lon' : (['lon'], lons)
},
attrs =
{'File Author' : author,
'Years' : np.array(years)})
if not save:
return data_assemble
if save:
data_assemble.to_netcdf(f'{parent_directory}CESM2_OBS/{variable.lower()}_clim_gpcp_data.nc')
def cesm2_hindcast_climatology(filelist, variable, save=False, author=None, parent_directory=None):
"""
Create CESM2 hindcast climatology. Outputs array (lon, lat, lead, 365).
Translated from MATLAB (provided by <NAME>, NCAR).
Args:
filelist (list of str): List of file names and directory locations.
variable (str): Name of variable (e.g., 'zg_200').
save (boolean): Set to True if want to save climatology as netCDF. Defaults to False.
author (str): Author of file. Defaults to None.
parent_directory (str): Directory where files are located (e.g., '/glade/scratch/$USER/s2s/').
Defaults to None.
"""
if save:
assert isinstance(author, str), "Please set author for file saving."
assert isinstance(parent_directory, str), "Please set parent_directory to save file to."
dateStrPrevious = '01jan1000' # just a random date
index_help = 0
char_1 = "cesm2cam6v2_"
char_2 = "_00z_d01_d46"
grab_ensembles = True
for fil in filelist:
dateStr = fil[fil.find(char_1)+12 : fil.find(char_2)]
starttime = pd.to_datetime(dateStr)
doy = starttime.dayofyear
if (starttime.year % 4) == 0 and starttime.month > 2:
doy = doy - 1
var = xr.open_dataset(fil)[variable].transpose('lon','lat','time').values # (lon,lat,lead); load file and grab variable
varChosen = var
if variable == 'pr' or variable == 'pr_sfc':
            varChosen = varChosen * 86400 # convert kg/m2/s to mm/day
if variable == 'tas_2m':
varChosen = varChosen - 273.15 # convert K to C
if varChosen.shape[2] != 46:
varChosen = np.ones((ensAvg.shape)) * np.nan
if index_help == 0:
climBin = np.zeros((varChosen.shape[0], varChosen.shape[1], varChosen.shape[2], 365)) # (lon, lat, lead, 365 days)
climBinDays = np.zeros((varChosen.shape[0], varChosen.shape[1], varChosen.shape[2], 365))
lon = xr.open_dataset(fil)[variable].coords['lon'].values # grab lon and lat arrays
lat = xr.open_dataset(fil)[variable].coords['lat'].values
if grab_ensembles:
all_ensembles = []
all_ensembles.append(fil[fil.find('_m')+2:fil.find('_m')+4]) # saving ensemble members for attrs
if dateStr == dateStrPrevious: # if dates match, means you are on next ensemble member
x += 1 # to compute ensemble mean
ensAvg = (ensAvg * (x - 1) + varChosen) / x
if grab_ensembles:
all_ensembles.append(fil[fil.find('_m')+2:fil.find('_m')+4])
else:
if index_help != 0: # if dates don't match, but make sure we are past the first file and ensAvg has data
if not np.all(ensAvg == 0):
climBin[:,:,:,doyPrevious - 1] = climBin[:,:,:,doyPrevious - 1] + ensAvg # doyPrevious - 1 bc 0-based index
climBinDays[:,:,:,doyPrevious - 1] = climBinDays[:,:,:,doyPrevious - 1] + 1
grab_ensembles = False
ensAvg = varChosen
x = 1
dateStrPrevious = dateStr
doyPrevious = doy
index_help += 1
climBin[:,:,:,doyPrevious - 1] = climBin[:,:,:,doyPrevious - 1] + ensAvg
climBinDays[:,:,:,doyPrevious - 1] = climBinDays[:,:,:,doyPrevious - 1] + 1
clim = climBin / climBinDays
dates_array = pd.to_datetime(np.array([file[file.find(char_1)+12:file.find(char_2)] for file in filelist])).unique()
data_assemble = xr.Dataset({
'clim': (['lon','lat','lead','time'], clim),
'date_range': (['date_range'], dates_array),
},
coords =
{'lead': (['lead'], np.arange(0,clim.shape[2],1)),
'time': (['time'], np.arange(1,clim.shape[3]+1,1)),
'lat' : (['lat'], lat),
'lon' : (['lon'], lon)
},
attrs =
{'File Author' : author,
'Ensembles' : all_ensembles})
if not save:
return data_assemble
if save:
if len(all_ensembles) > 1:
data_assemble.to_netcdf(f'{parent_directory}CESM2/{variable.lower()}_clim_cesm2cam6v2_{str(len(all_ensembles))}members_s2s_data.nc')
if len(all_ensembles) == 1:
data_assemble.to_netcdf(f'{parent_directory}CESM2/{variable.lower()}_clim_cesm2cam6v2_{str(all_ensembles[0])}member_s2s_data.nc')
def cesm2_total_ensemble(filelist):
"""
Extract the total number of ensembles contained in the list of CESM2 hindcast files.
Returns an integer type scalar.
Args:
filelist (list of str): List of file names and directory locations.
"""
dateStrPrevious = '01jan1000' # just a random date
index_help = 0
char_1 = "cesm2cam6v2_"
char_2 = "_00z_d01_d46"
grab_ensembles = True
for fil in filelist:
dateStr = fil[fil.find(char_1)+12 : fil.find(char_2)]
if index_help == 0:
if grab_ensembles:
all_ensembles = []
all_ensembles.append(fil[fil.find('_m')+2:fil.find('_m')+4])
if dateStr == dateStrPrevious:
if grab_ensembles:
all_ensembles.append(fil[fil.find('_m')+2:fil.find('_m')+4])
else:
if index_help != 0:
if not np.all(ensAvg == 0):
grab_ensembles = False
ensAvg = 1
dateStrPrevious = dateStr
index_help += 1
if not grab_ensembles:
return int(len(all_ensembles))
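# Example (sketch): for a file list built from 11 ensemble members this returns 11.
# n_members = cesm2_total_ensemble(cesm_files)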
def cesm2_hindcast_anomalies(filelist, variable, parent_directory, save=False, author=None):
"""
Create CESM2 hindcast anomalies. Outputs array (lon, lat, lead, number of forecasts).
Number of forecasts is equal to the length of ``filelist`` divided by ``total ensembles``.
Translated from MATLAB (provided by <NAME>, NCAR).
Args:
filelist (list of str): List of file names and directory locations.
variable (str): Name of variable (e.g., 'zg_200').
parent_directory (str): Directory where climatology is located and where to save anomalies
(e.g., '/glade/scratch/$USER/s2s/').
save (boolean): Set to True if want to save climatology as netCDF. Defaults to False.
author (str): Author of file. Defaults to None.
"""
if save:
assert isinstance(author, str), "Please set author for file saving."
assert isinstance(parent_directory, str), "Please set parent_directory to save file to."
if str(cesm2_total_ensemble(filelist)) != str(11):
import warnings
warnings.warn("Using climatology computed from 11 ensemble members!")
clima = xr.open_dataset(f'{parent_directory}CESM2/{variable.lower()}_clim_cesm2cam6v2_11members_s2s_data.nc') # open climo
climCyclical = xr.concat([clima['clim'], clima['clim'], clima['clim']], dim='time') # stack 3x's time for smoothing
# smooth time with 31 days 2x's (31 day window to copy Lantao, but maybe it should be 16)
climSmooth = climCyclical.rolling(time=31, min_periods=1, center=True).mean(skipna=True).rolling(
time=31, min_periods=1, center=True).mean(skipna=True)
climSmooth = climSmooth.isel(time=slice(365,365 * 2)) # choose the middle year (smoothed)
climSmooth = climSmooth.transpose('lon','lat','lead','time').values # extract array for loop
del climCyclical # delete previous arrays
del clima
dateStrPrevious = '01jan1000' # just a random date
index_help = 0
forecastCounter = 0
char_1 = "cesm2cam6v2_"
char_2 = "_00z_d01_d46"
grab_ensembles = True
for fil in filelist: # loop through list of hindcast files
dateStr = fil[fil.find(char_1)+12 : fil.find(char_2)]
starttime = pd.to_datetime(dateStr)
doy = starttime.dayofyear
if (starttime.year % 4) == 0 and starttime.month > 2:
doy = doy - 1
var = xr.open_dataset(fil)[variable].transpose('lon','lat','time').values # (lon,lat,lead); load file and grab variable
varChosen = var
if variable == 'pr' or variable == 'pr_sfc':
            varChosen = varChosen * 86400 # convert kg/m2/s to mm/day
if variable == 'tas_2m':
varChosen = varChosen - 273.15 # convert K to C
if varChosen.shape[2] != 46:
varChosen = np.ones((ensAvg.shape)) * np.nan
if index_help == 0:
dim_last = int(len(filelist)/cesm2_total_ensemble(filelist))
anom = np.empty((varChosen.shape[0], varChosen.shape[1], varChosen.shape[2], dim_last))
starttimeBin = np.empty((dim_last), dtype="S10") # (lon, lat, lead, num of forecasts)
lon = xr.open_dataset(fil)[variable].coords['lon'].values # grab lon and lat arrays
lat = xr.open_dataset(fil)[variable].coords['lat'].values
if grab_ensembles:
all_ensembles = []
all_ensembles.append(fil[fil.find('_m')+2:fil.find('_m')+4]) # saving ensemble members for attrs
if dateStr == dateStrPrevious: # if dates match, means you are on next ensemble member
x += 1 # to compute ensemble mean
ensAvg = (ensAvg * (x - 1) + varChosen) / x
if grab_ensembles:
all_ensembles.append(fil[fil.find('_m')+2:fil.find('_m')+4])
else:
if index_help != 0: # if dates don't match, but make sure we are past the first file and ensAvg has data
if not np.all(ensAvg == 0):
forecastCounter += 1
anom[:,:,:,forecastCounter - 1] = ensAvg - np.squeeze(climSmooth[:,:,:,doyPrevious - 1])
starttimeBin[forecastCounter - 1] = str(starttimePrevious)
grab_ensembles = False
ensAvg = varChosen
x = 1
dateStrPrevious = dateStr
starttimePrevious = starttime
doyPrevious = doy
index_help += 1
forecastCounter += 1
anom[:,:,:,forecastCounter - 1] = ensAvg - np.squeeze(climSmooth[:,:,:,doyPrevious - 1])
    starttimeBin[forecastCounter - 1] = str(starttimePrevious)
dates_array = pd.to_datetime(np.array([file[file.find(char_1)+12:file.find(char_2)] for file in filelist])).unique()
data_assemble = xr.Dataset({
'anom': (['lon','lat','lead','time'], anom),
'fcst': (['time'], starttimeBin),
'date_range': (['date_range'], dates_array),
},
coords =
{'lead': (['lead'], np.arange(0,anom.shape[2],1)),
'time': (['time'], np.arange(1,anom.shape[3]+1,1)),
'lat' : (['lat'], lat),
'lon' : (['lon'], lon)
},
attrs =
{'File Author' : author,
'Ensembles' : all_ensembles})
if not save:
return data_assemble
if save:
if len(all_ensembles) > 1:
data_assemble.to_netcdf(f'{parent_directory}CESM2/{variable.lower()}_anom_cesm2cam6v2_{str(len(all_ensembles))}members_s2s_data.nc')
if len(all_ensembles) == 1:
data_assemble.to_netcdf(f'{parent_directory}CESM2/{variable.lower()}_anom_cesm2cam6v2_{str(all_ensembles[0])}member_s2s_data.nc')
def gpcp_hindcast_anomalies(parent_directory, variable='precip',
start_range='1999-01-01', end_range='2019-12-31',
save=False, author=None,):
"""
Create GPCP Version 2.3 Combined Precipitation Data Set anomalies.
Args:
parent_directory (str): Directory where climatology is located and where to save anomalies
(e.g., '/glade/scratch/$USER/s2s/').
variable (str): Name of variable. Defaults to precip for GPCP.
start_range (str): Start range of analysis. Defaults to '1999-01-01'.
end_range (str): End range of analysis. Defaults to '2019-12-31'.
save (boolean): Set to True if want to save climatology as netCDF. Defaults to False.
author (str): Author of file. Defaults to None.
"""
if save:
assert isinstance(author, str), "Please set author for file saving."
assert isinstance(parent_directory, str), "Please set parent_directory to save file to."
# -- open and smooth obs climo
clima = xr.open_dataset(f'{parent_directory}CESM2_OBS/{variable.lower()}_clim_gpcp_data.nc')
climCyclical = xr.concat([clima['clim'], clima['clim'], clima['clim']], dim='time')
climSmooth = climCyclical.rolling(time=31, min_periods=1, center=True).mean(skipna=True).rolling(
time=31, min_periods=1, center=True).mean(skipna=True)
climSmooth = climSmooth.isel(time=slice(365,365 * 2))
climSmooth = climSmooth.transpose('time','lat','lon')
# -- reduce mem usage
del climCyclical
del clima
# -- add lead time to climo
climCyclicalObs = xr.concat([climSmooth, climSmooth, climSmooth], dim='time')
climFinal = np.zeros((climSmooth.shape[0],45,climSmooth.shape[1],climSmooth.shape[2]))
for i in range(365):
climFinal[i,:,:,:] = climCyclicalObs[365+i:365+i+45,:,:]
# -- create time arrays for subsequent indexing
d_mon = pd.date_range(start=start_range, end=end_range, freq='W-MON')
d_dly = pd.date_range(start=start_range, end=end_range, freq='D')
for num, (yr, mo, day) in enumerate(zip(d_dly.strftime("%Y"),d_dly.strftime("%m"),d_dly.strftime("%d"))):
if calendar.isleap(int(yr)):
if mo == '02' and day == '29':
d_dly = d_dly.drop(f'{yr}-02-29')
for num, (yr, mo, day) in enumerate(zip(d_mon.strftime("%Y"),d_mon.strftime("%m"),d_mon.strftime("%d"))):
if calendar.isleap(int(yr)):
if mo == '02' and day == '29':
d_mon = d_mon.drop(f'{yr}-02-29')
# -- create daily obs for final anom computation
filelist2 = gpcp_filelist(parent_directory='/glade/work/molina/GPCP',
start=start_range,
end=str(int((end_range)[:4])+1)+'-12-31')
varObs = np.zeros((len(filelist2), 180, 360))
for num, file in enumerate(filelist2):
ds = xr.open_dataset(file)
ds = ds[variable].isel(time=0)
ds = ds.where(ds>=0.,0.) # valid range: [0.,100.]
ds = ds.where(ds<=100.,100.)
if num == 0:
lats = ds.latitude.values
lons = ds.longitude.values
varObs[num,:,:] = ds.values
# -- add lead time to daily obs
varFinal = np.zeros((int(len(d_mon)), 45, 180, 360))
for num, i in enumerate(d_mon):
varFinal[num,:,:,:] = varObs[int(np.argwhere(d_dly==np.datetime64(i))[0]):int(np.argwhere(d_dly==np.datetime64(i))[0])+45,:,:]
# -- compute obs anomalies
anom = np.zeros((int(len(d_mon)), 45, 180, 360))
for num, i in enumerate(d_mon):
doy_indx = i.dayofyear - 1
if calendar.isleap(int(i.year)) and i.month > 2:
doy_indx = doy_indx - 1
anom[num,:,:,:] = varFinal[num,:,:,:] - climFinal[doy_indx,:,:,:]
# --
data_assemble = xr.Dataset({
'anom': (['time','lead','lat','lon'], anom),
'date_range': (['date_range'], d_mon),
},
coords =
{'lead': (['lead'], np.arange(0,anom.shape[1],1)),
'time': (['time'], np.arange(1,anom.shape[0]+1,1)),
'lat' : (['lat'], lats),
'lon' : (['lon'], lons)
},
attrs =
{'File Author' : author})
if not save:
return data_assemble
if save:
data_assemble.to_netcdf(f'{parent_directory}CESM2_OBS/{variable.lower()}_anom_gpcp_data.nc')
def era5_temp_regrid(obs_directory, start_range='1999-01-01', end_range='2020-12-31'):
"""
Regridding of ERA5 temperatures.
Args:
obs_directory (str): Directory where files are located.
start_range (str): Start of hindcasts. Defaults to '1999-01-01'.
end_range (str): End of hindcasts. Defaults to '2020-12-31'.
"""
d_daily = pd.date_range(start=start_range, end=end_range, freq='D')
d_daily = d_daily[~((d_daily.day==29)&(d_daily.month==2))]
for num, t in enumerate(d_daily):
tmax = xr.open_dataset(
f"{obs_directory}era5_tmax/e5.oper.fc.sfc.minmax.128_201_mx2t.ll025sc.{t.strftime('%Y%m%d')}.nc")
tmax = regrid_mask(tmax, 'MX2T')
tmax.to_dataset(name='MX2T').to_netcdf(
f"{obs_directory}era5_tmax_regrid/e5.oper.fc.sfc.minmax.128_201_mx2t.ll025sc.{t.strftime('%Y%m%d')}.nc")
tmin = xr.open_dataset(
f"{obs_directory}era5_tmin/e5.oper.fc.sfc.minmax.128_202_mn2t.ll025sc.{t.strftime('%Y%m%d')}.nc")
tmin = regrid_mask(tmin, 'MN2T')
tmin.to_dataset(name='MN2T').to_netcdf(
f"{obs_directory}era5_tmin_regrid/e5.oper.fc.sfc.minmax.128_202_mn2t.ll025sc.{t.strftime('%Y%m%d')}.nc")
def era5_temp_climatology(obs_directory, save_directory, start='1999-01-01', end='2020-12-31',
save=False, author=None):
"""
Create ERA5 temperature hindcast climatology. Outputs array (365, lat, lon).
Args:
obs_directory (str): Directory where files are located.
save_directory (str): Directory where to save files.
start (str): Start of hindcasts. Defaults to '1999-01-01'.
end (str): End of hindcasts. Defaults to '2020-12-31'.
save (boolean): Set to True if want to save climatology as netCDF. Defaults to False.
author (str): Author of file. Defaults to None.
"""
td = pd.date_range(start=start, end=end, freq='D')
td = td[~((td.day==29)&(td.month==2))]
doy = 0
yr = 0
dates = []
years = []
for num, t in enumerate(td):
tmax = xr.open_dataset(
f"{obs_directory}era5_tmax_regrid/e5.oper.fc.sfc.minmax.128_201_mx2t.ll025sc.{t.strftime('%Y%m%d')}.nc")['MX2T']
tmin = xr.open_dataset(
f"{obs_directory}era5_tmin_regrid/e5.oper.fc.sfc.minmax.128_202_mn2t.ll025sc.{t.strftime('%Y%m%d')}.nc")['MN2T']
avg_temp = (tmin + tmax) / 2
dates.append(pd.Timestamp(t.strftime('%Y%m%d')))
if num == 0:
clim = np.zeros((td.year.unique().shape[0],365,avg_temp.shape[0],avg_temp.shape[1]))
lats = tmin.y.values
lons = tmin.x.values
clim[yr,doy,:,:] = avg_temp
doy += 1
if doy == 365:
doy = 0
yr += 1
years.append(int(t.strftime('%Y')))
data_assemble = xr.Dataset({
'clim': (['time','lat','lon'], np.nanmean(clim, axis=0)),
},
coords =
{'date_range': (['date_range'], pd.to_datetime(dates)),
'time': (['time'], np.arange(1,365 + 1,1)),
'lat' : (['lat'], lats),
'lon' : (['lon'], lons)
},
attrs =
{'File Author' : author,
'Years' : np.array(years)})
if not save:
return data_assemble
if save:
data_assemble.to_netcdf(f'{save_directory}era5_temp_clim_gpcp_data.nc')
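# Example usage (hedged; directories and author are placeholders):
# era5_temp_climatology(obs_directory='/glade/scratch/$USER/s2s/CESM2_OBS/',
#                       save_directory='/glade/scratch/$USER/s2s/CESM2_OBS/',
#                       save=True, author='author name')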
def era5_temp_anomalies(obs_directory, save_directory, start_range='1999-01-01', end_range='2019-12-31',
save=False, author=None):
"""
Create ERA5 temperature hindcast anomalies.
Args:
obs_directory (str): Directory where files are located.
save_directory (str): Directory where to save files.
start (str): Start of hindcasts. Defaults to '1999-01-01'.
end (str): End of hindcasts. Defaults to '2020-12-31'.
save (boolean): Set to True if want to save climatology as netCDF. Defaults to False.
author (str): Author of file. Defaults to None.
"""
# -- open and smooth obs climo
clima = xr.open_dataset(f'{save_directory}era5_temp_clim_gpcp_data.nc')
climCyclical = xr.concat([clima['clim'], clima['clim'], clima['clim']], dim='time')
climSmooth = climCyclical.rolling(time=31, min_periods=1, center=True).mean(skipna=True).rolling(
time=31, min_periods=1, center=True).mean(skipna=True)
climSmooth = climSmooth.isel(time=slice(365,365 * 2))
climSmooth = climSmooth.transpose('time','lat','lon')
# -- reduce mem usage
del climCyclical
del clima
# -- add lead time to climo
climCyclicalObs = xr.concat([climSmooth, climSmooth, climSmooth], dim='time')
climFinal = np.zeros((climSmooth.shape[0],45,climSmooth.shape[1],climSmooth.shape[2]))
for i in range(365):
climFinal[i,:,:,:] = climCyclicalObs[365+i:365+i+45,:,:]
# -- create time arrays for subsequent indexing
d_mon = pd.date_range(start=start_range, end=end_range, freq='W-MON')
    d_dly = pd.date_range(start=start_range, end=end_range, freq='D')
import pathlib
import datetime
import time
import uuid
import pandas as pd
import numpy as np
import simpy
import dill as pickle
import openclsim.model
def save_logs(simulation, location, file_prefix):
# todo add code to LogSaver to allow adding a file_prefix to each file
site_logs = list(simulation.sites.values())
equipment_logs = list(simulation.equipment.values())
activity_logs = [
activity["activity_log"] for activity in simulation.activities.values()
]
logsaver = LogSaver(
site_logs,
equipment_logs,
activity_logs,
location=location,
file_prefix=file_prefix,
overwrite=True,
append_to_existing=False,
)
logsaver.save_all_logs()
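# Example usage (hedged sketch): `simulation` is assumed to expose the .sites,
# .equipment and .activities mappings used above; the paths are placeholders.
# save_logs(simulation, location='results/', file_prefix='run01_')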
class ToSave:
"""
Class that defines objects that have to be saved.
data_type is the object type: ship, site, crane, etc.
data: is the dictionary that is used to fill the data_type
"""
def __init__(self, data_type, data, *args, **kwargs):
# This is the case for activities
if data_type == openclsim.model.Activity:
self.data_type = "Activity"
self.data = {
"name": data["name"],
"id": data["id"],
"mover": data["mover"].name,
"loader": data["loader"].name,
"unloader": data["unloader"].name,
"origin": data["origin"].name,
"destination": data["destination"].name,
"stop_event": None, # data["stop_event"],
"start_event": None,
} # data["start_event"]
# This is the case for equipment and sites
elif type(data_type) == type:
self.data_type = []
for subclass in data_type.__mro__:
if (
subclass.__module__ == "openclsim.core"
and subclass.__name__ not in ["Identifiable", "Log", "SimpyObject"]
):
self.data_type.append(subclass.__name__)
self.data = data
self.data["env"] = None
class SimulationSave:
"""
SimulationSave allows save all obtained data.
Environment: The simpy environment
Activities: List element with 'ToSave' classes of all unique activities
Equipment: List element with 'ToSave' classes of all unique pieces of equipment
Sites: List element with 'ToSave' classes of all unique sites
"""
def __init__(self, environment, activities, equipment, sites, *args, **kwargs):
""" Initialization """
# Generate unique ID for the simulation
self.id = str(uuid.uuid1())
# Save the environment
# assert type(environment) == simpy.core.Environment
self.simulation_start = environment.now
# Save all properties
assert type(activities) == list
self.activities = activities
assert type(equipment) == list
self.equipment = equipment
assert type(sites) == list
self.sites = sites
# Save the initialization properties
self.init = self.init_properties
@property
def init_properties(self):
"""
Save all properties of the simulation
"""
return {
"ID": self.id,
"Simulation start": self.simulation_start,
"Activities": self.activities,
"Equipment": self.equipment,
"Sites": self.sites,
}
def save_ini_file(self, filename, location=""):
"""
For all items of the simulation, save the properties and generate an initialization file.
This file should be a JSON format and readable to start a new simulation.
If location is "", the init will be saved in the current working directory.
"""
# assure location is a path
location = pathlib.Path(location)
file_name = location / (filename + ".pkl")
with open(file_name, "wb") as file:
pickle.dump(self.init, file)
class SimulationOpen:
"""
SimulationOpen allows to define simulations from .pkl files.
If location is "", the init will be saved in the current working directory.
"""
def __init__(self, file_name):
""" Initialization """
self.simulation = self.open_ini_file(file_name)
def open_ini_file(self, file_name):
"""
For all items of the simulation, save the properties and generate an initialization file.
This file should be a JSON format and readable to start a new simulation.
If location is "", the init will be saved in the current working directory.
"""
with open(file_name, "rb") as file:
return pickle.load(file)
def extract_files(self):
environment = simpy.Environment(
initial_time=self.simulation["Simulation start"]
)
environment.epoch = time.mktime(
datetime.datetime.fromtimestamp(
self.simulation["Simulation start"]
).timetuple()
)
sites = []
equipment = []
for site in self.simulation["Sites"]:
site_object = openclsim.model.get_class_from_type_list(
"Site", site.data_type
)
site.data["env"] = environment
sites.append(site_object(**site.data))
for ship in self.simulation["Equipment"]:
ship_object = openclsim.model.get_class_from_type_list(
"Ship", ship.data_type
)
ship.data["env"] = environment
equipment.append(ship_object(**ship.data))
activities = []
for activity in self.simulation["Activities"]:
data = activity.data
mover = [i for i in equipment if i.name == data["mover"]][0]
loader = [i for i in equipment if i.name == data["loader"]][0]
unloader = [i for i in equipment if i.name == data["unloader"]][0]
origin = [i for i in sites if i.name == data["origin"]][0]
destination = [i for i in sites if i.name == data["destination"]][0]
activities.append(
openclsim.model.Activity(
env=environment, # The simpy environment defined in the first cel
name=data["name"], # We are moving soil
ID=data["id"], # The id
origin=origin, # We originate from the from_site
destination=destination, # And therefore travel to the to_site
loader=loader, # The benefit of a TSHD, all steps can be done
mover=mover, # The benefit of a TSHD, all steps can be done
unloader=unloader,
)
) # The benefit of a TSHD, all steps can be done
return sites, equipment, activities, environment
class LogSaver:
"""
LogSaver allow saving all logs as .csv files.
Objects should be a list containing the activities, sites and equipment.
The ID could be the ID that is saved to the .pkl file, entering an ID is optional.
If location is "", the files will be saved in the current working directory.
"""
def __init__(
self,
sites,
equipment,
activities,
simulation_id="",
simulation_name="",
location="",
file_prefix="",
overwrite=False,
append_to_existing=True,
):
""" Initialization """
# Save all properties
assert type(activities) == list
self.activities = activities
assert type(equipment) == list
self.equipment = equipment
assert type(sites) == list
self.sites = sites
# Save simulation id and simulation name
self.id = simulation_id if simulation_id else str(uuid.uuid1())
self.name = simulation_name if simulation_name else self.id
# Define location to save files
self.location = location
if len(self.location) != 0 and self.location[-1] != "/":
self.location += "/"
self.location += file_prefix
# Finally save all items
self.overwrite = overwrite
self.append_to_existing = append_to_existing
def save_all_logs(self):
"""
Save all logs to a specified location.
If location is "", the logs will be saved in the current working directory.
A file is saved with unique events -- events.csv
A file is saved with unique location objects -- locations.csv
A file is saved with unique equipment objects -- equipment.csv
A file is saved with unique activity objects -- activities.csv
A file is saved with unique simulations -- simulations.csv
A file is saved with equipment logs -- equipment_log.csv
A file is saved with energy use -- energy_use.csv
A file is saved with dredging spill info -- dredging_spill.csv
A file is saved with simulation properties -- generic_results.csv
"""
# First get all unique properties
# Obtain information on simulations
simulation_dict = {"SimulationID": [], "SimulationName": []}
self.get_unique_properties("simulations", simulation_dict)
# Obtain information on activities
activity_dict = {
"ActivityID": [],
"ActivityName": [],
"EquipmentID": [],
"ActivityFunction": [],
}
self.get_unique_properties("activities", activity_dict)
# Obtain information on equipment
equipment_dict = {"EquipmentID": [], "EquipmentName": []}
self.get_unique_properties("equipment", equipment_dict)
# Obtain information on locations
location_dict = {
"LocationID": [],
"LocationName": [],
"Longitude": [],
"Latitude": [],
}
self.get_unique_properties("location", location_dict)
# Obtain information on events
event_dict = {"EventID": [], "EventName": []}
self.get_unique_properties("events", event_dict)
# Continue with obtaining the logs, energy use and dredging spill
self.get_equipment_log()
self.get_energy()
self.get_spill()
self.get_results()
# Save all as csv files
self.generic_results.to_csv(self.location + "generic_results.csv", index=False)
self.dredging_spill.to_csv(self.location + "dredging_spill.csv", index=False)
self.energy_use.to_csv(self.location + "energy_use.csv", index=False)
self.equipment_log.to_csv(self.location + "equipment_log.csv", index=False)
self.unique_events.to_csv(self.location + "events.csv", index=False)
self.unique_activities.to_csv(self.location + "activities.csv", index=False)
self.unique_equipment.to_csv(self.location + "equipment.csv", index=False)
self.unique_locations.to_csv(self.location + "locations.csv", index=False)
self.unique_simulations.to_csv(self.location + "simulations.csv", index=False)
def get_unique_properties(self, object_type, object_dict):
"""
Obtain unique properties for the given list
"""
if self.append_to_existing:
try:
unique_df = pd.read_csv(self.location + object_type + ".csv")
except FileNotFoundError:
unique_df = pd.DataFrame.from_dict(object_dict)
else:
unique_df = pd.DataFrame.from_dict(object_dict)
if object_type == "simulations":
self.unique_simulations = self.append_dataframe(
unique_df, self, "Simulation"
)
elif object_type == "activities":
for activity in self.activities:
unique_df = self.append_dataframe(unique_df, activity, "Activity")
self.unique_activities = unique_df
elif object_type == "equipment":
for piece in self.equipment:
unique_df = self.append_dataframe(unique_df, piece, "Equipment")
self.unique_equipment = unique_df
elif object_type == "events":
for piece in self.equipment:
unique_df = self.event_dataframe(unique_df, piece)
self.unique_events = unique_df
elif object_type == "location":
for site in self.sites:
unique_df = self.append_dataframe(unique_df, site, "Location")
self.unique_locations = unique_df
def append_dataframe(self, existing_df, object_id, object_type):
"""
        Check if dataframe is already filled with information, if not append.
If it is filled with similar values, raise an error unless self.overwrite == True.
"""
if object_id.id not in list(existing_df[object_type + "ID"]):
if object_type != "Location" and object_type != "Activity":
existing_df = existing_df.append(
{
object_type + "ID": object_id.id,
object_type + "Name": object_id.name,
},
ignore_index=True,
)
elif object_type == "Activity":
# lookup the equipment
# TODO: clean this up, it's now not filled in for move activities
loader_id = ""
if hasattr(object_id, "loader"):
loader_id = object_id.loader.id
existing_df = existing_df.append(
{
object_type + "ID": object_id.id,
object_type + "Name": object_id.name,
"EquipmentID": loader_id,
"ActivityFunction": "Loader",
},
ignore_index=True,
)
mover_id = ""
if hasattr(object_id, "mover"):
mover_id = object_id.mover.id
existing_df = existing_df.append(
{
object_type + "ID": object_id.id,
object_type + "Name": object_id.name,
"EquipmentID": mover_id,
"ActivityFunction": "Mover",
},
ignore_index=True,
)
unloader_id = ""
if hasattr(object_id, "unloader"):
unloader_id = object_id.unloader.id
existing_df = existing_df.append(
{
object_type + "ID": object_id.id,
object_type + "Name": object_id.name,
"EquipmentID": unloader_id,
"ActivityFunction": "Unloader",
},
ignore_index=True,
)
elif object_type == "Location":
existing_df = existing_df.append(
{
object_type + "ID": object_id.id,
object_type + "Name": object_id.name,
"Longitude": object_id.geometry.x,
"Latitude": object_id.geometry.y,
},
ignore_index=True,
)
elif self.overwrite == True:
existing_df = existing_df[existing_df[object_type + "ID"] != object_id.id]
if object_type != "Location":
existing_df = existing_df.append(
{
object_type + "ID": object_id.id,
object_type + "Name": object_id.name,
},
ignore_index=True,
)
else:
existing_df = existing_df.append(
{
object_type + "ID": object_id.id,
object_type + "Name": object_id.name,
"Longitude": object_id.geometry.x,
"Latitude": object_id.geometry.y,
},
ignore_index=True,
)
else:
raise KeyError(
"Simulation ID or simulation name already exist. "
+ "If you wish to overwrite the existing data, set overwrite to True"
)
return existing_df
def event_dataframe(self, existing_df, piece):
"""
        Check if dataframe is already filled with information, if not append.
If it is filled with similar values, raise an error unless self.overwrite == True.
"""
log = pd.DataFrame.from_dict(piece.log)
events = list(log["Message"].unique())
for event in events:
if "start" in event or "stop" in event:
event = event.replace(" start", "")
event = event.replace(" stop", "")
if event not in list(existing_df["EventName"]):
existing_df = existing_df.append(
{"EventID": str(uuid.uuid1()), "EventName": event},
ignore_index=True,
)
return existing_df
def get_equipment_log(self):
"""
Create a dataframe from all equipment logs
"""
object_dict = {
"SimulationID": [],
"ObjectID": [],
"EventID": [],
"ActivityID": [],
"LocationID": [],
"EventStart": [],
"EventStop": [],
}
try:
unique_df = pd.read_csv(self.location + "equipment_log.csv")
except FileNotFoundError:
unique_df = pd.DataFrame.from_dict(object_dict)
for piece in self.equipment:
object_log = pd.DataFrame.from_dict(piece.log)
for i, message in enumerate(object_log["Message"]):
for j, event in enumerate(self.unique_events["EventName"]):
if message == event + " start":
object_dict["SimulationID"].append(self.id)
object_dict["ObjectID"].append(piece.id)
object_dict["EventID"].append(self.unique_events["EventID"][j])
object_dict["ActivityID"].append(object_log["ActivityID"][i])
object_dict["EventStart"].append(object_log["Timestamp"][i])
x, y = object_log["Geometry"][i].x, object_log["Geometry"][i].y
for k, LocationID in enumerate(
self.unique_locations["LocationID"]
):
if (
x == self.unique_locations["Longitude"][k]
and y == self.unique_locations["Latitude"][k]
):
object_dict["LocationID"].append(LocationID)
elif message == event + " stop":
object_dict["EventStop"].append(object_log["Timestamp"][i])
# Create durations column
object_df = pd.DataFrame.from_dict(object_dict)
durations = object_df["EventStop"] - object_df["EventStart"]
durations_days = []
for event in durations:
durations_days.append(event.total_seconds() / 3600 / 24)
object_df["EventDuration"] = durations_days
# Check if combination of simulation ID and object ID already exists
if len(unique_df["SimulationID"]) == 0:
unique_df = object_df
elif not (unique_df["SimulationID"] == self.id).any():
unique_df = pd.concat([unique_df, object_df], ignore_index=True)
elif self.overwrite == True:
drop_rows = []
for i, row in enumerate(unique_df["SimulationID"] == self.id):
if row == True:
drop_rows.append(i)
unique_df = unique_df.drop(drop_rows, axis=0)
unique_df = pd.concat([unique_df, object_df], ignore_index=True)
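# The drop_rows loop above can also be written as a single boolean mask
# (an equivalent sketch, not applied here):
#   unique_df = unique_df[unique_df["SimulationID"] != self.id]
#   unique_df = pd.concat([unique_df, object_df], ignore_index=True)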
else:
raise KeyError(
"Simulation ID or simulation name already exists. "
+ "If you wish to overwrite the existing data, set overwrite to True"
)
self.equipment_log = unique_df
def get_spill(self):
"""
Obtain a log of all dredging spill events
"""
object_dict = {
"SimulationID": [],
"ObjectID": [],
"EventID": [],
"ActivityID": [],
"LocationID": [],
"SpillStart": [],
"SpillStop": [],
"SpillDuration": [],
"Spill": [],
}
try:
unique_df = pd.read_csv(self.location + "dredging_spill.csv")
except FileNotFoundError:
unique_df = pd.DataFrame.from_dict(object_dict)
for piece in self.equipment:
object_log = pd.DataFrame.from_dict(piece.log)
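# For every "fines released" entry, bracket the spill event: scan backwards
# for the nearest "<event> start" message and forwards for the matching
# "<event> stop" message, then record the spill against that event.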
for i, message in enumerate(object_log["Message"]):
if message == "fines released":
loop_list = list(object_log["Message"][0:i])
for j, event_message in enumerate(loop_list[::-1]):
if "start" in event_message:
event_start_time = object_log["Timestamp"][i - j - 1]
event_start_msg = event_message.replace(" start", "")
break
loop_list = list(object_log["Message"][i::])
for j, event_message in enumerate(loop_list):
if "stop" in event_message:
event_stop_time = object_log["Timestamp"][i + j]
event_stop_msg = event_message.replace(" stop", "")
break
assert event_start_msg == event_stop_msg
for j, event in enumerate(self.unique_events["EventName"]):
if event_start_msg == event:
object_dict["SimulationID"].append(self.id)
object_dict["ObjectID"].append(piece.id)
object_dict["EventID"].append(
self.unique_events["EventID"][j]
)
object_dict["ActivityID"].append(
object_log["ActivityID"][i]
)
object_dict["SpillStart"].append(event_start_time)
object_dict["SpillStop"].append(event_stop_time)
object_dict["SpillDuration"].append(
(event_stop_time - event_start_time).total_seconds()
/ 3600
/ 24
)
object_dict["Spill"].append(object_log["Value"][i])
x, y = (
object_log["Geometry"][i].x,
object_log["Geometry"][i].y,
)
for k, LocationID in enumerate(
self.unique_locations["LocationID"]
):
if (
x == self.unique_locations["Longitude"][k]
and y == self.unique_locations["Latitude"][k]
):
object_dict["LocationID"].append(LocationID)
object_df = pd.DataFrame.from_dict(object_dict)
if len(unique_df["SimulationID"]) == 0:
unique_df = object_df
elif not (unique_df["SimulationID"] == self.id).any():
unique_df = | pd.concat([unique_df, object_df], ignore_index=True) | pandas.concat |
import numpy as np
import pandas as pd
import datetime as dt
import pickle
import bz2
from .analyzer import summarize_returns
DATA_PATH = '../backtest/'
class Portfolio():
"""
Portfolio is the core class for event-driven backtesting. It conducts the
backtesting in the following order:
1. Initialization:
Set the capital base we invest and the securities we
want to trade.
2. Receive the price information with .receive_price():
Insert the new price information for each security so that the
Portfolio class will calculate and update the relevant status, such
as the portfolio value and position weights.
3. Rebalance with .rebalance():
Depending on the signal, we can choose to change the position
on each security.
4. Keep position with .keep_position():
If we don't rebalance the portfolio, we need to tell it to keep
the current position at the end of the market day.
Example
-------
see Vol_MA.ipynb, Vol_MA_test_robustness.ipynb (a usage sketch also
appears at the end of this docstring)
Parameters
----------
capital: numeric
capital base we put into the portfolio
inception: datetime.datetime
the time when we start backtesting
components: list of str
tickers of securities to trade, such as ['AAPL', 'MSFT', 'AMZN']
name: str
name of the portfolio
is_share_integer: boolean
If true, the shares of securities will be rounded to integers.
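Usage sketch
------------
A minimal sketch of the workflow above (the method names come from this
docstring; the argument names are illustrative assumptions, see the
example notebooks for the actual calls)::

    pf = Portfolio(capital=1e6, inception=dt.datetime(2020, 1, 1),
                   components=['AAPL', 'MSFT'])
    pf.receive_price(latest_prices)   # step 2: update prices / valuations
    pf.rebalance(target_positions)    # step 3: change positions on a signal
    pf.keep_position()                # step 4: otherwise keep the position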
"""
def __init__(self, capital, inception, components,
name='portfolio', is_share_integer=False):
# -----------------------------------------------
# initialize parameters
# -----------------------------------------------
self.capital = capital # initial money invested
if isinstance(components, str):
components = [components] # should be list
self.components = components # equities in the portfolio
# self.commission_rate = commission_rate
self.inception = inception
self.component_prices = pd.DataFrame(columns=self.components)
self.name = name
self.is_share_integer = is_share_integer
# self.benchmark = benchmark
# -----------------------------------------------
# record portfolio status to series and dataFrames
# -----------------------------------------------
# temporary values
self._nav = pd.Series(capital,index=[inception])
self._cash = pd.Series(capital,index=[inception])
self._security = pd.Series(0,index=[inception])
self._component_prices = pd.DataFrame(columns=self.components) # empty
self._shares = pd.DataFrame(0, index=[inception], columns=self.components)
self._positions = pd.DataFrame(0, index=[inception], columns=self.components)
self._weights = pd.DataFrame(0, index=[inception], columns=self.components)
self._share_changes = pd.DataFrame(columns=self.components) # empty
self._now = self.inception
self._max_nav = pd.Series(capital,index=[inception])
self._drawdown = pd.Series(0, index=[inception])
self._relative_drawdown = pd.Series(0, index=[inception])
# series
self.nav_open = pd.Series()
self.nav_close = pd.Series()
self.cash_open = pd.Series()
self.cash_close = pd.Series()
self.security_open = pd.Series()
self.security_close = pd.Series()
self.max_nav = pd.Series()
self.drawdown_open = pd.Series()
self.drawdown_close = pd.Series()
self.relative_drawdown_open = pd.Series()
self.relative_drawdown_close = | pd.Series() | pandas.Series |
"""Tests for climTrend.
Author: <NAME>
"""
from climvis import climtrend
import numpy as np
import pandas as pd
import pandas.util.testing as pdt
import bokeh
def test_get_lat_lon():
city = 'Innsbruck'
city_2 = climtrend.cities_list[1]
lat_corr = 47.2666667
lon_corr = 11.4
lat, lon = climtrend.get_lat_lon(city)
lat2, lon2 = climtrend.get_lat_lon(city_2)
np.testing.assert_almost_equal(lat, lat_corr, decimal=4)
np.testing.assert_almost_equal(lon, lon_corr, decimal=4)
def test_resample_data():
# Some dummy data.
data = np.arange(24)
index = pd.date_range('2017-01-01', periods=24,
freq='MS').tolist()
df = pd.DataFrame(data, index)
method = 'Yearly'
variable = 'Temperature'
lat = 15
df_resample = climtrend.resample_data(df, method, variable, lat)
# What the returned data is supposed to look like.
index_corr = pd.date_range('2017-12-31', periods=2, freq='12M').tolist()
data_corr = [data[:12].mean(), data[12:].mean()]
df_corr = pd.DataFrame(data_corr, index_corr)
pdt.assert_frame_equal(df_resample, df_corr)
# Case using summer period.
method = 'Summer'
variable = 'Temperature'
df_resample_summer = climtrend.resample_data(df, method, variable, lat)
# Maybe this is a bit too lazy.
df_corr_summer = df['2017-04':].resample('6M', closed='left').mean()[0::2]
pdt.assert_frame_equal(df_resample_summer, df_corr_summer)
# Case using winter period.
method = 'Winter'
variable = 'Temperature'
df_resample_winter = climtrend.resample_data(df, method, variable, lat)
df_corr_winter = df['2017-04':].resample('6M', closed='left').mean()[1::2]
| pdt.assert_frame_equal(df_resample_winter, df_corr_winter) | pandas.util.testing.assert_frame_equal |
"""
Additional tests for PandasArray that aren't covered by
the interface tests.
"""
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.arrays import PandasArray
from pandas.core.arrays.numpy_ import PandasDtype
@pytest.fixture(
params=[
np.array(["a", "b"], dtype=object),
np.array([0, 1], dtype=float),
np.array([0, 1], dtype=int),
np.array([0, 1 + 2j], dtype=complex),
np.array([True, False], dtype=bool),
np.array([0, 1], dtype="datetime64[ns]"),
np.array([0, 1], dtype="timedelta64[ns]"),
]
)
def any_numpy_array(request):
"""
Parametrized fixture for NumPy arrays with different dtypes.
This excludes string and bytes.
"""
return request.param
# ----------------------------------------------------------------------------
# PandasDtype
@pytest.mark.parametrize(
"dtype, expected",
[
("bool", True),
("int", True),
("uint", True),
("float", True),
("complex", True),
("str", False),
("bytes", False),
("datetime64[ns]", False),
("object", False),
("void", False),
],
)
def test_is_numeric(dtype, expected):
dtype = PandasDtype(dtype)
assert dtype._is_numeric is expected
@pytest.mark.parametrize(
"dtype, expected",
[
("bool", True),
("int", False),
("uint", False),
("float", False),
("complex", False),
("str", False),
("bytes", False),
("datetime64[ns]", False),
("object", False),
("void", False),
],
)
def test_is_boolean(dtype, expected):
dtype = PandasDtype(dtype)
assert dtype._is_boolean is expected
def test_repr():
dtype = PandasDtype(np.dtype("int64"))
assert repr(dtype) == "PandasDtype('int64')"
def test_constructor_from_string():
result = PandasDtype.construct_from_string("int64")
expected = PandasDtype(np.dtype("int64"))
assert result == expected
# ----------------------------------------------------------------------------
# Construction
def test_constructor_no_coercion():
with pytest.raises(ValueError, match="NumPy array"):
PandasArray([1, 2, 3])
def test_series_constructor_with_copy():
ndarray = np.array([1, 2, 3])
ser = pd.Series(PandasArray(ndarray), copy=True)
assert ser.values is not ndarray
def test_series_constructor_with_astype():
ndarray = np.array([1, 2, 3])
result = pd.Series(PandasArray(ndarray), dtype="float64")
expected = pd.Series([1.0, 2.0, 3.0], dtype="float64")
tm.assert_series_equal(result, expected)
def test_from_sequence_dtype():
arr = np.array([1, 2, 3], dtype="int64")
result = PandasArray._from_sequence(arr, dtype="uint64")
expected = PandasArray(np.array([1, 2, 3], dtype="uint64"))
tm.assert_extension_array_equal(result, expected)
def test_constructor_copy():
arr = np.array([0, 1])
result = PandasArray(arr, copy=True)
assert np.shares_memory(result._ndarray, arr) is False
def test_constructor_with_data(any_numpy_array):
nparr = any_numpy_array
arr = PandasArray(nparr)
assert arr.dtype.numpy_dtype == nparr.dtype
# ----------------------------------------------------------------------------
# Conversion
def test_to_numpy():
arr = PandasArray(np.array([1, 2, 3]))
result = arr.to_numpy()
assert result is arr._ndarray
result = arr.to_numpy(copy=True)
assert result is not arr._ndarray
result = arr.to_numpy(dtype="f8")
expected = np.array([1, 2, 3], dtype="f8")
tm.assert_numpy_array_equal(result, expected)
# ----------------------------------------------------------------------------
# Setitem
def test_setitem_series():
ser = | pd.Series([1, 2, 3]) | pandas.Series |
import json
import numpy as np
import random, csv, math
from collections import OrderedDict
from queue import PriorityQueue
import argparse, os
import time
from textwrap import wrap
import subprocess
import os, sys
import libs.inputs as inputs
import shutil
import random
from shutil import copyfile
import libs.query_json as query_json
import configparser
import libs.definition as definition
from copy import deepcopy
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
import random
import logging
import libs.search_common as search_common
from nltk.cluster import KMeansClusterer, euclidean_distance
def align_fields(dict_to_align, field_info):
lst = []
for fid, f_metadata in field_info.items():
lst.append(dict_to_align[fid] )
return lst
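# Example (a sketch): with field_info ordered as {"f1": ..., "f2": ...} and
# dict_to_align == {"f2": 7, "f1": 3}, align_fields returns [3, 7], i.e. the
# values reordered to follow the key order of field_info.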
def parse_json(f, field_info, minAF):
json_data = query_json.read_json(f)
data_list = []
amp_list = []
for entry in json_data:
fv = align_fields(entry["fields"], field_info)
amp_factor = round(float(entry["amp_factor"]), 4)
server_ip = entry["server_ip"]
if amp_factor >= minAF:
data_list.append(fv)
amp_list.append(amp_factor)
# print(fv)
return data_list, amp_list
def iterate_files(base_dir, minAF, field_info ):
lol = []
feature_id = []
print(base_dir)
data_list = []
amp_list = []
for root, dirs, files in os.walk(base_dir):
for sub_dir in dirs:
path = os.path.join( root, sub_dir )
json_files = os.listdir(path)
for f in json_files:
file_path = os.path.join( path, f )
data, amp = parse_json(file_path, field_info, minAF)
data_list = data_list + data
amp_list = amp_list + amp
assert(len(amp_list) == len(data_list))
return data_list, amp_list
def parse_data(ampdata , proto_fields, minAF):
data_list = []
amp_list = []
for server_ip, server_data in ampdata.items():
for entry in server_data:
amp_factor = round(float(entry["amp_factor"]), 4) #server_data["amp_factor"]
fv = align_fields(entry["fields"], proto_fields)
if amp_factor >= minAF:
data_list.append(fv)
amp_list.append(amp_factor)
assert(len(amp_list) == len(data_list))
return data_list, amp_list
def convert_dataframe_to_list(df ):
data = []
for index, row in df.iterrows():
data.append(row.tolist() )
return data
'''
Normalize each field, since some take large values vs. small ones and some
are discrete vs. continuous (a worked example follows the function body).
'''
def normalize_field(df, proto_fields, numBin = 10 ):
print("proto fields ", proto_fields)
for c in df.columns:
print("c is " , c , " and df.columns is ", df.columns)
logging.info("\nFIELD c is {} and and df.columns is {}".format(c,df.columns))
f_metadata = proto_fields[c]
'''
Field only takes one value so normalize to all 1
'''
if len(f_metadata.accepted_range) == 1:
print("Field type 0 ", c , type(f_metadata.accepted_range))
val_max = 1
logging.info("\tvalue max is {}".format(val_max))
df[c] = pd.Series(df[c]).astype('category')
df[c] = df[c].cat.codes
# Field takes a set of discrete values
# Then it is a categorical field
elif f_metadata.is_int == False or type(f_metadata.accepted_range) != range :
print("Field type 1 ", c , type(f_metadata.accepted_range))
logging.info("Field type 1 {} : {}".format(c, type(f_metadata.accepted_range)))
val_max = len(f_metadata.accepted_range) - 1
df[c] = pd.Series(df[c]).astype('category')
df[c] = df[c].cat.codes
# For field that takes a range of contiguous values, accordingly bin the values
elif len(f_metadata.accepted_range) > definition.SMALL_FIELD_THRESHOLD:
print("Large range is ", c)
print("Field type 2 ", c , type(f_metadata.accepted_range))
logging.info("Field type 2 {} : {}".format(c, type(f_metadata.accepted_range)))
bin_size = math.ceil(f_metadata.accepted_range[-1] / numBin)
logging.info("Bin size is {}".format(bin_size))
print("bin size ", bin_size)
df[c] = df[c]//bin_size
val_max = max(df[c])
logging.info("\tvalue max is {}".format(val_max))
print("\tvalue max is {}".format(val_max))
# For all others,
else:
print("Field type 3 ", c , type(f_metadata.accepted_range))
logging.info("Field type 3 {} : {}".format(c, type(f_metadata.accepted_range)))
val_max = max(f_metadata.accepted_range)
#print(df[c])
df[c] = df[c].astype(float)
# Corner case: a field takes a value of 0 only
if len(f_metadata.accepted_range) == 1 and f_metadata.accepted_range[0] == 0:
print("Field ", c , " is NOT normalized ")
continue
# Make the range betwen n 0 to 1
if val_max != 0:
df[c] = df[c]/val_max * 1
return df
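# Worked example (a sketch, assuming a field whose accepted_range is
# range(0, 65536), numBin = 10 and SMALL_FIELD_THRESHOLD below 65536):
# bin_size = ceil(65535 / 10) = 6554, so a raw value of 13000 falls into bin
# 13000 // 6554 = 1, and the column is then divided by its largest observed
# bin index so that every value ends up in [0, 1].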
def pick_representative_queries_weight_based(query_to_cluster, num_probe, num_cluster):
num_cluster = len(query_to_cluster)
num_spent = 0
cluster_ids = list(query_to_cluster.keys())
queries_final = []
'''
If size of cluster is greater than probe,
then make sure we pick one sample from each cluster
'''
while num_spent < num_probe and len(query_to_cluster) > 0 :
cluster_ids = list(query_to_cluster.keys())
weights, scaled_weights = compute_weights(query_to_cluster)
cid = random.choices( cluster_ids, weights=weights , k=1)[0]
rand_index = random.randint(0, len(query_to_cluster[cid]) -1 )
picked_query = query_to_cluster[cid][rand_index]
query_to_cluster[cid].pop(rand_index)
queries_final.append(picked_query)
num_spent = num_spent + 1
if len(query_to_cluster[cid]) == 0:
del query_to_cluster[cid]
if len(query_to_cluster) == 0:
print("Exiting as all queries used")
break
return queries_final
def pick_representative_queries_hybrid_weight(query_to_cluster, num_probe, num_cluster):
num_spent = 0
cluster_ids = list(query_to_cluster.keys())
queries_final = []
'''
If size of cluster is greater than probe, then we pick X from the first NUM_PROBE clusters
'''
if num_cluster >= num_probe:
print("Num cluster > Num Probe so pick random cluster .. ")
# sample WITHOUT replacement
chosen_cids = random.sample(cluster_ids, num_probe)
for cid in chosen_cids:
rand_index = random.randint(0, len(query_to_cluster[cid]) -1 )
picked_query = query_to_cluster[cid][rand_index]
queries_final.append(picked_query)
query_to_cluster[cid].pop(rand_index)
print("picked 1 from each cluster")
return queries_final
'''
Sample based on the weight (size) of each cluster
'''
#WEIGHT BASED
print('WEIGHT based')
while num_spent < num_probe and len(query_to_cluster) > 0 :
cluster_ids = list(query_to_cluster.keys())
weights, scaled_weights = compute_weights(query_to_cluster)
cid = random.choices( cluster_ids, weights=weights , k=1)[0]
rand_index = random.randint(0, len(query_to_cluster[cid]) -1 )
picked_query = query_to_cluster[cid][rand_index]
query_to_cluster[cid].pop(rand_index)
queries_final.append(picked_query)
num_spent = num_spent + 1
if len(query_to_cluster[cid]) == 0:
del query_to_cluster[cid]
if len(query_to_cluster) == 0:
print("Exiting as all queries used")
break
return queries_final
'''
Pick samples and give equal weight to each cluster
'''
def pick_representative_queries_equal_weight(query_to_cluster, num_probe, num_cluster):
num_spent = 0
cluster_ids = list(query_to_cluster.keys())
queries_final = []
'''
If size of cluster is greater than probe, then we pick X from the first NUM_PROBE clusters
'''
if num_cluster >= num_probe:
print("Num cluster > Num Probe so pick random cluster .. ")
#sample WITHOUT replacement
chosen_cids = random.sample(cluster_ids, num_probe)
for cid in chosen_cids:
rand_index = random.randint(0, len(query_to_cluster[cid]) -1 )
picked_query = query_to_cluster[cid][rand_index]
queries_final.append(picked_query)
query_to_cluster[cid].pop(rand_index)
return queries_final
'''
Pick one from each cluster ..
'''
del_keys = set()
for cid, queries in query_to_cluster.items() :
rand_index = random.randint(0, len(queries) -1 )
picked_query = query_to_cluster[cid][rand_index]
queries_final.append(picked_query)
query_to_cluster[cid].pop(rand_index)
num_spent = num_spent + 1
if len(query_to_cluster[cid]) == 0:
del_keys.add(cid )
if num_spent >= num_probe:
break
print("picked {} so far ".format(num_spent) )
print("Deleting cluster IDs that are empty {} ".format(del_keys) )
# Delete cluster ID if we picked all from that cluster
for cid in del_keys:
del query_to_cluster[cid]
while num_spent < num_probe and len(query_to_cluster) > 0 :
cluster_ids = list(query_to_cluster.keys())
cid = random.choice( cluster_ids )
rand_index = random.randint(0, len(query_to_cluster[cid]) -1 )
picked_query = query_to_cluster[cid][rand_index]
query_to_cluster[cid].pop(rand_index)
queries_final.append(picked_query)
num_spent = num_spent + 1
if len(query_to_cluster[cid]) == 0:
del query_to_cluster[cid]
if len(query_to_cluster) == 0:
print("Exiting as all queries used")
break
return queries_final
def compute_weights(query_to_cluster):
weights = []
total_weight = 0
for cid, queries in query_to_cluster.items():
weights.append(len(queries))
#print(len(queries))
total_weight = total_weight + len(queries)
scaled_weights = []
for w in weights:
scaled_weights.append( w/total_weight )
return weights, scaled_weights
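# Example (a sketch): query_to_cluster = {0: [q0, q1, q2], 1: [q3]} yields
# weights == [3, 1] and scaled_weights == [0.75, 0.25], i.e. the raw cluster
# sizes and the same sizes normalised to sum to 1.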
def pick_representative_queries_new(query_to_cluster, num_probe):
num_cluster = len(query_to_cluster)
num_spent = 0
cluster_ids = list(query_to_cluster.keys())
queries_final = []
'''
if size of cluster is greater than probe, then we pick X from the first NUM_PROBE clusters
'''
if num_cluster >= num_probe:
print("Num cluster > Num Probe SO pick at least one sample ")
chosen_cids = random.sample(cluster_ids, num_probe)
#print(sorted(chosen_cids) ) # len(chosen_cid), len(set(chosen_cid)))
for cid in chosen_cids:
rand_index = random.randint(0, len(query_to_cluster[cid]) -1 )
picked_query = query_to_cluster[cid][rand_index]
queries_final.append(picked_query)
#Debugged this problem -- May 29, <NAME>
#queries_final.append(picked_query[0])
#Delete the picked queries
query_to_cluster[cid].pop(rand_index)
return queries_final
'''
If num_cluster < num_probe :
Pick one from each other ..
'''
for cid, queries in query_to_cluster.items() :
rand_index = random.randint(0, len(query_to_cluster[cid]) -1 )
picked_query = query_to_cluster[cid][rand_index]
queries_final.append(picked_query)
#Delete the chosen queries
query_to_cluster[cid].pop(rand_index)
num_spent = num_spent + 1
if num_spent >= num_probe:
break
print("picked {} so far ".format(num_spent) )
while num_spent < num_probe:
cluster_ids = list(query_to_cluster.keys())
weights, scaled_weights = compute_weights(query_to_cluster)
cid = np.random.choice( cluster_ids, 1, p=scaled_weights)[0]
rand_index = random.randint(0, len(query_to_cluster[cid]) -1 )
picked_query = query_to_cluster[cid][rand_index]
query_to_cluster[cid].pop(rand_index)
queries_final.append(picked_query)
num_spent = num_spent + 1
if len(query_to_cluster[cid]) == 0:
del query_to_cluster[cid]
if len(query_to_cluster) == 0:
print("Exiting as all queries used")
break
return queries_final
def pick_representative_queries(query_to_cluster, num_probe):
num_cluster = len(query_to_cluster)
num_spent = 0
queries_final = []
'''
If size of cluster is greater than probe, then
we pick one sample from each cluster
'''
if num_cluster >= num_probe:
print("Num cluster > Num Probe SO pick at least one sample ")
#pick at least
print(query_to_cluster)
for cid, queries in query_to_cluster.items() :
#print(query_to_cluster)
rand_index = random.randint(0, len(query_to_cluster[cid]) -1 )
picked_query = query_to_cluster[cid][rand_index]
queries_final.append(picked_query)
#Delete the picked queries
query_to_cluster[cid].pop(rand_index)
print(" Picked id ", num_spent , ":" ,picked_query[0], picked_query[2], "with cid ", cid )
num_spent = num_spent + 1
while num_spent < num_probe:
cluster_ids = list(query_to_cluster.keys())
weights, scaled_weights = compute_weights(query_to_cluster)
cid = np.random.choice( cluster_ids, 1, p=scaled_weights)[0]
rand_index = random.randint(0, len(query_to_cluster[cid]) -1 )
picked_query = query_to_cluster[cid][rand_index]
print(" ID " , num_spent , ":" , picked_query[0] , picked_query[2], "with cid ", cid )
query_to_cluster[cid].pop(rand_index)
queries_final.append(picked_query)
num_spent = num_spent + 1
if len(query_to_cluster[cid]) == 0:
del query_to_cluster[cid]
if len(query_to_cluster) == 0:
print("Exising as all queries used")
break
return queries_final
def convert_to_dict(chosen_queries, proto_fields):
field_names = list(proto_fields.keys())
queries_dict = []
for entry in chosen_queries:
#print("entry " ,entry)
q = deepcopy(entry)
#print("q is ", q)
#print("field name is ", field_names)
q_dict = OrderedDict()
for i in range(len(field_names)):
# Yucheng: 2/4/2019, maybe buggy
#print("q is ", q)
q_dict[field_names[i]] = q[0][i]
# q_dict[field_names[i]] = q[i]
queries_dict.append(q_dict)
return queries_dict
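# Example (a sketch): with proto_fields keys ["fieldA", "fieldB"] and a chosen
# query whose feature vector (entry[0]) is [1, 2], the resulting entry in
# queries_dict is OrderedDict([("fieldA", 1), ("fieldB", 2)]).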
'''
Cluster the queries whose amplification factor (AF) is at least minAF.
Returns the chosen queries used for probing.
'''
def cluster(ampdata, proto_fields, num_cluster, minAF, num_queries, is_measurement, args, config, log_file=None):
minAF = float(minAF)
'''
Logging features
'''
print("In clustering")
if log_file == None:
log_file = os.path.join( config["common_path"]["log_out_dir"], "cluster.log")
logging.basicConfig(filename=log_file,format='%(levelname)s : %(asctime)s \t %(message)s', \
datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.DEBUG)
logging.info("In clustering")
'''
Only fetch those queries with AF >= minAF
'''
data_list, amp_list = parse_data(ampdata , proto_fields, minAF)
assert(len(data_list) == len(amp_list))
print("Length of data is {}".format(len(data_list)))
logging.info("Length of data is %s", len(data_list))
if len(data_list) == 0:
return []
amp_data_frame = | pd.DataFrame(data_list) | pandas.DataFrame |
import eikon as ek # the Eikon Python wrapper package
import numpy as np # NumPy
import pandas as pd # pandas
import configparser as cp
import warnings
import sys
# underlying use case
warnings.filterwarnings("ignore")
df = pd.read_csv(r'C:\Users\segul\OneDrive\Documents\ReutersTickers.csv', header=None)
ek.set_app_key('a17f5ab013e449cc86857ecbfe62f4c96577df86')
tickers = list(df[0])
data, error = ek.get_data(tickers,'TR.CDSPrimaryCDSRic')
data.dropna(inplace=True)
data = data[data['Primary CDS RIC']!='']
data.to_csv(r'C:\Users\segul\OneDrive\Documents\ReutersTickersData.csv')
cdsIdList = [str(cdsId) for cdsId in data.iloc[:, 1].values]
result, err = ek.get_data(cdsIdList, ['PRIMACT_1'])
result.to_csv(r'C:\Users\segul\OneDrive\Documents\ReutersTickersOutput2.csv')
dfFinal = | pd.merge(data, result, left_on='Primary CDS RIC', right_on='Instrument', how='left') | pandas.merge |
"""
The TypedDict class
"""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import numpy as _np
def _columndict_to_dataframe(columns, seriestypes):
import pandas as _pandas
columns_as_series = {}
for colname, lst in columns.items():
seriestype = seriestypes[colname]
if seriestype == 'float':
s = _np.array(lst, dtype='d')
elif seriestype == 'int':
s = _np.array(lst, dtype=int) # or pd.Series w/dtype?
elif seriestype == 'category':
if len(lst) > 0 and isinstance(lst[0], tuple):
# special case when the values for a category are tuples. Often they're different lengths
# (e.g. qubit labels) and we want the Categorical to use an object-type numpy array to
# avoid any "ragged nested sequences" warnings, so do this:
lst = _pandas.Series(lst, dtype=object)
s = _pandas.Categorical(lst)
elif seriestype == 'object':
s = | _pandas.Series(lst, dtype=object) | pandas.Series |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188: pd.Timestamp("2012-11-06 00:00:00"),
189: pd.Timestamp("2012-11-07 00:00:00"),
190: pd.Timestamp("2012-11-08 00:00:00"),
191: pd.Timestamp("2012-11-09 00:00:00"),
192: pd.Timestamp("2012-11-10 00:00:00"),
193: pd.Timestamp("2012-11-11 00:00:00"),
194: pd.Timestamp("2012-11-12 00:00:00"),
195: pd.Timestamp("2012-11-13 00:00:00"),
196: pd.Timestamp("2012-11-14 00:00:00"),
197: pd.Timestamp("2012-11-15 00:00:00"),
198: pd.Timestamp("2012-11-16 00:00:00"),
199: pd.Timestamp("2012-11-17 00:00:00"),
200: pd.Timestamp("2012-11-18 00:00:00"),
201: pd.Timestamp("2012-11-19 00:00:00"),
202: pd.Timestamp("2012-11-20 00:00:00"),
203: pd.Timestamp("2012-11-21 00:00:00"),
204: pd.Timestamp("2012-11-22 00:00:00"),
205: pd.Timestamp("2012-11-23 00:00:00"),
206: pd.Timestamp("2012-11-24 00:00:00"),
207: pd.Timestamp("2012-11-25 00:00:00"),
208: pd.Timestamp("2012-11-26 00:00:00"),
209: pd.Timestamp("2012-11-27 00:00:00"),
210: pd.Timestamp("2012-11-28 00:00:00"),
211: pd.Timestamp("2012-11-29 00:00:00"),
212: pd.Timestamp("2012-11-30 00:00:00"),
213: pd.Timestamp("2012-12-01 00:00:00"),
214: pd.Timestamp("2012-12-02 00:00:00"),
215: pd.Timestamp("2012-12-03 00:00:00"),
216: pd.Timestamp("2012-12-04 00:00:00"),
217: pd.Timestamp("2012-12-05 00:00:00"),
218: pd.Timestamp("2012-12-06 00:00:00"),
219: pd.Timestamp("2012-12-07 00:00:00"),
220: pd.Timestamp("2012-12-08 00:00:00"),
221: pd.Timestamp("2012-12-09 00:00:00"),
222: pd.Timestamp("2012-12-10 00:00:00"),
223: pd.Timestamp("2012-12-11 00:00:00"),
224: pd.Timestamp("2012-12-12 00:00:00"),
225: pd.Timestamp("2012-12-13 00:00:00"),
226: pd.Timestamp("2012-12-14 00:00:00"),
227: pd.Timestamp("2012-12-15 00:00:00"),
228: pd.Timestamp("2012-12-16 00:00:00"),
229: pd.Timestamp("2012-12-17 00:00:00"),
230: pd.Timestamp("2012-12-18 00:00:00"),
231: pd.Timestamp("2012-12-19 00:00:00"),
232: pd.Timestamp("2012-12-20 00:00:00"),
233: pd.Timestamp("2012-12-21 00:00:00"),
234: pd.Timestamp("2012-12-22 00:00:00"),
235: pd.Timestamp("2012-12-23 00:00:00"),
236: pd.Timestamp("2012-12-24 00:00:00"),
237: pd.Timestamp("2012-12-25 00:00:00"),
238: pd.Timestamp("2012-12-26 00:00:00"),
239: pd.Timestamp("2012-12-27 00:00:00"),
240: pd.Timestamp("2012-12-28 00:00:00"),
241: pd.Timestamp("2012-12-29 00:00:00"),
242: pd.Timestamp("2012-12-30 00:00:00"),
243: pd.Timestamp("2012-12-31 00:00:00"),
244: pd.Timestamp("2013-01-01 00:00:00"),
245: pd.Timestamp("2013-01-02 00:00:00"),
246: pd.Timestamp("2013-01-03 00:00:00"),
247: pd.Timestamp("2013-01-04 00:00:00"),
248: pd.Timestamp("2013-01-05 00:00:00"),
249: pd.Timestamp("2013-01-06 00:00:00"),
250: pd.Timestamp("2013-01-07 00:00:00"),
251: pd.Timestamp("2013-01-08 00:00:00"),
252: pd.Timestamp("2013-01-09 00:00:00"),
253: pd.Timestamp("2013-01-10 00:00:00"),
254: pd.Timestamp("2013-01-11 00:00:00"),
255: pd.Timestamp("2013-01-12 00:00:00"),
256: pd.Timestamp("2013-01-13 00:00:00"),
257: pd.Timestamp("2013-01-14 00:00:00"),
258: pd.Timestamp("2013-01-15 00:00:00"),
259: pd.Timestamp("2013-01-16 00:00:00"),
260: pd.Timestamp("2013-01-17 00:00:00"),
261: pd.Timestamp("2013-01-18 00:00:00"),
262: pd.Timestamp("2013-01-19 00:00:00"),
263: pd.Timestamp("2013-01-20 00:00:00"),
264: pd.Timestamp("2013-01-21 00:00:00"),
265: pd.Timestamp("2013-01-22 00:00:00"),
266: pd.Timestamp("2013-01-23 00:00:00"),
267: pd.Timestamp("2013-01-24 00:00:00"),
268: pd.Timestamp("2013-01-25 00:00:00"),
269: pd.Timestamp("2013-01-26 00:00:00"),
270: pd.Timestamp("2013-01-27 00:00:00"),
271: pd.Timestamp("2013-01-28 00:00:00"),
272: pd.Timestamp("2013-01-29 00:00:00"),
273: pd.Timestamp("2013-01-30 00:00:00"),
274: pd.Timestamp("2013-01-31 00:00:00"),
275: pd.Timestamp("2013-02-01 00:00:00"),
276: pd.Timestamp("2013-02-02 00:00:00"),
277: pd.Timestamp("2013-02-03 00:00:00"),
278: pd.Timestamp("2013-02-04 00:00:00"),
279: pd.Timestamp("2013-02-05 00:00:00"),
280: pd.Timestamp("2013-02-06 00:00:00"),
281: pd.Timestamp("2013-02-07 00:00:00"),
282: pd.Timestamp("2013-02-08 00:00:00"),
283: pd.Timestamp("2013-02-09 00:00:00"),
284: pd.Timestamp("2013-02-10 00:00:00"),
285: pd.Timestamp("2013-02-11 00:00:00"),
286: pd.Timestamp("2013-02-12 00:00:00"),
287: pd.Timestamp("2013-02-13 00:00:00"),
288: pd.Timestamp("2013-02-14 00:00:00"),
289: pd.Timestamp("2013-02-15 00:00:00"),
290: pd.Timestamp("2013-02-16 00:00:00"),
291: pd.Timestamp("2013-02-17 00:00:00"),
292: pd.Timestamp("2013-02-18 00:00:00"),
293: pd.Timestamp("2013-02-19 00:00:00"),
294: pd.Timestamp("2013-02-20 00:00:00"),
295: pd.Timestamp("2013-02-21 00:00:00"),
296: pd.Timestamp("2013-02-22 00:00:00"),
297: pd.Timestamp("2013-02-23 00:00:00"),
298: pd.Timestamp("2013-02-24 00:00:00"),
299: pd.Timestamp("2013-02-25 00:00:00"),
300: pd.Timestamp("2013-02-26 00:00:00"),
301: pd.Timestamp("2013-02-27 00:00:00"),
302: pd.Timestamp("2013-02-28 00:00:00"),
303: pd.Timestamp("2013-03-01 00:00:00"),
304: pd.Timestamp("2013-03-02 00:00:00"),
305: pd.Timestamp("2013-03-03 00:00:00"),
306: pd.Timestamp("2013-03-04 00:00:00"),
307: pd.Timestamp("2013-03-05 00:00:00"),
308: pd.Timestamp("2013-03-06 00:00:00"),
309: pd.Timestamp("2013-03-07 00:00:00"),
310: pd.Timestamp("2013-03-08 00:00:00"),
311: pd.Timestamp("2013-03-09 00:00:00"),
312: pd.Timestamp("2013-03-10 00:00:00"),
313: pd.Timestamp("2013-03-11 00:00:00"),
314: pd.Timestamp("2013-03-12 00:00:00"),
315: pd.Timestamp("2013-03-13 00:00:00"),
316: pd.Timestamp("2013-03-14 00:00:00"),
317: pd.Timestamp("2013-03-15 00:00:00"),
318: pd.Timestamp("2013-03-16 00:00:00"),
319: pd.Timestamp("2013-03-17 00:00:00"),
320: pd.Timestamp("2013-03-18 00:00:00"),
321: pd.Timestamp("2013-03-19 00:00:00"),
322: pd.Timestamp("2013-03-20 00:00:00"),
323: pd.Timestamp("2013-03-21 00:00:00"),
324: pd.Timestamp("2013-03-22 00:00:00"),
325: pd.Timestamp("2013-03-23 00:00:00"),
326: pd.Timestamp("2013-03-24 00:00:00"),
327: pd.Timestamp("2013-03-25 00:00:00"),
328: pd.Timestamp("2013-03-26 00:00:00"),
329: pd.Timestamp("2013-03-27 00:00:00"),
330: pd.Timestamp("2013-03-28 00:00:00"),
331: pd.Timestamp("2013-03-29 00:00:00"),
332: pd.Timestamp("2013-03-30 00:00:00"),
333: pd.Timestamp("2013-03-31 00:00:00"),
334: pd.Timestamp("2013-04-01 00:00:00"),
335: pd.Timestamp("2013-04-02 00:00:00"),
336: pd.Timestamp("2013-04-03 00:00:00"),
337: pd.Timestamp("2013-04-04 00:00:00"),
338: pd.Timestamp("2013-04-05 00:00:00"),
339: pd.Timestamp("2013-04-06 00:00:00"),
340: pd.Timestamp("2013-04-07 00:00:00"),
341: pd.Timestamp("2013-04-08 00:00:00"),
342: pd.Timestamp("2013-04-09 00:00:00"),
343: pd.Timestamp("2013-04-10 00:00:00"),
344: pd.Timestamp("2013-04-11 00:00:00"),
345: pd.Timestamp("2013-04-12 00:00:00"),
346: pd.Timestamp("2013-04-13 00:00:00"),
347: pd.Timestamp("2013-04-14 00:00:00"),
348: pd.Timestamp("2013-04-15 00:00:00"),
349: pd.Timestamp("2013-04-16 00:00:00"),
350: pd.Timestamp("2013-04-17 00:00:00"),
351: pd.Timestamp("2013-04-18 00:00:00"),
352: pd.Timestamp("2013-04-19 00:00:00"),
353: pd.Timestamp("2013-04-20 00:00:00"),
354: pd.Timestamp("2013-04-21 00:00:00"),
355: pd.Timestamp("2013-04-22 00:00:00"),
356: pd.Timestamp("2013-04-23 00:00:00"),
357: pd.Timestamp("2013-04-24 00:00:00"),
358: pd.Timestamp("2013-04-25 00:00:00"),
359: pd.Timestamp("2013-04-26 00:00:00"),
360: pd.Timestamp("2013-04-27 00:00:00"),
361: pd.Timestamp("2013-04-28 00:00:00"),
362: pd.Timestamp("2013-04-29 00:00:00"),
363: pd.Timestamp("2013-04-30 00:00:00"),
364: pd.Timestamp("2013-05-01 00:00:00"),
365: pd.Timestamp("2013-05-02 00:00:00"),
366: pd.Timestamp("2013-05-03 00:00:00"),
367: pd.Timestamp("2013-05-04 00:00:00"),
368: pd.Timestamp("2013-05-05 00:00:00"),
369: pd.Timestamp("2013-05-06 00:00:00"),
370: pd.Timestamp("2013-05-07 00:00:00"),
371: pd.Timestamp("2013-05-08 00:00:00"),
372: pd.Timestamp("2013-05-09 00:00:00"),
373: pd.Timestamp("2013-05-10 00:00:00"),
374: pd.Timestamp("2013-05-11 00:00:00"),
375: pd.Timestamp("2013-05-12 00:00:00"),
376: pd.Timestamp("2013-05-13 00:00:00"),
377: pd.Timestamp("2013-05-14 00:00:00"),
378: pd.Timestamp("2013-05-15 00:00:00"),
379: pd.Timestamp("2013-05-16 00:00:00"),
380: pd.Timestamp("2013-05-17 00:00:00"),
381: pd.Timestamp("2013-05-18 00:00:00"),
382: pd.Timestamp("2013-05-19 00:00:00"),
383: pd.Timestamp("2013-05-20 00:00:00"),
384: pd.Timestamp("2013-05-21 00:00:00"),
385: pd.Timestamp("2013-05-22 00:00:00"),
386: pd.Timestamp("2013-05-23 00:00:00"),
387: pd.Timestamp("2013-05-24 00:00:00"),
388: pd.Timestamp("2013-05-25 00:00:00"),
389: pd.Timestamp("2013-05-26 00:00:00"),
390: pd.Timestamp("2013-05-27 00:00:00"),
391: pd.Timestamp("2013-05-28 00:00:00"),
392: pd.Timestamp("2013-05-29 00:00:00"),
393: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.348604308646497,
1: 8.348964254851197,
2: 8.349324201055898,
3: 8.349684147260598,
4: 8.350044093465298,
5: 8.350404039669998,
6: 8.3507639858747,
7: 8.3511239320794,
8: 8.3514838782841,
9: 8.351843824488801,
10: 8.352203770693501,
11: 8.352563716898201,
12: 8.352923663102903,
13: 8.353283609307603,
14: 8.353643555512303,
15: 8.354003501717003,
16: 8.354363447921704,
17: 8.354723394126404,
18: 8.355083340331104,
19: 8.355443286535806,
20: 8.355803232740506,
21: 8.356163178945206,
22: 8.356523125149906,
23: 8.356883071354607,
24: 8.357243017559307,
25: 8.357602963764007,
26: 8.357962909968709,
27: 8.358322856173409,
28: 8.358682802378109,
29: 8.35904274858281,
30: 8.35940269478751,
31: 8.35976264099221,
32: 8.36012258719691,
33: 8.360482533401612,
34: 8.360842479606312,
35: 8.361202425811012,
36: 8.361562372015714,
37: 8.361922318220413,
38: 8.362282264425113,
39: 8.362642210629813,
40: 8.363002156834515,
41: 8.363362103039215,
42: 8.363722049243915,
43: 8.364081995448617,
44: 8.364441941653316,
45: 8.364801887858016,
46: 8.365161834062716,
47: 8.365521780267418,
48: 8.365881726472118,
49: 8.366241672676818,
50: 8.36660161888152,
51: 8.36696156508622,
52: 8.36732151129092,
53: 8.367681457495621,
54: 8.368041403700321,
55: 8.368401349905021,
56: 8.36876129610972,
57: 8.369121242314423,
58: 8.369481188519122,
59: 8.369841134723822,
60: 8.370201080928524,
61: 8.370561027133224,
62: 8.370920973337924,
63: 8.371280919542624,
64: 8.371640865747326,
65: 8.372000811952026,
66: 8.372360758156725,
67: 8.372720704361427,
68: 8.373080650566127,
69: 8.373440596770827,
70: 8.373800542975529,
71: 8.374160489180229,
72: 8.374520435384929,
73: 8.374880381589628,
74: 8.37524032779433,
75: 8.37560027399903,
76: 8.37596022020373,
77: 8.376320166408432,
78: 8.376680112613132,
79: 8.377040058817832,
80: 8.377400005022531,
81: 8.377759951227233,
82: 8.378119897431933,
83: 8.378479843636633,
84: 8.378839789841335,
85: 8.379199736046035,
86: 8.379559682250735,
87: 8.379919628455436,
88: 8.380279574660136,
89: 8.380639520864836,
90: 8.380999467069536,
91: 8.381359413274238,
92: 8.381719359478938,
93: 8.382079305683638,
94: 8.38243925188834,
95: 8.38279919809304,
96: 8.38315914429774,
97: 8.383519090502439,
98: 8.38387903670714,
99: 8.38423898291184,
100: 8.38459892911654,
101: 8.384958875321242,
102: 8.385318821525942,
103: 8.385678767730642,
104: 8.386038713935344,
105: 8.386398660140044,
106: 8.386758606344744,
107: 8.387118552549444,
108: 8.387478498754145,
109: 8.387838444958845,
110: 8.388198391163545,
111: 8.388558337368247,
112: 8.388918283572947,
113: 8.389278229777647,
114: 8.389638175982347,
115: 8.389998122187048,
116: 8.390358068391748,
117: 8.390718014596448,
118: 8.39107796080115,
119: 8.39143790700585,
120: 8.39179785321055,
121: 8.392157799415251,
122: 8.392517745619951,
123: 8.392877691824651,
124: 8.393237638029351,
125: 8.393597584234053,
126: 8.393957530438753,
127: 8.394317476643453,
128: 8.394677422848154,
129: 8.395037369052854,
130: 8.395397315257554,
131: 8.395757261462254,
132: 8.396117207666956,
133: 8.396477153871656,
134: 8.396837100076356,
135: 8.397197046281057,
136: 8.397556992485757,
137: 8.397916938690457,
138: 8.398276884895157,
139: 8.398636831099859,
140: 8.398996777304559,
141: 8.399356723509259,
142: 8.39971666971396,
143: 8.40007661591866,
144: 8.40043656212336,
145: 8.400796508328062,
146: 8.401156454532762,
147: 8.401516400737462,
148: 8.401876346942162,
149: 8.402236293146863,
150: 8.402596239351563,
151: 8.402956185556263,
152: 8.403316131760965,
153: 8.403676077965665,
154: 8.404036024170365,
155: 8.404395970375065,
156: 8.404755916579767,
157: 8.405115862784466,
158: 8.405475808989166,
159: 8.405835755193868,
160: 8.406195701398568,
161: 8.406555647603268,
162: 8.40691559380797,
163: 8.40727554001267,
164: 8.40763548621737,
165: 8.40799543242207,
166: 8.408355378626771,
167: 8.408715324831471,
168: 8.409075271036171,
169: 8.409435217240873,
170: 8.409795163445573,
171: 8.410155109650272,
172: 8.410515055854972,
173: 8.410875002059674,
174: 8.411234948264374,
175: 8.411594894469074,
176: 8.411954840673776,
177: 8.412314786878476,
178: 8.412674733083175,
179: 8.413034679287877,
180: 8.413394625492577,
181: 8.413754571697277,
182: 8.414114517901977,
183: 8.414474464106679,
184: 8.414834410311379,
185: 8.415194356516078,
186: 8.41555430272078,
187: 8.41591424892548,
188: 8.41627419513018,
189: 8.41663414133488,
190: 8.416994087539582,
191: 8.417354033744282,
192: 8.417713979948982,
193: 8.418073926153683,
194: 8.418433872358383,
195: 8.418793818563083,
196: 8.419153764767785,
197: 8.419513710972485,
198: 8.419873657177185,
199: 8.420233603381885,
200: 8.420593549586586,
201: 8.420953495791286,
202: 8.421313441995986,
203: 8.421673388200688,
204: 8.422033334405388,
205: 8.422393280610088,
206: 8.422753226814788,
207: 8.42311317301949,
208: 8.42347311922419,
209: 8.423833065428889,
210: 8.42419301163359,
211: 8.42455295783829,
212: 8.42491290404299,
213: 8.42527285024769,
214: 8.425632796452392,
215: 8.425992742657092,
216: 8.426352688861792,
217: 8.426712635066494,
218: 8.427072581271194,
219: 8.427432527475894,
220: 8.427792473680595,
221: 8.428152419885295,
222: 8.428512366089995,
223: 8.428872312294695,
224: 8.429232258499397,
225: 8.429592204704097,
226: 8.429952150908797,
227: 8.430312097113498,
228: 8.430672043318198,
229: 8.431031989522898,
230: 8.431391935727598,
231: 8.4317518819323,
232: 8.432111828137,
233: 8.4324717743417,
234: 8.432831720546401,
235: 8.433191666751101,
236: 8.433551612955801,
237: 8.433911559160503,
238: 8.434271505365203,
239: 8.434631451569903,
240: 8.434991397774603,
241: 8.435351343979304,
242: 8.435711290184004,
243: 8.436071236388704,
244: 8.436431182593406,
245: 8.436791128798106,
246: 8.437151075002806,
247: 8.437511021207506,
248: 8.437870967412207,
249: 8.438230913616907,
250: 8.438590859821607,
251: 8.438950806026309,
252: 8.439310752231009,
253: 8.439670698435709,
254: 8.44003064464041,
255: 8.44039059084511,
256: 8.44075053704981,
257: 8.44111048325451,
258: 8.441470429459212,
259: 8.441830375663912,
260: 8.442190321868612,
261: 8.442550268073314,
262: 8.442910214278013,
263: 8.443270160482713,
264: 8.443630106687413,
265: 8.443990052892115,
266: 8.444349999096815,
267: 8.444709945301515,
268: 8.445069891506217,
269: 8.445429837710916,
270: 8.445789783915616,
271: 8.446149730120318,
272: 8.446509676325018,
273: 8.446869622529718,
274: 8.447229568734418,
275: 8.44758951493912,
276: 8.44794946114382,
277: 8.44830940734852,
278: 8.448669353553221,
279: 8.449029299757921,
280: 8.449389245962621,
281: 8.449749192167321,
282: 8.450109138372023,
283: 8.450469084576723,
284: 8.450829030781422,
285: 8.451188976986124,
286: 8.451548923190824,
287: 8.451908869395524,
288: 8.452268815600226,
289: 8.452628761804926,
290: 8.452988708009626,
291: 8.453348654214325,
292: 8.453708600419027,
293: 8.454068546623727,
294: 8.454428492828427,
295: 8.454788439033129,
296: 8.455148385237829,
297: 8.455508331442529,
298: 8.455868277647228,
299: 8.45622822385193,
300: 8.45658817005663,
301: 8.45694811626133,
302: 8.457308062466032,
303: 8.457668008670732,
304: 8.458027954875432,
305: 8.458387901080131,
306: 8.458747847284833,
307: 8.459107793489533,
308: 8.459467739694233,
309: 8.459827685898935,
310: 8.460187632103635,
311: 8.460547578308335,
312: 8.460907524513036,
313: 8.461267470717736,
314: 8.461627416922436,
315: 8.461987363127136,
316: 8.462347309331838,
317: 8.462707255536538,
318: 8.463067201741238,
319: 8.46342714794594,
320: 8.46378709415064,
321: 8.46414704035534,
322: 8.464506986560039,
323: 8.46486693276474,
324: 8.46522687896944,
325: 8.46558682517414,
326: 8.465946771378842,
327: 8.466306717583542,
328: 8.466666663788242,
329: 8.467026609992944,
330: 8.467386556197644,
331: 8.467746502402344,
332: 8.468106448607044,
333: 8.468466394811745,
334: 8.468826341016445,
335: 8.469186287221145,
336: 8.469546233425847,
337: 8.469906179630547,
338: 8.470266125835247,
339: 8.470626072039947,
340: 8.470986018244648,
341: 8.471345964449348,
342: 8.471705910654048,
343: 8.47206585685875,
344: 8.47242580306345,
345: 8.47278574926815,
346: 8.473145695472851,
347: 8.473505641677551,
348: 8.473865587882251,
349: 8.474225534086951,
350: 8.474585480291653,
351: 8.474945426496353,
352: 8.475305372701053,
353: 8.475665318905754,
354: 8.476025265110454,
355: 8.476385211315154,
356: 8.476745157519854,
357: 8.477105103724556,
358: 8.477465049929256,
359: 8.477824996133956,
360: 8.478184942338657,
361: 8.478544888543357,
362: 8.478904834748057,
363: 8.479264780952759,
364: 8.479624727157459,
365: 8.479984673362159,
366: 8.480344619566859,
367: 8.48070456577156,
368: 8.48106451197626,
369: 8.48142445818096,
370: 8.481784404385662,
371: 8.482144350590362,
372: 8.482504296795062,
373: 8.482864242999762,
374: 8.483224189204464,
375: 8.483584135409163,
376: 8.483944081613863,
377: 8.484304027818565,
378: 8.484663974023265,
379: 8.485023920227965,
380: 8.485383866432667,
381: 8.485743812637367,
382: 8.486103758842066,
383: 8.486463705046766,
384: 8.486823651251468,
385: 8.487183597456168,
386: 8.487543543660868,
387: 8.48790348986557,
388: 8.48826343607027,
389: 8.48862338227497,
390: 8.48898332847967,
391: 8.489343274684371,
392: 8.489703220889071,
393: 8.490063167093771,
},
"fcst_lower": {
0: -np.inf,
1: -np.inf,
2: -np.inf,
3: -np.inf,
4: -np.inf,
5: -np.inf,
6: -np.inf,
7: -np.inf,
8: -np.inf,
9: -np.inf,
10: -np.inf,
11: -np.inf,
12: -np.inf,
13: -np.inf,
14: -np.inf,
15: -np.inf,
16: -np.inf,
17: -np.inf,
18: -np.inf,
19: -np.inf,
20: -np.inf,
21: -np.inf,
22: -np.inf,
23: -np.inf,
24: -np.inf,
25: -np.inf,
26: -np.inf,
27: -np.inf,
28: -np.inf,
29: -np.inf,
30: -np.inf,
31: -np.inf,
32: -np.inf,
33: -np.inf,
34: -np.inf,
35: -np.inf,
36: -np.inf,
37: -np.inf,
38: -np.inf,
39: -np.inf,
40: -np.inf,
41: -np.inf,
42: -np.inf,
43: -np.inf,
44: -np.inf,
45: -np.inf,
46: -np.inf,
47: -np.inf,
48: -np.inf,
49: -np.inf,
50: -np.inf,
51: -np.inf,
52: -np.inf,
53: -np.inf,
54: -np.inf,
55: -np.inf,
56: -np.inf,
57: -np.inf,
58: -np.inf,
59: -np.inf,
60: -np.inf,
61: -np.inf,
62: -np.inf,
63: -np.inf,
64: -np.inf,
65: -np.inf,
66: -np.inf,
67: -np.inf,
68: -np.inf,
69: -np.inf,
70: -np.inf,
71: -np.inf,
72: -np.inf,
73: -np.inf,
74: -np.inf,
75: -np.inf,
76: -np.inf,
77: -np.inf,
78: -np.inf,
79: -np.inf,
80: -np.inf,
81: -np.inf,
82: -np.inf,
83: -np.inf,
84: -np.inf,
85: -np.inf,
86: -np.inf,
87: -np.inf,
88: -np.inf,
89: -np.inf,
90: -np.inf,
91: -np.inf,
92: -np.inf,
93: -np.inf,
94: -np.inf,
95: -np.inf,
96: -np.inf,
97: -np.inf,
98: -np.inf,
99: -np.inf,
100: -np.inf,
101: -np.inf,
102: -np.inf,
103: -np.inf,
104: -np.inf,
105: -np.inf,
106: -np.inf,
107: -np.inf,
108: -np.inf,
109: -np.inf,
110: -np.inf,
111: -np.inf,
112: -np.inf,
113: -np.inf,
114: -np.inf,
115: -np.inf,
116: -np.inf,
117: -np.inf,
118: -np.inf,
119: -np.inf,
120: -np.inf,
121: -np.inf,
122: -np.inf,
123: -np.inf,
124: -np.inf,
125: -np.inf,
126: -np.inf,
127: -np.inf,
128: -np.inf,
129: -np.inf,
130: -np.inf,
131: -np.inf,
132: -np.inf,
133: -np.inf,
134: -np.inf,
135: -np.inf,
136: -np.inf,
137: -np.inf,
138: -np.inf,
139: -np.inf,
140: -np.inf,
141: -np.inf,
142: -np.inf,
143: -np.inf,
144: -np.inf,
145: -np.inf,
146: -np.inf,
147: -np.inf,
148: -np.inf,
149: -np.inf,
150: -np.inf,
151: -np.inf,
152: -np.inf,
153: -np.inf,
154: -np.inf,
155: -np.inf,
156: -np.inf,
157: -np.inf,
158: -np.inf,
159: -np.inf,
160: -np.inf,
161: -np.inf,
162: -np.inf,
163: -np.inf,
164: -np.inf,
165: -np.inf,
166: -np.inf,
167: -np.inf,
168: -np.inf,
169: -np.inf,
170: -np.inf,
171: -np.inf,
172: -np.inf,
173: -np.inf,
174: -np.inf,
175: -np.inf,
176: -np.inf,
177: -np.inf,
178: -np.inf,
179: -np.inf,
180: -np.inf,
181: -np.inf,
182: -np.inf,
183: -np.inf,
184: -np.inf,
185: -np.inf,
186: -np.inf,
187: -np.inf,
188: -np.inf,
189: -np.inf,
190: -np.inf,
191: -np.inf,
192: -np.inf,
193: -np.inf,
194: -np.inf,
195: -np.inf,
196: -np.inf,
197: -np.inf,
198: -np.inf,
199: -np.inf,
200: -np.inf,
201: -np.inf,
202: -np.inf,
203: -np.inf,
204: -np.inf,
205: -np.inf,
206: -np.inf,
207: -np.inf,
208: -np.inf,
209: -np.inf,
210: -np.inf,
211: -np.inf,
212: -np.inf,
213: -np.inf,
214: -np.inf,
215: -np.inf,
216: -np.inf,
217: -np.inf,
218: -np.inf,
219: -np.inf,
220: -np.inf,
221: -np.inf,
222: -np.inf,
223: -np.inf,
224: -np.inf,
225: -np.inf,
226: -np.inf,
227: -np.inf,
228: -np.inf,
229: -np.inf,
230: -np.inf,
231: -np.inf,
232: -np.inf,
233: -np.inf,
234: -np.inf,
235: -np.inf,
236: -np.inf,
237: -np.inf,
238: -np.inf,
239: -np.inf,
240: -np.inf,
241: -np.inf,
242: -np.inf,
243: -np.inf,
244: -np.inf,
245: -np.inf,
246: -np.inf,
247: -np.inf,
248: -np.inf,
249: -np.inf,
250: -np.inf,
251: -np.inf,
252: -np.inf,
253: -np.inf,
254: -np.inf,
255: -np.inf,
256: -np.inf,
257: -np.inf,
258: -np.inf,
259: -np.inf,
260: -np.inf,
261: -np.inf,
262: -np.inf,
263: -np.inf,
264: -np.inf,
265: -np.inf,
266: -np.inf,
267: -np.inf,
268: -np.inf,
269: -np.inf,
270: -np.inf,
271: -np.inf,
272: -np.inf,
273: -np.inf,
274: -np.inf,
275: -np.inf,
276: -np.inf,
277: -np.inf,
278: -np.inf,
279: -np.inf,
280: -np.inf,
281: -np.inf,
282: -np.inf,
283: -np.inf,
284: -np.inf,
285: -np.inf,
286: -np.inf,
287: -np.inf,
288: -np.inf,
289: -np.inf,
290: -np.inf,
291: -np.inf,
292: -np.inf,
293: -np.inf,
294: -np.inf,
295: -np.inf,
296: -np.inf,
297: -np.inf,
298: -np.inf,
299: -np.inf,
300: -np.inf,
301: -np.inf,
302: -np.inf,
303: -np.inf,
304: -np.inf,
305: -np.inf,
306: -np.inf,
307: -np.inf,
308: -np.inf,
309: -np.inf,
310: -np.inf,
311: -np.inf,
312: -np.inf,
313: -np.inf,
314: -np.inf,
315: -np.inf,
316: -np.inf,
317: -np.inf,
318: -np.inf,
319: -np.inf,
320: -np.inf,
321: -np.inf,
322: -np.inf,
323: -np.inf,
324: -np.inf,
325: -np.inf,
326: -np.inf,
327: -np.inf,
328: -np.inf,
329: -np.inf,
330: -np.inf,
331: -np.inf,
332: -np.inf,
333: -np.inf,
334: -np.inf,
335: -np.inf,
336: -np.inf,
337: -np.inf,
338: -np.inf,
339: -np.inf,
340: -np.inf,
341: -np.inf,
342: -np.inf,
343: -np.inf,
344: -np.inf,
345: -np.inf,
346: -np.inf,
347: -np.inf,
348: -np.inf,
349: -np.inf,
350: -np.inf,
351: -np.inf,
352: -np.inf,
353: -np.inf,
354: -np.inf,
355: -np.inf,
356: -np.inf,
357: -np.inf,
358: -np.inf,
359: -np.inf,
360: -np.inf,
361: -np.inf,
362: -np.inf,
363: -np.inf,
364: -np.inf,
365: -np.inf,
366: -np.inf,
367: -np.inf,
368: -np.inf,
369: -np.inf,
370: -np.inf,
371: -np.inf,
372: -np.inf,
373: -np.inf,
374: -np.inf,
375: -np.inf,
376: -np.inf,
377: -np.inf,
378: -np.inf,
379: -np.inf,
380: -np.inf,
381: -np.inf,
382: -np.inf,
383: -np.inf,
384: -np.inf,
385: -np.inf,
386: -np.inf,
387: -np.inf,
388: -np.inf,
389: -np.inf,
390: -np.inf,
391: -np.inf,
392: -np.inf,
393: -np.inf,
},
"fcst_upper": {
0: np.inf,
1: np.inf,
2: np.inf,
3: np.inf,
4: np.inf,
5: np.inf,
6: np.inf,
7: np.inf,
8: np.inf,
9: np.inf,
10: np.inf,
11: np.inf,
12: np.inf,
13: np.inf,
14: np.inf,
15: np.inf,
16: np.inf,
17: np.inf,
18: np.inf,
19: np.inf,
20: np.inf,
21: np.inf,
22: np.inf,
23: np.inf,
24: np.inf,
25: np.inf,
26: np.inf,
27: np.inf,
28: np.inf,
29: np.inf,
30: np.inf,
31: np.inf,
32: np.inf,
33: np.inf,
34: np.inf,
35: np.inf,
36: np.inf,
37: np.inf,
38: np.inf,
39: np.inf,
40: np.inf,
41: np.inf,
42: np.inf,
43: np.inf,
44: np.inf,
45: np.inf,
46: np.inf,
47: np.inf,
48: np.inf,
49: np.inf,
50: np.inf,
51: np.inf,
52: np.inf,
53: np.inf,
54: np.inf,
55: np.inf,
56: np.inf,
57: np.inf,
58: np.inf,
59: np.inf,
60: np.inf,
61: np.inf,
62: np.inf,
63: np.inf,
64: np.inf,
65: np.inf,
66: np.inf,
67: np.inf,
68: np.inf,
69: np.inf,
70: np.inf,
71: np.inf,
72: np.inf,
73: np.inf,
74: np.inf,
75: np.inf,
76: np.inf,
77: np.inf,
78: np.inf,
79: np.inf,
80: np.inf,
81: np.inf,
82: np.inf,
83: np.inf,
84: np.inf,
85: np.inf,
86: np.inf,
87: np.inf,
88: np.inf,
89: np.inf,
90: np.inf,
91: np.inf,
92: np.inf,
93: np.inf,
94: np.inf,
95: np.inf,
96: np.inf,
97: np.inf,
98: np.inf,
99: np.inf,
100: np.inf,
101: np.inf,
102: np.inf,
103: np.inf,
104: np.inf,
105: np.inf,
106: np.inf,
107: np.inf,
108: np.inf,
109: np.inf,
110: np.inf,
111: np.inf,
112: np.inf,
113: np.inf,
114: np.inf,
115: np.inf,
116: np.inf,
117: np.inf,
118: np.inf,
119: np.inf,
120: np.inf,
121: np.inf,
122: np.inf,
123: np.inf,
124: np.inf,
125: np.inf,
126: np.inf,
127: np.inf,
128: np.inf,
129: np.inf,
130: np.inf,
131: np.inf,
132: np.inf,
133: np.inf,
134: np.inf,
135: np.inf,
136: np.inf,
137: np.inf,
138: np.inf,
139: np.inf,
140: np.inf,
141: np.inf,
142: np.inf,
143: np.inf,
144: np.inf,
145: np.inf,
146: np.inf,
147: np.inf,
148: np.inf,
149: np.inf,
150: np.inf,
151: np.inf,
152: np.inf,
153: np.inf,
154: np.inf,
155: np.inf,
156: np.inf,
157: np.inf,
158: np.inf,
159: np.inf,
160: np.inf,
161: np.inf,
162: np.inf,
163: np.inf,
164: np.inf,
165: np.inf,
166: np.inf,
167: np.inf,
168: np.inf,
169: np.inf,
170: np.inf,
171: np.inf,
172: np.inf,
173: np.inf,
174: np.inf,
175: np.inf,
176: np.inf,
177: np.inf,
178: np.inf,
179: np.inf,
180: np.inf,
181: np.inf,
182: np.inf,
183: np.inf,
184: np.inf,
185: np.inf,
186: np.inf,
187: np.inf,
188: np.inf,
189: np.inf,
190: np.inf,
191: np.inf,
192: np.inf,
193: np.inf,
194: np.inf,
195: np.inf,
196: np.inf,
197: np.inf,
198: np.inf,
199: np.inf,
200: np.inf,
201: np.inf,
202: np.inf,
203: np.inf,
204: np.inf,
205: np.inf,
206: np.inf,
207: np.inf,
208: np.inf,
209: np.inf,
210: np.inf,
211: np.inf,
212: np.inf,
213: np.inf,
214: np.inf,
215: np.inf,
216: np.inf,
217: np.inf,
218: np.inf,
219: np.inf,
220: np.inf,
221: np.inf,
222: np.inf,
223: np.inf,
224: np.inf,
225: np.inf,
226: np.inf,
227: np.inf,
228: np.inf,
229: np.inf,
230: np.inf,
231: np.inf,
232: np.inf,
233: np.inf,
234: np.inf,
235: np.inf,
236: np.inf,
237: np.inf,
238: np.inf,
239: np.inf,
240: np.inf,
241: np.inf,
242: np.inf,
243: np.inf,
244: np.inf,
245: np.inf,
246: np.inf,
247: np.inf,
248: np.inf,
249: np.inf,
250: np.inf,
251: np.inf,
252: np.inf,
253: np.inf,
254: np.inf,
255: np.inf,
256: np.inf,
257: np.inf,
258: np.inf,
259: np.inf,
260: np.inf,
261: np.inf,
262: np.inf,
263: np.inf,
264: np.inf,
265: np.inf,
266: np.inf,
267: np.inf,
268: np.inf,
269: np.inf,
270: np.inf,
271: np.inf,
272: np.inf,
273: np.inf,
274: np.inf,
275: np.inf,
276: np.inf,
277: np.inf,
278: np.inf,
279: np.inf,
280: np.inf,
281: np.inf,
282: np.inf,
283: np.inf,
284: np.inf,
285: np.inf,
286: np.inf,
287: np.inf,
288: np.inf,
289: np.inf,
290: np.inf,
291: np.inf,
292: np.inf,
293: np.inf,
294: np.inf,
295: np.inf,
296: np.inf,
297: np.inf,
298: np.inf,
299: np.inf,
300: np.inf,
301: np.inf,
302: np.inf,
303: np.inf,
304: np.inf,
305: np.inf,
306: np.inf,
307: np.inf,
308: np.inf,
309: np.inf,
310: np.inf,
311: np.inf,
312: np.inf,
313: np.inf,
314: np.inf,
315: np.inf,
316: np.inf,
317: np.inf,
318: np.inf,
319: np.inf,
320: np.inf,
321: np.inf,
322: np.inf,
323: np.inf,
324: np.inf,
325: np.inf,
326: np.inf,
327: np.inf,
328: np.inf,
329: np.inf,
330: np.inf,
331: np.inf,
332: np.inf,
333: np.inf,
334: np.inf,
335: np.inf,
336: np.inf,
337: np.inf,
338: np.inf,
339: np.inf,
340: np.inf,
341: np.inf,
342: np.inf,
343: np.inf,
344: np.inf,
345: np.inf,
346: np.inf,
347: np.inf,
348: np.inf,
349: np.inf,
350: np.inf,
351: np.inf,
352: np.inf,
353: np.inf,
354: np.inf,
355: np.inf,
356: np.inf,
357: np.inf,
358: np.inf,
359: np.inf,
360: np.inf,
361: np.inf,
362: np.inf,
363: np.inf,
364: np.inf,
365: np.inf,
366: np.inf,
367: np.inf,
368: np.inf,
369: np.inf,
370: np.inf,
371: np.inf,
372: np.inf,
373: np.inf,
374: np.inf,
375: np.inf,
376: np.inf,
377: np.inf,
378: np.inf,
379: np.inf,
380: np.inf,
381: np.inf,
382: np.inf,
383: np.inf,
384: np.inf,
385: np.inf,
386: np.inf,
387: np.inf,
388: np.inf,
389: np.inf,
390: np.inf,
391: np.inf,
392: np.inf,
393: np.inf,
},
}
)
PEYTON_FCST_LINEAR_INVALID_NEG_ONE = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
            12: pd.Timestamp("2012-05-14 00:00:00"),
import os
import tempfile
import unittest
import numpy as np
import pandas as pd
from sqlalchemy import create_engine
from tests.settings import POSTGRESQL_ENGINE, SQLITE_ENGINE
from tests.utils import get_repository_path, DBTest
from ukbrest.common.pheno2sql import Pheno2SQL
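# Overview (inferred from the assertions below): Pheno2SQL.load_data() splits the
# phenotype CSV columns into chunked tables named ukb_pheno_<csv>_<chunk>
# (chunk size controlled by n_columns_per_table), builds a 'fields' auxiliary
# table mapping each column to its table plus metadata, and query() yields
# DataFrames indexed by 'eid'.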
class Pheno2SQLTest(DBTest):
@unittest.skip('sqlite being removed')
def test_sqlite_default_values(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = SQLITE_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'sqlite'
## Check table exists
tmp = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' AND name='{}';".format('ukb_pheno_0_00'), create_engine(db_engine))
assert not tmp.empty
## Check columns are correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0","c31_0_0","c34_0_0","c46_0_0","c47_0_0","c48_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
assert tmp.loc[1, 'c21_0_0'] == 'Option number 1'
assert tmp.loc[1, 'c21_1_0'] == 'No response'
assert tmp.loc[1, 'c21_2_0'] == 'Yes'
assert tmp.loc[1, 'c31_0_0'] == '2012-01-05'
assert int(tmp.loc[1, 'c34_0_0']) == 21
assert int(tmp.loc[1, 'c46_0_0']) == -9
assert tmp.loc[1, 'c47_0_0'].round(5) == 45.55412
assert tmp.loc[1, 'c48_0_0'] == '2011-08-14'
assert tmp.loc[2, 'c21_0_0'] == 'Option number 2'
assert pd.isnull(tmp.loc[2, 'c21_1_0'])
assert tmp.loc[2, 'c21_2_0'] == 'No'
assert tmp.loc[2, 'c31_0_0'] == '2015-12-30'
assert int(tmp.loc[2, 'c34_0_0']) == 12
assert int(tmp.loc[2, 'c46_0_0']) == -2
assert tmp.loc[2, 'c47_0_0'].round(5) == -0.55461
assert tmp.loc[2, 'c48_0_0'] == '2010-03-29'
def test_postgresql_default_values(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check table exists
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_00'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0","c31_0_0","c34_0_0","c46_0_0","c47_0_0","c48_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
assert tmp.loc[1, 'c21_0_0'] == 'Option number 1'
assert tmp.loc[1, 'c21_1_0'] == 'No response'
assert tmp.loc[1, 'c21_2_0'] == 'Yes'
assert tmp.loc[1, 'c31_0_0'].strftime('%Y-%m-%d') == '2012-01-05'
assert int(tmp.loc[1, 'c34_0_0']) == 21
assert int(tmp.loc[1, 'c46_0_0']) == -9
assert tmp.loc[1, 'c47_0_0'].round(5) == 45.55412
assert tmp.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-08-14'
assert tmp.loc[2, 'c21_0_0'] == 'Option number 2'
assert pd.isnull(tmp.loc[2, 'c21_1_0'])
assert tmp.loc[2, 'c21_2_0'] == 'No'
assert tmp.loc[2, 'c31_0_0'].strftime('%Y-%m-%d') == '2015-12-30'
assert int(tmp.loc[2, 'c34_0_0']) == 12
assert int(tmp.loc[2, 'c46_0_0']) == -2
assert tmp.loc[2, 'c47_0_0'].round(5) == -0.55461
assert tmp.loc[2, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-03-29'
def test_exit(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = POSTGRESQL_ENGINE
temp_dir = tempfile.mkdtemp()
# Run
with Pheno2SQL(csv_file, db_engine, tmpdir=temp_dir) as p2sql:
p2sql.load_data()
# Validate
## Check table exists
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_00'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0","c31_0_0","c34_0_0","c46_0_0","c47_0_0","c48_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
## Check that temporary files were deleted
assert len(os.listdir(temp_dir)) == 0
@unittest.skip('sqlite being removed')
def test_sqlite_less_columns_per_table(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = SQLITE_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'sqlite'
## Check tables exist
table = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' AND name='{}';".format('ukb_pheno_0_00'), create_engine(db_engine))
assert not table.empty
table = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' AND name='{}';".format('ukb_pheno_0_01'), create_engine(db_engine))
assert not table.empty
table = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' AND name='{}';".format('ukb_pheno_0_02'), create_engine(db_engine))
assert not table.empty
## Check columns are correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine))
expected_columns = ["eid","c31_0_0","c34_0_0","c46_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
tmp = pd.read_sql('select * from ukb_pheno_0_02', create_engine(db_engine))
expected_columns = ["eid","c47_0_0","c48_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
assert tmp.loc[1, 'c21_0_0'] == 'Option number 1'
assert tmp.loc[1, 'c21_1_0'] == 'No response'
assert tmp.loc[1, 'c21_2_0'] == 'Yes'
assert tmp.loc[2, 'c21_0_0'] == 'Option number 2'
assert pd.isnull(tmp.loc[2, 'c21_1_0'])
assert tmp.loc[2, 'c21_2_0'] == 'No'
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
assert tmp.loc[1, 'c31_0_0'] == '2012-01-05'
assert int(tmp.loc[1, 'c34_0_0']) == 21
assert int(tmp.loc[1, 'c46_0_0']) == -9
assert tmp.loc[2, 'c31_0_0'] == '2015-12-30'
assert int(tmp.loc[2, 'c34_0_0']) == 12
assert int(tmp.loc[2, 'c46_0_0']) == -2
tmp = pd.read_sql('select * from ukb_pheno_0_02', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
assert tmp.loc[1, 'c47_0_0'].round(5) == 45.55412
assert tmp.loc[1, 'c48_0_0'] == '2011-08-14'
assert tmp.loc[2, 'c47_0_0'].round(5) == -0.55461
assert tmp.loc[2, 'c48_0_0'] == '2010-03-29'
def test_postgresql_less_columns_per_table(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check tables exist
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_00'), create_engine(db_engine))
assert table.iloc[0, 0]
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_01'), create_engine(db_engine))
assert table.iloc[0, 0]
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_02'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine))
expected_columns = ["eid","c31_0_0","c34_0_0","c46_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
tmp = pd.read_sql('select * from ukb_pheno_0_02', create_engine(db_engine))
expected_columns = ["eid","c47_0_0","c48_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
assert tmp.loc[1, 'c21_0_0'] == 'Option number 1'
assert tmp.loc[1, 'c21_1_0'] == 'No response'
assert tmp.loc[1, 'c21_2_0'] == 'Yes'
assert tmp.loc[2, 'c21_0_0'] == 'Option number 2'
assert pd.isnull(tmp.loc[2, 'c21_1_0'])
assert tmp.loc[2, 'c21_2_0'] == 'No'
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
assert tmp.loc[1, 'c31_0_0'].strftime('%Y-%m-%d') == '2012-01-05'
assert int(tmp.loc[1, 'c34_0_0']) == 21
assert int(tmp.loc[1, 'c46_0_0']) == -9
assert tmp.loc[2, 'c31_0_0'].strftime('%Y-%m-%d') == '2015-12-30'
assert int(tmp.loc[2, 'c34_0_0']) == 12
assert int(tmp.loc[2, 'c46_0_0']) == -2
tmp = pd.read_sql('select * from ukb_pheno_0_02', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
assert tmp.loc[1, 'c47_0_0'].round(5) == 45.55412
assert tmp.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-08-14'
assert tmp.loc[2, 'c47_0_0'].round(5) == -0.55461
assert tmp.loc[2, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-03-29'
def test_custom_tmpdir(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = POSTGRESQL_ENGINE
with Pheno2SQL(csv_file, db_engine, tmpdir='/tmp/custom/directory/here', delete_temp_csv=False) as p2sql:
# Run
p2sql.load_data()
# Validate
## Check table exists
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_00'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0","c31_0_0","c34_0_0","c46_0_0","c47_0_0","c48_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
            ## Check that temporary files are still there
assert len(os.listdir('/tmp/custom/directory/here')) > 0
        ## Check that the temporary directory is now clean
assert len(os.listdir('/tmp/custom/directory/here')) == 0
@unittest.skip('sqlite being removed')
def test_sqlite_auxiliary_table_is_created(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = SQLITE_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'sqlite'
## Check tables exist
table = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' AND name='{}';".format('ukb_pheno_0_00'), create_engine(db_engine))
assert not table.empty
table = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' AND name='{}';".format('ukb_pheno_0_01'), create_engine(db_engine))
assert not table.empty
table = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' AND name='{}';".format('ukb_pheno_0_02'), create_engine(db_engine))
assert not table.empty
## Check columns are correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine))
expected_columns = ["eid","c31_0_0","c34_0_0","c46_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
tmp = pd.read_sql('select * from ukb_pheno_0_02', create_engine(db_engine))
expected_columns = ["eid","c47_0_0","c48_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
        ## Check auxiliary table existence
table = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' AND name='{}';".format('fields'), create_engine(db_engine))
assert not table.empty
## Check columns are correct
tmp = pd.read_sql('select * from fields', create_engine(db_engine))
expected_columns = ["column_name", "table_name"]
assert len(tmp.columns) >= len(expected_columns)
assert all(x in tmp.columns for x in expected_columns)
## Check data is correct
tmp = pd.read_sql('select * from fields', create_engine(db_engine), index_col='column_name')
assert not tmp.empty
assert tmp.shape[0] == 8
assert tmp.loc['c21_0_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c21_1_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c21_2_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c31_0_0', 'table_name'] == 'ukb_pheno_0_01'
assert tmp.loc['c34_0_0', 'table_name'] == 'ukb_pheno_0_01'
assert tmp.loc['c46_0_0', 'table_name'] == 'ukb_pheno_0_01'
assert tmp.loc['c47_0_0', 'table_name'] == 'ukb_pheno_0_02'
assert tmp.loc['c48_0_0', 'table_name'] == 'ukb_pheno_0_02'
def test_postgresql_auxiliary_table_is_created_and_has_minimum_data_required(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check tables exist
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_00'), create_engine(db_engine))
assert table.iloc[0, 0]
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_01'), create_engine(db_engine))
assert table.iloc[0, 0]
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_02'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine))
expected_columns = ["eid","c31_0_0","c34_0_0","c46_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
tmp = pd.read_sql('select * from ukb_pheno_0_02', create_engine(db_engine))
expected_columns = ["eid","c47_0_0","c48_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
        ## Check auxiliary table existence
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('fields'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
tmp = pd.read_sql('select * from fields', create_engine(db_engine))
expected_columns = ["column_name", "table_name"]
assert len(tmp.columns) >= len(expected_columns)
assert all(x in tmp.columns for x in expected_columns)
## Check data is correct
tmp = pd.read_sql('select * from fields', create_engine(db_engine), index_col='column_name')
assert not tmp.empty
assert tmp.shape[0] == 8
assert tmp.loc['c21_0_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c21_1_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c21_2_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c31_0_0', 'table_name'] == 'ukb_pheno_0_01'
assert tmp.loc['c34_0_0', 'table_name'] == 'ukb_pheno_0_01'
assert tmp.loc['c46_0_0', 'table_name'] == 'ukb_pheno_0_01'
assert tmp.loc['c47_0_0', 'table_name'] == 'ukb_pheno_0_02'
assert tmp.loc['c48_0_0', 'table_name'] == 'ukb_pheno_0_02'
def test_postgresql_auxiliary_table_with_more_information(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check tables exist
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_00'), create_engine(db_engine))
assert table.iloc[0, 0]
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_01'), create_engine(db_engine))
assert table.iloc[0, 0]
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_02'), create_engine(db_engine))
assert table.iloc[0, 0]
        ## Check auxiliary table existence
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('fields'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
tmp = pd.read_sql('select * from fields', create_engine(db_engine))
expected_columns = ["column_name", "field_id", "inst", "arr", "coding", "table_name", "type", "description"]
assert len(tmp.columns) == len(expected_columns), len(tmp.columns)
assert all(x in expected_columns for x in tmp.columns)
## Check data is correct
tmp = pd.read_sql('select * from fields', create_engine(db_engine), index_col='column_name')
assert not tmp.empty
assert tmp.shape[0] == 8
assert tmp.loc['c21_0_0', 'field_id'] == '21'
assert tmp.loc['c21_0_0', 'inst'] == 0
assert tmp.loc['c21_0_0', 'arr'] == 0
assert tmp.loc['c21_0_0', 'coding'] == 100261
assert tmp.loc['c21_0_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c21_0_0', 'type'] == 'Categorical (single)'
assert tmp.loc['c21_0_0', 'description'] == 'An string value'
assert tmp.loc['c21_1_0', 'field_id'] == '21'
assert tmp.loc['c21_1_0', 'inst'] == 1
assert tmp.loc['c21_1_0', 'arr'] == 0
assert tmp.loc['c21_1_0', 'coding'] == 100261
assert tmp.loc['c21_1_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c21_1_0', 'type'] == 'Categorical (single)'
assert tmp.loc['c21_1_0', 'description'] == 'An string value'
assert tmp.loc['c21_2_0', 'field_id'] == '21'
assert tmp.loc['c21_2_0', 'inst'] == 2
assert tmp.loc['c21_2_0', 'arr'] == 0
assert tmp.loc['c21_2_0', 'coding'] == 100261
assert tmp.loc['c21_2_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c21_2_0', 'type'] == 'Categorical (single)'
assert tmp.loc['c21_2_0', 'description'] == 'An string value'
assert tmp.loc['c31_0_0', 'field_id'] == '31'
assert tmp.loc['c31_0_0', 'inst'] == 0
assert tmp.loc['c31_0_0', 'arr'] == 0
assert pd.isnull(tmp.loc['c31_0_0', 'coding'])
assert tmp.loc['c31_0_0', 'table_name'] == 'ukb_pheno_0_01'
assert tmp.loc['c31_0_0', 'type'] == 'Date'
assert tmp.loc['c31_0_0', 'description'] == 'A date'
assert tmp.loc['c34_0_0', 'field_id'] == '34'
assert tmp.loc['c34_0_0', 'inst'] == 0
assert tmp.loc['c34_0_0', 'arr'] == 0
assert tmp.loc['c34_0_0', 'coding'] == 9
assert tmp.loc['c34_0_0', 'table_name'] == 'ukb_pheno_0_01'
assert tmp.loc['c34_0_0', 'type'] == 'Integer'
assert tmp.loc['c34_0_0', 'description'] == 'Some integer'
assert tmp.loc['c46_0_0', 'field_id'] == '46'
assert tmp.loc['c46_0_0', 'inst'] == 0
assert tmp.loc['c46_0_0', 'arr'] == 0
assert pd.isnull(tmp.loc['c46_0_0', 'coding'])
assert tmp.loc['c46_0_0', 'table_name'] == 'ukb_pheno_0_01'
assert tmp.loc['c46_0_0', 'type'] == 'Integer'
assert tmp.loc['c46_0_0', 'description'] == 'Some another integer'
assert tmp.loc['c47_0_0', 'field_id'] == '47'
assert tmp.loc['c47_0_0', 'inst'] == 0
assert tmp.loc['c47_0_0', 'arr'] == 0
assert pd.isnull(tmp.loc['c47_0_0', 'coding'])
assert tmp.loc['c47_0_0', 'table_name'] == 'ukb_pheno_0_02'
assert tmp.loc['c47_0_0', 'type'] == 'Continuous'
assert tmp.loc['c47_0_0', 'description'] == 'Some continuous value'
assert tmp.loc['c48_0_0', 'field_id'] == '48'
assert tmp.loc['c48_0_0', 'inst'] == 0
assert tmp.loc['c48_0_0', 'arr'] == 0
assert pd.isnull(tmp.loc['c48_0_0', 'coding'])
assert tmp.loc['c48_0_0', 'table_name'] == 'ukb_pheno_0_02'
assert tmp.loc['c48_0_0', 'type'] == 'Time'
assert tmp.loc['c48_0_0', 'description'] == 'Some time'
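    # Note: field_id/inst/arr above follow directly from the c<field>_<instance>_<array>
    # column naming, while type, description and coding presumably come from the
    # metadata shipped with the example fixture rather than the CSV values themselves,
    # so changing that fixture would change these expectations.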
def test_postgresql_auxiliary_table_check_types(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
        ## Check auxiliary table existence
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('fields'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
tmp = pd.read_sql('select * from fields', create_engine(db_engine))
expected_columns = ["column_name", "field_id", "inst", "arr", "coding", "table_name", "type", "description"]
assert len(tmp.columns) == len(expected_columns), len(tmp.columns)
assert all(x in expected_columns for x in tmp.columns)
## Check data is correct
sql_types = """
select column_name, data_type
from information_schema.columns
where table_name = 'fields';
"""
tmp = pd.read_sql(sql_types, create_engine(db_engine), index_col='column_name')
assert not tmp.empty
assert tmp.shape[0] == 8
assert tmp.loc['field_id', 'data_type'] == 'text'
assert tmp.loc['inst', 'data_type'] == 'bigint'
assert tmp.loc['arr', 'data_type'] == 'bigint'
assert tmp.loc['coding', 'data_type'] == 'bigint'
assert tmp.loc['table_name', 'data_type'] == 'text'
assert tmp.loc['type', 'data_type'] == 'text'
assert tmp.loc['description', 'data_type'] == 'text'
def test_postgresql_auxiliary_table_constraints(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
        ## Check auxiliary table existence
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('fields'), create_engine(db_engine))
assert table.iloc[0, 0]
# primary key
constraint_sql = self._get_table_contrains('fields', column_query='column_name', relationship_query='pk_%%')
constraints_results = pd.read_sql(constraint_sql, create_engine(db_engine))
assert constraints_results is not None
assert not constraints_results.empty
        # indexes on the searchable columns (field_id, inst, arr, coding, table_name, type)
constraint_sql = self._get_table_contrains('fields', relationship_query='ix_%%')
constraints_results = pd.read_sql(constraint_sql, create_engine(db_engine))
assert constraints_results is not None
assert not constraints_results.empty
columns = constraints_results['column_name'].tolist()
assert len(columns) == 6
assert 'arr' in columns
assert 'field_id' in columns
assert 'inst' in columns
assert 'table_name' in columns
assert 'type' in columns
assert 'coding' in columns
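    # The multi-CSV tests below rely on each input file getting its own table
    # prefix: the first CSV loads into ukb_pheno_0_*, the second into
    # ukb_pheno_1_*, and queries join the pieces back together on 'eid'.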
def test_postgresql_two_csv_files(self):
# Prepare
csv01 = get_repository_path('pheno2sql/example08_01.csv')
csv02 = get_repository_path('pheno2sql/example08_02.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL((csv01, csv02), db_engine)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check tables exist
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_00'), create_engine(db_engine))
assert table.iloc[0, 0]
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_1_00'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0","c31_0_0","c34_0_0","c46_0_0","c47_0_0","c48_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
tmp = pd.read_sql('select * from ukb_pheno_1_00', create_engine(db_engine))
expected_columns = ["eid","c100_0_0", "c100_1_0", "c100_2_0", "c110_0_0", "c120_0_0", "c130_0_0", "c140_0_0", "c150_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 5
assert tmp.loc[1, 'c21_0_0'] == 'Option number 1'
assert tmp.loc[1, 'c21_1_0'] == 'No response'
assert tmp.loc[1, 'c21_2_0'] == 'Yes'
assert tmp.loc[1, 'c31_0_0'].strftime('%Y-%m-%d') == '2011-03-07'
assert int(tmp.loc[1, 'c34_0_0']) == -33
assert int(tmp.loc[1, 'c46_0_0']) == -9
assert tmp.loc[1, 'c47_0_0'].round(5) == 41.55312
assert tmp.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-07-14'
assert tmp.loc[5, 'c21_0_0'] == 'Option number 5'
assert tmp.loc[5, 'c21_1_0'] == 'Maybe'
assert tmp.loc[5, 'c21_2_0'] == 'Probably'
assert pd.isnull(tmp.loc[5, 'c31_0_0'])
assert int(tmp.loc[5, 'c34_0_0']) == -4
assert int(tmp.loc[5, 'c46_0_0']) == 1
assert pd.isnull(tmp.loc[5, 'c47_0_0'])
assert tmp.loc[5, 'c48_0_0'].strftime('%Y-%m-%d') == '1999-10-11'
tmp = pd.read_sql('select * from ukb_pheno_1_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 3
assert int(tmp.loc[1, 'c100_0_0']) == -9
assert int(tmp.loc[1, 'c100_1_0']) == 3
assert pd.isnull(tmp.loc[1, 'c100_2_0'])
assert tmp.loc[1, 'c110_0_0'].round(5) == 42.55312
assert int(tmp.loc[1, 'c120_0_0']) == -33
assert tmp.loc[1, 'c130_0_0'] == 'Option number 1'
assert tmp.loc[1, 'c140_0_0'].strftime('%Y-%m-%d') == '2011-03-07'
assert tmp.loc[1, 'c150_0_0'].strftime('%Y-%m-%d') == '2010-07-14'
assert pd.isnull(tmp.loc[3, 'c100_0_0'])
assert int(tmp.loc[3, 'c100_1_0']) == -4
assert int(tmp.loc[3, 'c100_2_0']) == -10
assert tmp.loc[3, 'c110_0_0'].round(5) == -35.31471
assert int(tmp.loc[3, 'c120_0_0']) == 0
assert tmp.loc[3, 'c130_0_0'] == 'Option number 3'
assert tmp.loc[3, 'c140_0_0'].strftime('%Y-%m-%d') == '1997-04-15'
assert pd.isnull(tmp.loc[3, 'c150_0_0'])
@unittest.skip('sqlite being removed')
def test_sqlite_query_single_table(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example02.csv')
db_engine = SQLITE_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=999999)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c48_0_0']
query_result = next(p2sql.query(columns))
# Validate
assert query_result is not None
assert query_result.index.name == 'eid'
assert len(query_result.index) == 4
assert all(x in query_result.index for x in range(1, 4 + 1))
assert len(query_result.columns) == len(columns)
assert all(x in columns for x in query_result.columns)
assert not query_result.empty
assert query_result.shape[0] == 4
assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
assert query_result.loc[2, 'c21_0_0'] == 'Option number 2'
assert query_result.loc[3, 'c21_0_0'] == 'Option number 3'
assert query_result.loc[4, 'c21_0_0'] == 'Option number 4'
assert query_result.loc[1, 'c21_2_0'] == 'Yes'
assert query_result.loc[2, 'c21_2_0'] == 'No'
assert query_result.loc[3, 'c21_2_0'] == 'Maybe'
assert pd.isnull(query_result.loc[4, 'c21_2_0'])
assert query_result.loc[1, 'c48_0_0'] == '2011-08-14'
assert query_result.loc[2, 'c48_0_0'] == '2016-11-30'
assert query_result.loc[3, 'c48_0_0'] == '2010-01-01'
assert query_result.loc[4, 'c48_0_0'] == '2011-02-15'
def test_postgresql_query_single_table(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example02.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=999999)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c48_0_0']
query_result = next(p2sql.query(columns))
# Validate
assert query_result is not None
assert query_result.index.name == 'eid'
assert len(query_result.index) == 4
assert all(x in query_result.index for x in range(1, 4 + 1))
assert len(query_result.columns) == len(columns)
assert all(x in columns for x in query_result.columns)
assert not query_result.empty
assert query_result.shape[0] == 4
assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
assert query_result.loc[2, 'c21_0_0'] == 'Option number 2'
assert query_result.loc[3, 'c21_0_0'] == 'Option number 3'
assert query_result.loc[4, 'c21_0_0'] == 'Option number 4'
assert query_result.loc[1, 'c21_2_0'] == 'Yes'
assert query_result.loc[2, 'c21_2_0'] == 'No'
assert query_result.loc[3, 'c21_2_0'] == 'Maybe'
assert pd.isnull(query_result.loc[4, 'c21_2_0'])
assert query_result.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-08-14'
assert query_result.loc[2, 'c48_0_0'].strftime('%Y-%m-%d') == '2016-11-30'
assert query_result.loc[3, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-01-01'
assert query_result.loc[4, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-02-15'
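    # query() is consumed with next() throughout these tests: it appears to
    # return an iterator of result DataFrames, and for these small example
    # files a single next() call retrieves all rows.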
def test_postgresql_two_csv_files_query_single_table(self):
# Prepare
csv01 = get_repository_path('pheno2sql/example08_01.csv')
csv02 = get_repository_path('pheno2sql/example08_02.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL((csv01, csv02), db_engine, n_columns_per_table=999999)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c48_0_0']
query_result = next(p2sql.query(columns))
# Validate
assert query_result is not None
assert query_result.index.name == 'eid'
assert len(query_result.index) == 5
assert all(x in query_result.index for x in range(1, 5 + 1))
assert len(query_result.columns) == len(columns)
assert all(x in columns for x in query_result.columns)
assert not query_result.empty
assert query_result.shape[0] == 5
assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
assert query_result.loc[2, 'c21_0_0'] == 'Option number 2'
assert query_result.loc[3, 'c21_0_0'] == 'Option number 3'
assert query_result.loc[4, 'c21_0_0'] == 'Option number 4'
assert query_result.loc[5, 'c21_0_0'] == 'Option number 5'
assert query_result.loc[1, 'c21_2_0'] == 'Yes'
assert query_result.loc[2, 'c21_2_0'] == 'No'
assert query_result.loc[3, 'c21_2_0'] == 'Maybe'
assert pd.isnull(query_result.loc[4, 'c21_2_0'])
assert query_result.loc[5, 'c21_2_0'] == 'Probably'
assert query_result.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-07-14'
assert query_result.loc[2, 'c48_0_0'].strftime('%Y-%m-%d') == '2017-11-30'
assert query_result.loc[3, 'c48_0_0'].strftime('%Y-%m-%d') == '2020-01-01'
assert query_result.loc[4, 'c48_0_0'].strftime('%Y-%m-%d') == '1990-02-15'
assert query_result.loc[5, 'c48_0_0'].strftime('%Y-%m-%d') == '1999-10-11'
@unittest.skip('sqlite being removed')
def test_sqlite_query_multiple_tables(self):
# RIGHT and FULL OUTER JOINs are not currently supported
# Prepare
csv_file = get_repository_path('pheno2sql/example02.csv')
db_engine = SQLITE_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=2)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c48_0_0']
        query_result = next(p2sql.query(columns))
# Validate
assert query_result is not None
assert query_result.index.name == 'eid'
assert len(query_result.index) == 4
assert all(x in query_result.index for x in range(1, 4 + 1))
assert len(query_result.columns) == len(columns)
assert all(x in columns for x in query_result.columns)
assert not query_result.empty
assert query_result.shape[0] == 4
assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
assert query_result.loc[2, 'c21_0_0'] == 'Option number 2'
assert query_result.loc[3, 'c21_0_0'] == 'Option number 3'
assert query_result.loc[4, 'c21_0_0'] == 'Option number 4'
assert query_result.loc[1, 'c21_2_0'] == 'Yes'
assert query_result.loc[2, 'c21_2_0'] == 'No'
assert query_result.loc[3, 'c21_2_0'] == 'Maybe'
assert pd.isnull(query_result.loc[4, 'c21_2_0'])
assert query_result.loc[1, 'c48_0_0'] == '2011-08-14'
assert query_result.loc[2, 'c48_0_0'] == '2016-11-30'
assert query_result.loc[3, 'c48_0_0'] == '2010-01-01'
assert query_result.loc[4, 'c48_0_0'] == '2011-02-15'
def test_postgresql_query_multiple_tables(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example02.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=2)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c48_0_0']
query_result = next(p2sql.query(columns))
# Validate
assert query_result is not None
assert query_result.index.name == 'eid'
assert len(query_result.index) == 4
assert all(x in query_result.index for x in range(1, 4 + 1))
assert len(query_result.columns) == len(columns)
assert all(x in columns for x in query_result.columns)
assert not query_result.empty
assert query_result.shape[0] == 4
assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
assert query_result.loc[2, 'c21_0_0'] == 'Option number 2'
assert query_result.loc[3, 'c21_0_0'] == 'Option number 3'
assert query_result.loc[4, 'c21_0_0'] == 'Option number 4'
assert query_result.loc[1, 'c21_2_0'] == 'Yes'
assert query_result.loc[2, 'c21_2_0'] == 'No'
assert query_result.loc[3, 'c21_2_0'] == 'Maybe'
assert pd.isnull(query_result.loc[4, 'c21_2_0'])
assert query_result.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-08-14'
assert query_result.loc[2, 'c48_0_0'].strftime('%Y-%m-%d') == '2016-11-30'
assert query_result.loc[3, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-01-01'
assert query_result.loc[4, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-02-15'
def test_postgresql_two_csv_files_query_multiple_tables(self):
# Prepare
csv01 = get_repository_path('pheno2sql/example08_01.csv')
csv02 = get_repository_path('pheno2sql/example08_02.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL((csv01, csv02), db_engine, n_columns_per_table=999999)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c110_0_0', 'c150_0_0']
query_result = next(p2sql.query(columns))
# Validate
assert query_result is not None
assert query_result.index.name == 'eid'
assert len(query_result.index) == 5
assert all(x in query_result.index for x in range(1, 5 + 1))
assert len(query_result.columns) == len(columns)
assert all(x in columns for x in query_result.columns)
assert not query_result.empty
assert query_result.shape[0] == 5
assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
assert query_result.loc[2, 'c21_0_0'] == 'Option number 2'
assert query_result.loc[3, 'c21_0_0'] == 'Option number 3'
assert query_result.loc[4, 'c21_0_0'] == 'Option number 4'
assert query_result.loc[5, 'c21_0_0'] == 'Option number 5'
assert query_result.loc[1, 'c21_2_0'] == 'Yes'
assert query_result.loc[2, 'c21_2_0'] == 'No'
assert query_result.loc[3, 'c21_2_0'] == 'Maybe'
assert pd.isnull(query_result.loc[4, 'c21_2_0'])
assert query_result.loc[5, 'c21_2_0'] == 'Probably'
assert query_result.loc[1, 'c110_0_0'].round(5) == 42.55312
assert pd.isnull(query_result.loc[2, 'c110_0_0'])
assert query_result.loc[3, 'c110_0_0'].round(5) == -35.31471
assert pd.isnull(query_result.loc[4, 'c110_0_0'])
assert pd.isnull(query_result.loc[5, 'c110_0_0'])
assert query_result.loc[1, 'c150_0_0'].strftime('%Y-%m-%d') == '2010-07-14'
assert query_result.loc[2, 'c150_0_0'].strftime('%Y-%m-%d') == '2017-11-30'
assert pd.isnull(query_result.loc[3, 'c150_0_0'])
assert pd.isnull(query_result.loc[4, 'c150_0_0'])
assert pd.isnull(query_result.loc[5, 'c150_0_0'])
def test_postgresql_two_csv_files_flipped_query_multiple_tables(self):
# Prepare
# In this test the files are just flipped
csv01 = get_repository_path('pheno2sql/example08_01.csv')
csv02 = get_repository_path('pheno2sql/example08_02.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL((csv02, csv01), db_engine, n_columns_per_table=999999)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c110_0_0', 'c150_0_0']
query_result = next(p2sql.query(columns))
# Validate
assert query_result is not None
assert query_result.index.name == 'eid'
assert len(query_result.index) == 5
assert all(x in query_result.index for x in range(1, 5 + 1))
assert len(query_result.columns) == len(columns)
assert all(x in columns for x in query_result.columns)
assert not query_result.empty
assert query_result.shape[0] == 5
assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
assert query_result.loc[2, 'c21_0_0'] == 'Option number 2'
assert query_result.loc[3, 'c21_0_0'] == 'Option number 3'
assert query_result.loc[4, 'c21_0_0'] == 'Option number 4'
assert query_result.loc[5, 'c21_0_0'] == 'Option number 5'
assert query_result.loc[1, 'c21_2_0'] == 'Yes'
assert query_result.loc[2, 'c21_2_0'] == 'No'
assert query_result.loc[3, 'c21_2_0'] == 'Maybe'
assert pd.isnull(query_result.loc[4, 'c21_2_0'])
assert query_result.loc[5, 'c21_2_0'] == 'Probably'
assert query_result.loc[1, 'c110_0_0'].round(5) == 42.55312
assert pd.isnull(query_result.loc[2, 'c110_0_0'])
assert query_result.loc[3, 'c110_0_0'].round(5) == -35.31471
assert pd.isnull(query_result.loc[4, 'c110_0_0'])
assert pd.isnull(query_result.loc[5, 'c110_0_0'])
assert query_result.loc[1, 'c150_0_0'].strftime('%Y-%m-%d') == '2010-07-14'
assert query_result.loc[2, 'c150_0_0'].strftime('%Y-%m-%d') == '2017-11-30'
assert pd.isnull(query_result.loc[3, 'c150_0_0'])
assert pd.isnull(query_result.loc[4, 'c150_0_0'])
assert pd.isnull(query_result.loc[5, 'c150_0_0'])
@unittest.skip('sqlite being removed')
def test_sqlite_query_custom_columns(self):
# SQLite is very limited when selecting variables, renaming, doing math operations, etc
pass
def test_postgresql_query_custom_columns(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example02.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=2)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c47_0_0', '(c47_0_0 ^ 2.0) as c47_squared']
query_result = next(p2sql.query(columns))
# Validate
assert query_result is not None
assert query_result.index.name == 'eid'
assert len(query_result.index) == 4
assert all(x in query_result.index for x in range(1, 4 + 1))
assert len(query_result.columns) == len(columns)
assert all(x in ['c21_0_0', 'c21_2_0', 'c47_0_0', 'c47_squared'] for x in query_result.columns)
assert not query_result.empty
assert query_result.shape[0] == 4
assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
assert query_result.loc[2, 'c21_0_0'] == 'Option number 2'
assert query_result.loc[3, 'c21_0_0'] == 'Option number 3'
assert query_result.loc[4, 'c21_0_0'] == 'Option number 4'
assert query_result.loc[1, 'c21_2_0'] == 'Yes'
assert query_result.loc[2, 'c21_2_0'] == 'No'
assert query_result.loc[3, 'c21_2_0'] == 'Maybe'
assert pd.isnull(query_result.loc[4, 'c21_2_0'])
assert query_result.loc[1, 'c47_0_0'].round(5) == 45.55412
assert query_result.loc[2, 'c47_0_0'].round(5) == -0.55461
assert query_result.loc[3, 'c47_0_0'].round(5) == -5.32471
assert query_result.loc[4, 'c47_0_0'].round(5) == 55.19832
assert query_result.loc[1, 'c47_squared'].round(5) == round(45.55412 ** 2, 5)
assert query_result.loc[2, 'c47_squared'].round(5) == round((-0.55461) ** 2, 5)
assert query_result.loc[3, 'c47_squared'].round(5) == round((-5.32471) ** 2, 5)
assert query_result.loc[4, 'c47_squared'].round(5) == round(55.19832 ** 2, 5)
@unittest.skip('sqlite being removed')
def test_sqlite_query_single_filter(self):
# RIGHT and FULL OUTER JOINs are not currently supported
# Prepare
csv_file = get_repository_path('pheno2sql/example02.csv')
db_engine = SQLITE_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=2)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c47_0_0']
filter = ['c47_0_0 > 0']
query_result = p2sql.query(columns, filter)
# Validate
assert query_result is not None
assert query_result.index.name == 'eid'
assert len(query_result.index) == 2
assert all(x in query_result.index for x in (1, 4))
assert len(query_result.columns) == len(columns)
assert all(x in columns for x in query_result.columns)
assert not query_result.empty
assert query_result.shape[0] == 2
assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
assert query_result.loc[4, 'c21_0_0'] == 'Option number 4'
assert query_result.loc[1, 'c21_2_0'] == 'Yes'
assert pd.isnull(query_result.loc[4, 'c21_2_0'])
assert query_result.loc[1, 'c47_0_0'].round(5) == 45.55412
assert query_result.loc[4, 'c47_0_0'].round(5) == 55.19832
def test_postgresql_query_single_filter(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example02.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=2)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c47_0_0']
filter = ['c47_0_0 > 0']
query_result = next(p2sql.query(columns, filterings=filter))
# Validate
assert query_result is not None
assert query_result.index.name == 'eid'
assert len(query_result.index) == 2
assert all(x in query_result.index for x in (1, 4))
assert len(query_result.columns) == len(columns)
assert all(x in columns for x in query_result.columns)
assert not query_result.empty
assert query_result.shape[0] == 2
assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
assert query_result.loc[4, 'c21_0_0'] == 'Option number 4'
assert query_result.loc[1, 'c21_2_0'] == 'Yes'
assert pd.isnull(query_result.loc[4, 'c21_2_0'])
assert query_result.loc[1, 'c47_0_0'].round(5) == 45.55412
assert query_result.loc[4, 'c47_0_0'].round(5) == 55.19832
@unittest.skip('sqlite being removed')
def test_sqlite_query_multiple_and_filter(self):
# 'RIGHT and FULL OUTER JOINs are not currently supported'
# Prepare
csv_file = get_repository_path('pheno2sql/example02.csv')
db_engine = SQLITE_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=2)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c47_0_0', 'c48_0_0']
filter = ["c48_0_0 > '2011-01-01'", "c21_2_0 <> ''"]
query_result = p2sql.query(columns, filter)
# Validate
assert query_result is not None
assert query_result.index.name == 'eid'
assert len(query_result.index) == 2
assert all(x in query_result.index for x in (1, 2))
assert len(query_result.columns) == len(columns)
assert all(x in columns for x in query_result.columns)
assert not query_result.empty
assert query_result.shape[0] == 2
assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
assert query_result.loc[2, 'c21_0_0'] == 'Option number 2'
assert query_result.loc[1, 'c21_2_0'] == 'Yes'
assert query_result.loc[2, 'c21_2_0'] == 'No'
assert query_result.loc[1, 'c47_0_0'].round(5) == 45.55412
assert query_result.loc[2, 'c47_0_0'].round(5) == -0.55461
def test_postgresql_query_multiple_and_filter(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example02.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=2)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c47_0_0', 'c48_0_0']
filter = ["c48_0_0 > '2011-01-01'", "c21_2_0 <> ''"]
query_result = next(p2sql.query(columns, filterings=filter))
# Validate
assert query_result is not None
assert query_result.index.name == 'eid'
assert len(query_result.index) == 2
assert all(x in query_result.index for x in (1, 2))
assert len(query_result.columns) == len(columns)
assert all(x in columns for x in query_result.columns)
assert not query_result.empty
assert query_result.shape[0] == 2
assert query_result.loc[1, 'c21_0_0'] == 'Option number 1'
assert query_result.loc[2, 'c21_0_0'] == 'Option number 2'
assert query_result.loc[1, 'c21_2_0'] == 'Yes'
assert query_result.loc[2, 'c21_2_0'] == 'No'
assert query_result.loc[1, 'c47_0_0'].round(5) == 45.55412
assert query_result.loc[2, 'c47_0_0'].round(5) == -0.55461
assert query_result.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-08-14'
assert query_result.loc[2, 'c48_0_0'].strftime('%Y-%m-%d') == '2016-11-30'
@unittest.skip('sqlite being removed')
def test_sqlite_float_is_empty(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example03.csv')
db_engine = SQLITE_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'sqlite'
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 4
assert tmp.loc[1, 'c21_0_0'] == 'Option number 1'
assert tmp.loc[1, 'c21_1_0'] == 'No response'
assert tmp.loc[1, 'c21_2_0'] == 'Yes'
assert tmp.loc[2, 'c21_0_0'] == 'Option number 2'
assert pd.isnull(tmp.loc[2, 'c21_1_0'])
assert tmp.loc[2, 'c21_2_0'] == 'No'
assert tmp.loc[3, 'c21_0_0'] == 'Option number 3'
assert tmp.loc[3, 'c21_1_0'] == 'Of course'
assert tmp.loc[3, 'c21_2_0'] == 'Maybe'
assert pd.isnull(tmp.loc[4, 'c21_2_0'])
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 4
assert tmp.loc[1, 'c31_0_0'] == '2012-01-05'
assert int(tmp.loc[1, 'c34_0_0']) == 21
assert int(tmp.loc[1, 'c46_0_0']) == -9
assert tmp.loc[2, 'c31_0_0'] == '2015-12-30'
assert int(tmp.loc[2, 'c34_0_0']) == 12
assert int(tmp.loc[2, 'c46_0_0']) == -2
assert tmp.loc[3, 'c31_0_0'] == '2007-03-19'
assert int(tmp.loc[3, 'c34_0_0']) == 1
assert int(tmp.loc[3, 'c46_0_0']) == -7
tmp = pd.read_sql('select * from ukb_pheno_0_02', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 4
# FIXME: this is strange, data type in this particular case needs np.round
assert np.round(tmp.loc[1, 'c47_0_0'], 5) == 45.55412
assert tmp.loc[1, 'c48_0_0'] == '2011-08-14'
assert tmp.loc[2, 'c47_0_0'] == -0.55461
assert tmp.loc[2, 'c48_0_0'] == '2016-11-30'
assert pd.isnull(tmp.loc[3, 'c47_0_0'])
assert tmp.loc[3, 'c48_0_0'] == '2010-01-01'
def test_postgresql_float_is_empty(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example03.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 4
assert tmp.loc[1, 'c21_0_0'] == 'Option number 1'
assert tmp.loc[1, 'c21_1_0'] == 'No response'
assert tmp.loc[1, 'c21_2_0'] == 'Yes'
assert tmp.loc[2, 'c21_0_0'] == 'Option number 2'
assert pd.isnull(tmp.loc[2, 'c21_1_0'])
assert tmp.loc[2, 'c21_2_0'] == 'No'
assert tmp.loc[3, 'c21_0_0'] == 'Option number 3'
assert tmp.loc[3, 'c21_1_0'] == 'Of course'
assert tmp.loc[3, 'c21_2_0'] == 'Maybe'
assert pd.isnull(tmp.loc[4, 'c21_2_0'])
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 4
assert tmp.loc[1, 'c31_0_0'].strftime('%Y-%m-%d') == '2012-01-05'
assert int(tmp.loc[1, 'c34_0_0']) == 21
assert int(tmp.loc[1, 'c46_0_0']) == -9
assert tmp.loc[2, 'c31_0_0'].strftime('%Y-%m-%d') == '2015-12-30'
assert int(tmp.loc[2, 'c34_0_0']) == 12
assert int(tmp.loc[2, 'c46_0_0']) == -2
assert tmp.loc[3, 'c31_0_0'].strftime('%Y-%m-%d') == '2007-03-19'
assert int(tmp.loc[3, 'c34_0_0']) == 1
assert int(tmp.loc[3, 'c46_0_0']) == -7
tmp = pd.read_sql('select * from ukb_pheno_0_02', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 4
assert tmp.loc[1, 'c47_0_0'].round(5) == 45.55412
assert tmp.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-08-14'
assert tmp.loc[2, 'c47_0_0'].round(5) == -0.55461
assert tmp.loc[2, 'c48_0_0'].strftime('%Y-%m-%d') == '2016-11-30'
assert pd.isnull(tmp.loc[3, 'c47_0_0'])
assert tmp.loc[3, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-01-01'
def test_postgresql_timestamp_is_empty(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example04.csv')
db_engine = 'postgresql://test:test@localhost:5432/ukb'
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 4
assert tmp.loc[1, 'c21_0_0'] == 'Option number 1'
assert tmp.loc[1, 'c21_1_0'] == 'No response'
assert tmp.loc[1, 'c21_2_0'] == 'Yes'
assert tmp.loc[2, 'c21_0_0'] == 'Option number 2'
assert pd.isnull(tmp.loc[2, 'c21_1_0'])
assert tmp.loc[2, 'c21_2_0'] == 'No'
assert tmp.loc[3, 'c21_0_0'] == 'Option number 3'
assert tmp.loc[3, 'c21_1_0'] == 'Of course'
assert tmp.loc[3, 'c21_2_0'] == 'Maybe'
assert pd.isnull(tmp.loc[4, 'c21_2_0'])
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 4
assert tmp.loc[1, 'c31_0_0'].strftime('%Y-%m-%d') == '2012-01-05'
assert int(tmp.loc[1, 'c34_0_0']) == 21
assert int(tmp.loc[1, 'c46_0_0']) == -9
assert tmp.loc[2, 'c31_0_0'].strftime('%Y-%m-%d') == '2015-12-30'
assert int(tmp.loc[2, 'c34_0_0']) == 12
assert int(tmp.loc[2, 'c46_0_0']) == -2
assert tmp.loc[3, 'c31_0_0'].strftime('%Y-%m-%d') == '2007-03-19'
assert int(tmp.loc[3, 'c34_0_0']) == 1
assert int(tmp.loc[3, 'c46_0_0']) == -7
assert pd.isnull(tmp.loc[4, 'c31_0_0'])
tmp = pd.read_sql('select * from ukb_pheno_0_02', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 4
assert tmp.loc[1, 'c47_0_0'].round(5) == 45.55412
assert tmp.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-08-14'
assert tmp.loc[2, 'c47_0_0'].round(5) == -0.55461
assert pd.isnull(tmp.loc[2, 'c48_0_0'])
assert tmp.loc[3, 'c47_0_0'].round(5) == -5.32471
assert tmp.loc[3, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-01-01'
def test_postgresql_integer_is_nan(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example06_nan_integer.csv')
db_engine = 'postgresql://test:test@localhost:5432/ukb'
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 4
assert tmp.loc[1, 'c31_0_0'].strftime('%Y-%m-%d') == '2012-01-05'
assert int(tmp.loc[1, 'c34_0_0']) == 21
assert int(tmp.loc[1, 'c46_0_0']) == -9
assert tmp.loc[2, 'c31_0_0'].strftime('%Y-%m-%d') == '2015-12-30'
assert int(tmp.loc[2, 'c34_0_0']) == 12
pd.isnull(tmp.loc[2, 'c46_0_0'])
assert tmp.loc[3, 'c31_0_0'].strftime('%Y-%m-%d') == '2007-03-19'
assert int(tmp.loc[3, 'c34_0_0']) == 1
assert int(tmp.loc[3, 'c46_0_0']) == -7
assert pd.isnull(tmp.loc[4, 'c31_0_0'])
def test_postgresql_first_row_is_nan_integer(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example07_first_nan_integer.csv')
db_engine = 'postgresql://test:test@localhost:5432/ukb'
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 4
assert tmp.loc[1, 'c31_0_0'].strftime('%Y-%m-%d') == '2012-01-05'
assert int(tmp.loc[1, 'c34_0_0']) == 21
assert pd.isnull(tmp.loc[1, 'c46_0_0'])
assert tmp.loc[2, 'c31_0_0'].strftime('%Y-%m-%d') == '2015-12-30'
assert int(tmp.loc[2, 'c34_0_0']) == 12
pd.isnull(tmp.loc[2, 'c46_0_0'])
assert tmp.loc[3, 'c31_0_0'].strftime('%Y-%m-%d') == '2007-03-19'
assert int(tmp.loc[3, 'c34_0_0']) == 1
assert int(tmp.loc[3, 'c46_0_0']) == -7
assert pd.isnull(tmp.loc[4, 'c31_0_0'])
def test_postgresql_sql_chunksize01(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example02.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, sql_chunksize=2)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c48_0_0']
query_result = p2sql.query(columns)
# Validate
assert query_result is not None
import collections
assert isinstance(query_result, collections.Iterable)
index_len_sum = 0
for chunk_idx, chunk in enumerate(query_result):
assert chunk.index.name == 'eid'
index_len_sum += len(chunk.index)
assert len(chunk.index) == 2
if chunk_idx == 0:
indexes = (1, 2)
assert all(x in chunk.index for x in indexes)
else:
indexes = (3, 4)
assert all(x in chunk.index for x in indexes)
assert len(chunk.columns) == len(columns)
assert all(x in columns for x in chunk.columns)
assert not chunk.empty
assert chunk.shape[0] == 2
if chunk_idx == 0:
assert chunk.loc[1, 'c21_0_0'] == 'Option number 1'
assert chunk.loc[2, 'c21_0_0'] == 'Option number 2'
assert chunk.loc[1, 'c21_2_0'] == 'Yes'
assert chunk.loc[2, 'c21_2_0'] == 'No'
assert chunk.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-08-14'
assert chunk.loc[2, 'c48_0_0'].strftime('%Y-%m-%d') == '2016-11-30'
else:
assert chunk.loc[3, 'c21_0_0'] == 'Option number 3'
assert chunk.loc[4, 'c21_0_0'] == 'Option number 4'
assert chunk.loc[3, 'c21_2_0'] == 'Maybe'
assert pd.isnull(chunk.loc[4, 'c21_2_0'])
assert chunk.loc[3, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-01-01'
assert chunk.loc[4, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-02-15'
assert index_len_sum == 4
def test_postgresql_sql_chunksize02(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example02.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, sql_chunksize=3)
p2sql.load_data()
# Run
columns = ['c21_0_0', 'c21_2_0', 'c48_0_0']
query_result = p2sql.query(columns)
# Validate
assert query_result is not None
import collections
assert isinstance(query_result, collections.Iterable)
index_len_sum = 0
for chunk_idx, chunk in enumerate(query_result):
assert chunk.index.name == 'eid'
index_len_sum += len(chunk.index)
if chunk_idx == 0:
assert len(chunk.index) == 3
indexes = (1, 2, 3)
assert all(x in chunk.index for x in indexes)
else:
assert len(chunk.index) == 1
indexes = (4,)
assert all(x in chunk.index for x in indexes)
assert len(chunk.columns) == len(columns)
assert all(x in columns for x in chunk.columns)
assert not chunk.empty
if chunk_idx == 0:
assert chunk.shape[0] == 3
assert chunk.loc[1, 'c21_0_0'] == 'Option number 1'
assert chunk.loc[2, 'c21_0_0'] == 'Option number 2'
assert chunk.loc[3, 'c21_0_0'] == 'Option number 3'
assert chunk.loc[1, 'c21_2_0'] == 'Yes'
assert chunk.loc[2, 'c21_2_0'] == 'No'
assert chunk.loc[3, 'c21_2_0'] == 'Maybe'
assert chunk.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-08-14'
assert chunk.loc[2, 'c48_0_0'].strftime('%Y-%m-%d') == '2016-11-30'
assert chunk.loc[3, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-01-01'
else:
assert chunk.shape[0] == 1
assert chunk.loc[4, 'c21_0_0'] == 'Option number 4'
                assert pd.isnull(chunk.loc[4, 'c21_2_0'])
from ast import parse
from operator import indexOf
from typing import OrderedDict
import numpy as np
from numpy.lib.function_base import rot90
from pandas.io.parsers import read_csv
import torch.utils.data as data_utils
import pandas as pd
import matplotlib.pyplot as plt
from torch.utils.data.dataset import Subset
import seaborn as sns
from datetime import datetime, timedelta
import matplotlib.dates as mdates
import torch
from sklearn import preprocessing
from sklearn.metrics import precision_recall_fscore_support as score
from sklearn.metrics import roc_curve, roc_auc_score
import sys
from adtk.data import validate_series
from adtk.visualization import plot
from adtk.detector import SeasonalAD
def seqLabel_2_WindowsLabels(window_size, labels):
    # Convert the per-timestep label sequence into sliding-window labels
windows_labels = []
for i in range(len(labels)-window_size+1):
windows_labels.append(list(np.int_(labels[i:i+window_size])))
    # A window is labelled anomalous as soon as it contains at least one anomalous timestep
y_True = [1.0 if (np.sum(window) > 0)
else 0 for window in windows_labels]
return y_True
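# Illustration (hedged, assumed toy inputs): a single anomalous timestep at index 3
# taints every size-3 window that covers it, so
#     seqLabel_2_WindowsLabels(3, [0, 0, 0, 1, 0, 0, 0])  ->  [0, 1.0, 1.0, 1.0, 0]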
def get_default_device():
"""Pick GPU if available, else CPU"""
if torch.cuda.is_available():
return torch.device('cuda')
else:
return torch.device('cpu')
def to_device(data, device):
"""Move tensor(s) to chosen device"""
if isinstance(data, (list, tuple)):
return [to_device(x, device) for x in data]
return data.to(device, non_blocking=True)
def plot_history(history, modelName):
losses1 = [x['val_loss1'] for x in history]
if modelName == "USAD":
losses2 = [x['val_loss2'] for x in history]
plt.plot(losses2, '-x', label="loss2")
plt.plot(losses1, '-x', label="loss1")
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()
plt.title('Losses vs. No. of epochs')
plt.grid()
plt.savefig("result/"+modelName+"/history")
def histogram(y_test, y_pred):
plt.figure(figsize=(12, 6))
plt.hist([y_pred[y_test == 0],
y_pred[y_test == 1]],
bins=20,
color=['#82E0AA', '#EC7063'], stacked=True)
plt.title("Results", size=20)
plt.grid()
plt.savefig("histogram")
def ROC(y_test, y_pred, modelName):
fpr, tpr, tr = roc_curve(y_test, y_pred)
auc = roc_auc_score(y_test, y_pred)
idx = np.argwhere(np.diff(np.sign(tpr-(1-fpr)))).flatten()
plt.figure()
plt.xlabel("FPR")
plt.ylabel("TPR")
plt.plot(fpr, tpr, label="AUC="+str(auc))
plt.plot(fpr, 1-fpr, 'r:')
plt.plot(fpr[idx], tpr[idx], 'ro')
plt.legend(loc=4)
plt.grid()
plt.show()
plt.savefig("result/"+modelName+"/ROC")
plt.clf()
return tr[idx]
def printDataInfo(dataset):
abnormalCount = 0
normalCount = 0
for label in dataset["Normal/Attack"]:
if label == "Normal":
normalCount += 1
else:
abnormalCount += 1
print("#####data info########")
print("number of anomaly :", abnormalCount)
print("number of normal :", normalCount)
print("################")
def evaluateResult(y_True, y_pred, threshold, modelName):
y_pred_anomaly = [1 if(x >= threshold) else 0 for x in y_pred]
TP = 0
TN = 0
FP = 0
FN = 0
for index, item in enumerate(y_pred_anomaly):
if y_pred_anomaly[index] == 1 and y_True[index] == 1:
TP += 1
elif y_pred_anomaly[index] == 0 and y_True[index] == 0:
TN += 1
elif y_pred_anomaly[index] == 1 and y_True[index] == 0:
FP += 1
elif y_pred_anomaly[index] == 0 and y_True[index] == 1:
FN += 1
recall = float(TP/(TP+FN))
precision = float(TP/(TP+FP))
if recall == 0 and precision ==0:
return
with open("result/"+modelName+"/result.txt", 'a') as resultFile:
print("-------------------", file=resultFile)
print("TP:", TP, "TN:", TN, "FP:", FP, "FN:", FN, file=resultFile)
print("precision:", precision, file=resultFile)
print("recall:", recall, file=resultFile)
print("F1 score", 2*precision*recall /
(precision+recall), file=resultFile)
print("TPR", TP/(TP+FN), file=resultFile)
print("FPR", FP/(TN+FP), file=resultFile)
print("-------------------", file=resultFile)
def printResult(y_True, y_pred, threshold, modelName):
y_pred_anomaly = [1 if(x >= threshold) else 0 for x in y_pred]
precision, recall, fscore, support = score(y_True, y_pred_anomaly)
# caculate recall
print("============== result ==================")
evaluateResult(y_True, y_pred, threshold, modelName)
print('precision: {}'.format(precision[0]))
print('recall: {}'.format(recall[0]))
print('f1score: {}'.format(fscore[0]))
print("============== result ==================")
def confusion_matrix(target, predicted, perc=False):
data = {'y_Actual': target,
'y_Predicted': predicted
}
    df = pd.DataFrame(data, columns=['y_Predicted', 'y_Actual'])
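    # Plausible completion (hedged sketch; the crosstab/heatmap ending below is an
    # assumption, not taken verbatim from the original helper): cross-tabulate the two
    # columns and draw a seaborn heatmap, as raw counts or as percentages when perc.
    cm = pd.crosstab(df['y_Actual'], df['y_Predicted'],
                     rownames=['Actual'], colnames=['Predicted'])
    if perc:
        sns.heatmap(cm / cm.values.sum(), annot=True, fmt='.2%', cmap='Blues')
    else:
        sns.heatmap(cm, annot=True, fmt='d', cmap='Blues')
    plt.show()
    return cm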
import statsmodels.api as sm
from statsmodels.sandbox.nonparametric import kernels
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
'''Feature-space profiling.
Features are grouped into four types:
- numeric: continuous features, i.e. an order relation can be defined and the number of unique values is unbounded
- category: categorical features
- Multi-category: multi-valued categorical features
- object: unstructured data, for which no profiling is provided yet
'''
def describe_numeric_1d(series,quantiles=None,missing_value = None):
"""Describe a numeric series.
Args:
series: The Series to describe.
quantiles: list,like [0.25,0.75].
Returns:
A dict containing calculated series description values.
"""
if quantiles is None:
quantiles = [0.005,0.01,0.05,0.25,0.5,0.75,0.95,0.99,0.995]
n = len(series)
if missing_value:
series = series.replace({missing_value:np.NaN})
stats = {
"mean": series.mean(),
"std": series.std(),
"variance": series.var(),
"min": series.min(),
"max": series.max(),
"kurtosis": series.kurt(),
"skewness": series.skew(),
"zeros": (n - np.count_nonzero(series))/n,
"missing":np.sum(series.isna())/n
}
stats.update({
"{:.1%}".format(percentile).replace('.0',''): value
for percentile, value in series.quantile(quantiles).to_dict().items()
})
stats["iqr"] = stats["75%"] - stats["25%"]
stats["cv"] = stats["std"] / stats["mean"] if stats["mean"] else np.NaN
return stats
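# Hedged usage sketch (toy data; exact numbers depend on the input):
#     s = pd.Series([1.0, 2.0, 0.0, 4.0, np.nan])
#     stats = describe_numeric_1d(s)
#     stats["missing"], stats["zeros"]     # fractions over the full length
#     stats["iqr"], stats["cv"]            # derived from the quantiles above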
def _get_distrbution(x,x_lim=None,gridsize=None,bw=None,bw_method='scott',eta=1):
'''
## brandwidth select
A = min(std(x, ddof=1), IQR/1.349)
Scott's rule: 1.059 * A * n ** (-1/5.)
Silverman's Rule: .9 * A * n ** (-1/5.)
'''
x = pd.Series(x)
stats = describe_numeric_1d(x)
x_mean_fix = x[(x>=stats['5%'])&(x<=stats['95%'])].mean()
    # Truncate the data so the density is estimated over a sensible range
if x_lim is None:
cv_lower,cv_upper = x[x<=stats['5%']].std()/(abs(x_mean_fix)+1e-14), x[x>=stats['95%']].std()/(abs(x_mean_fix)+1e-14)
x_lim = [stats['5%'] if cv_lower>=eta else stats['min'],stats['95%'] if cv_upper>=eta else stats['max']]
domain = [stats['min'],stats['max']]
if cv_lower>=eta:
domain[0] = -np.inf
if cv_upper>=eta:
domain[1] = np.inf
    # Choose the grid size needed for plotting and evaluation
try:
bw = float(bw)
except:
bw = sm.nonparametric.bandwidths.select_bandwidth(x,bw=bw_method,kernel = None)
    # Feature samples are usually large, so enforce a minimum gridsize of 128
n_fix = len(x[(x>=x_lim[0])&(x<=x_lim[1])])
if gridsize is None:
gridsize=max(128,min(int(np.round((x_lim[1] - x_lim[0])/bw)),n_fix)) if bw>=1e-7 else None
dens = sm.nonparametric.KDEUnivariate(x.dropna().astype(np.double).values)
dens.fit(gridsize=gridsize,bw=bw,clip=x_lim)
    # Retrieve the fitted bandwidth and related quantities
bw = dens.bw
    # bw_method = bw_method if dens.bw_method == 'user-given' else dens.bw_method
gridsize = len(dens.support)
result = stats
result.update({'key_dist':['bw','bw_method','support','density','x_lim','cdf','icdf','domain','gridsize','evaluate']
,'bw':bw
,'bw_method':bw_method
,'support':dens.support
,'density':dens.density
,'x_lim':x_lim
,'cdf':dens.cdf
,'icdf':dens.icdf
,'domain':domain
,'gridsize':gridsize
,'evaluate':dens.evaluate
})
return result
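# Order-of-magnitude check for the bandwidth rules quoted in the docstring above
# (numbers assumed for illustration): with A = 1.0 and n = 10_000,
# Scott's rule gives 1.059 * 10_000 ** (-1/5) ≈ 0.168 and Silverman's rule
# 0.9 * 10_000 ** (-1/5) ≈ 0.143. When no numeric `bw` is supplied, the function
# defers to sm.nonparametric.bandwidths.select_bandwidth(x, bw=bw_method, kernel=None).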
class feature_numeric(object):
def __init__(self,name=None):
self.name = name
self.dtype = 'numeric'
self.stats = None
self.dist = None
self.cross_proba=None
self.cross_stats=None
    # Random sampling via the inverse CDF
def sample(self,n):
return self.dist['icdf'][np.random.randint(low=0,high = self.dist['gridsize'] -1,size=n)]
def pdf(self,x):
return self.dist['evaluate'](x)
def describe(self):
return self.stats
def get_values(self,key):
if key in self.dist:
return self.dist[key]
elif key in self.stats:
return self.stats[key]
elif key in self.cross_proba:
return self.cross_proba[key]
elif key in self.cross_stats:
return self.cross_stats[key]
else:
return None
def fit(self,x,y=None,**arg):
result = _get_distrbution(x,**arg)
self.stats = {key:value for key,value in result.items() if key not in result['key_dist']+['key_dist']}
self.dist = {key:value for key,value in result.items() if key in result['key_dist']}
if y is not None and len(x) == len(y):
cross_proba,cross_stats = self.crosstab_bin(x,y)
self.cross_proba = cross_proba
self.cross_stats = cross_stats
def crosstab_bin(self,x,y):
x = pd.Series(x)
y = pd.Series(y)
n = len(y)
dist_y = y.value_counts()/n
bw = self.dist['bw']
support = self.dist['support']
domain = self.dist['domain']
q995 = self.stats['99.5%']
gridsize = self.dist['gridsize']
seq = np.mean(support[1:] - support[0:-1])
        # Extend the support grid so behaviour beyond the observed range can be analysed
if domain[1] == np.inf:
n_add = np.ceil((q995 - support[-1])/seq)
support_add = [support[-1] + seq*(i+1) for i in range(int(n_add))]
support_new = np.concatenate((support,support_add))
else:
support_new = support.copy()
p_y1_x = np.zeros_like(support_new)
cumulative = np.zeros_like(support_new)
for i,xi in enumerate(support_new):
ind =(x<=xi+bw)&(x>=xi-bw)
tmp = y[ind].value_counts().to_dict()
cnt = {0:dist_y[0],1:dist_y[1]}
cnt[0] += tmp.get(0,0)
cnt[1] += tmp.get(1,0)
p_y1_x[i] = cnt[1]/(cnt[0]+cnt[1])
cumulative[i] = np.sum(x<=xi)/n
        # By Bayes' rule we can recover the class-conditional densities
p_x_y1 = self.dist['density']*p_y1_x[:gridsize]/dist_y[1]
p_x_y0 = self.dist['density']*(1-p_y1_x[:gridsize])/dist_y[0]
iv =np.sum((p_x_y1 - p_x_y0)*np.log2((1e-14+p_x_y1)/(p_x_y0+1e-14)))*seq
cross_proba = {
"p(y=1|x)":p_y1_x
,"p(x|y=1)":p_x_y1
,"p(x|y=0)":p_x_y0
,"woe(x)":np.log2(p_x_y0/p_x_y1)
,"cumulative":cumulative
,"support_x":support_new
,"support_y":support
}
cross_stats = {
"iv":iv
,"p(y=1)":dist_y[1]
,"p(y=0)":dist_y[0]
}
return cross_proba,cross_stats
def plot_pdf(self):
x_min,x_max = self.stats['min'],self.stats['max']
bw = self.dist['bw']
if self.name:
title = 'density curve of {}'.format(self.name)
else:
title = 'density curve'
fig,ax=plt.subplots(figsize=[10,6.6])
support = self.dist['support']
#seq = np.mean(support[1:]-support[0:-1])
#ind = (support>=x_min-3*seq)&(support<=x_max+3*seq)
ax.plot(support,self.dist['density']);
ax.set_title(title);
ax.set_xlabel('range = [{},{}]'.format(x_min,x_max));
fig.show()
return None
def summary(self):
        # Two variants to distinguish here: one with a target y and one without
tmp = pd.DataFrame(index=range(0,10),columns=['name1','value1','name2','value2','name3','value3'])
tmp.name1 = ['missing','zeros','min','max','mean','std','skewness','kurtosis','cv','iqr']
tmp.value1 = [self.stats[k] for k in tmp['name1'].values]
tmp.name2 = ['0.5%','1%','5%','25%','50%','75%','95%','99%','99.5%','domain']
tmp.value2 = [self.stats[k] for k in tmp['name2'][:-1].values]+[str(self.dist['domain'])]
tmp.loc[0,'name3'] = 'iv'
tmp.loc[0,'value3'] = self.cross_stats['iv']
tmp.loc[1,'name3'] = 'p(y=1)'
tmp.loc[1,'value3'] = self.cross_stats['p(y=1)']
display(tmp)
support_new = self.cross_proba['support_x']
ind1 = (self.cross_proba['support_x']>=self.stats['min'])&(self.cross_proba['support_x']<=self.stats['max'])
p_y1 = self.cross_stats['p(y=1)']
fig,[ax1,ax2]=plt.subplots(2,1,figsize=[10,13])
ax1.plot(support_new[ind1],self.cross_proba['p(y=1|x)'][ind1] ,'.');
ax1.plot([support_new[0],support_new[-1]] ,[p_y1,p_y1],label = 'baseline')
ax1_ = ax1.twinx()
ax1_.plot(support_new[ind1],self.cross_proba['cumulative'][ind1],label = 'cumulative',color='red')
ax1_.legend(loc = 'center left')
ax1.set_title(r'$p(y=1|x)$');
ax1.legend()
ind2 = (self.cross_proba['support_y']>=self.stats['min'])&(self.cross_proba['support_y']<=self.stats['max'])
ax2.plot(self.cross_proba['support_y'],self.cross_proba['p(x|y=1)'],label=r'$p(x|y=1)$')
ax2.plot(self.cross_proba['support_y'],self.cross_proba['p(x|y=0)'],label=r'$p(x|y=0)$')
ax2.plot(self.cross_proba['support_y'],self.dist['density'],label=r'$p(x)$',color = '0.5',linestyle='--')
ax2_ = ax2.twinx()
ax2_.plot(self.cross_proba['support_y'][ind2],self.cross_proba['woe(x)'][ind2],label = 'woe(x)',color='red')
ax2_.legend(loc = 'center right')
ax2.legend()
ax2.set_title(r'$p(x|y=1)$ vs $p(x|y=0)$')
ax2.set_xlabel('iv = {:.2f}'.format(self.cross_stats['iv']))
fig.show()
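# Hedged usage sketch (synthetic data; summary() assumes a notebook because it calls
# display()):
#     rng = np.random.default_rng(0)
#     x = rng.normal(size=10_000)
#     y = (rng.random(10_000) < 0.1).astype(int)
#     feat = feature_numeric(name="x")
#     feat.fit(x, y)       # stats + KDE, plus p(y=1|x), WOE and IV against y
#     feat.summary()       # table of descriptives and the conditional plots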
def sample_size_cal(p,alpha=0.05,e=0.05):
import scipy.stats as stats
z=stats.norm.ppf(1-alpha/2)
return int(np.ceil(z**2*p*(1-p)/e**2))
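# Worked example: for p = 0.5, alpha = 0.05 and e = 0.05, z ≈ 1.96 and
# n = ceil(1.96**2 * 0.25 / 0.0025) = 385, the classic survey sample size and the
# default backoff_n used by describe_categorical below.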
def describe_categorical(x
,missing_value = None
,pct_pos = 0.5
,backoff_p = 0.05
,backoff_rnk = 30
,backoff_n = None
,alpha=0.05
,e=0.05):
x = pd.Series(x)
if missing_value:
x = x.replace({str(missing_value):np.nan})
n = len(x)
missing = np.sum(x.isnull())
p_x = x.value_counts().sort_values(ascending=False)/n
itemlist = p_x.index.tolist()
    # Identify rare categories
if backoff_n is None:
backoff_n = sample_size_cal(pct_pos,alpha=alpha,e=e)
x_base = pd.DataFrame(x.value_counts().sort_values(ascending=False),index=itemlist)
x_base.columns = ['cnt']
x_base['proba'] = x_base['cnt']/n
x_base['type'] = 'normal'
x_base['rnk'] = range(1,len(x_base)+1)
x_base.loc[((x_base.proba<backoff_p)&(x_base.cnt<backoff_n))|(x_base.rnk>=backoff_rnk),'type'] = 'rare'
stats = {
"missing": missing/n,
"distinct_count":len(itemlist),
"n":n,
"entropy":-1*np.sum(p_x*np.log2(p_x))
}
dist = {
"itemlist":itemlist,
"p(x)":p_x,
"type":x_base['type'].to_dict(),
"itemlist_rare":x_base[x_base.type=='rare'].index.tolist(),
"data":x_base
}
return stats,dist
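# Hedged usage sketch (toy data): value counts, missing rate and entropy come back in
# `stats`; `dist` carries the per-category table with a normal/rare flag.
#     stats, dist = describe_categorical(pd.Series(list("aaabbc")))
#     stats["distinct_count"]      # 3
#     dist["itemlist"][0]          # 'a' (most frequent first)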
class feature_categorical(object):
def __init__(self,name=None):
self.name = name
self.dtype = 'categorical'
self.stats = None
self.dist = None
self.cross_proba=None
self.cross_stats=None
def crosstab_bin(self,x,y):
x = pd.Series(x)
        y = pd.Series(y)
# -*- coding: utf-8 -*-
try:
import json
except ImportError:
import simplejson as json
import math
import pytz
import locale
import pytest
import time
import datetime
import calendar
import re
import decimal
import dateutil
from functools import partial
from pandas.compat import range, StringIO, u
from pandas._libs.tslib import Timestamp
import pandas._libs.json as ujson
import pandas.compat as compat
import numpy as np
from pandas import DataFrame, Series, Index, NaT, DatetimeIndex, date_range
import pandas.util.testing as tm
json_unicode = (json.dumps if compat.PY3
else partial(json.dumps, encoding="utf-8"))
def _clean_dict(d):
"""
Sanitize dictionary for JSON by converting all keys to strings.
Parameters
----------
d : dict
The dictionary to convert.
Returns
-------
cleaned_dict : dict
"""
return {str(k): v for k, v in compat.iteritems(d)}
@pytest.fixture(params=[
None, # Column indexed by default.
"split",
"records",
"values",
"index"])
def orient(request):
return request.param
@pytest.fixture(params=[None, True])
def numpy(request):
return request.param
class TestUltraJSONTests(object):
@pytest.mark.skipif(compat.is_platform_32bit(),
reason="not compliant on 32-bit, xref #15865")
def test_encode_decimal(self):
sut = decimal.Decimal("1337.1337")
encoded = ujson.encode(sut, double_precision=15)
decoded = ujson.decode(encoded)
assert decoded == 1337.1337
sut = decimal.Decimal("0.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.94")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "0.9"
decoded = ujson.decode(encoded)
assert decoded == 0.9
sut = decimal.Decimal("1.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "2.0"
decoded = ujson.decode(encoded)
assert decoded == 2.0
sut = decimal.Decimal("-1.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "-2.0"
decoded = ujson.decode(encoded)
assert decoded == -2.0
sut = decimal.Decimal("0.995")
encoded = ujson.encode(sut, double_precision=2)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.9995")
encoded = ujson.encode(sut, double_precision=3)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.99999999999999944")
encoded = ujson.encode(sut, double_precision=15)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
@pytest.mark.parametrize("ensure_ascii", [True, False])
def test_encode_string_conversion(self, ensure_ascii):
string_input = "A string \\ / \b \f \n \r \t </script> &"
not_html_encoded = ('"A string \\\\ \\/ \\b \\f \\n '
'\\r \\t <\\/script> &"')
html_encoded = ('"A string \\\\ \\/ \\b \\f \\n \\r \\t '
'\\u003c\\/script\\u003e \\u0026"')
def helper(expected_output, **encode_kwargs):
output = ujson.encode(string_input,
ensure_ascii=ensure_ascii,
**encode_kwargs)
assert output == expected_output
assert string_input == json.loads(output)
assert string_input == ujson.decode(output)
# Default behavior assumes encode_html_chars=False.
helper(not_html_encoded)
# Make sure explicit encode_html_chars=False works.
helper(not_html_encoded, encode_html_chars=False)
# Make sure explicit encode_html_chars=True does the encoding.
helper(html_encoded, encode_html_chars=True)
@pytest.mark.parametrize("long_number", [
-4342969734183514, -12345678901234.56789012, -528656961.4399388
])
def test_double_long_numbers(self, long_number):
sut = {u("a"): long_number}
encoded = ujson.encode(sut, double_precision=15)
decoded = ujson.decode(encoded)
assert sut == decoded
def test_encode_non_c_locale(self):
lc_category = locale.LC_NUMERIC
# We just need one of these locales to work.
for new_locale in ("it_IT.UTF-8", "Italian_Italy"):
if tm.can_set_locale(new_locale, lc_category):
with tm.set_locale(new_locale, lc_category):
assert ujson.loads(ujson.dumps(4.78e60)) == 4.78e60
assert ujson.loads("4.78", precise_float=True) == 4.78
break
def test_decimal_decode_test_precise(self):
sut = {u("a"): 4.56}
encoded = ujson.encode(sut)
decoded = ujson.decode(encoded, precise_float=True)
assert sut == decoded
@pytest.mark.skipif(compat.is_platform_windows() and not compat.PY3,
reason="buggy on win-64 for py2")
def test_encode_double_tiny_exponential(self):
num = 1e-40
assert num == ujson.decode(ujson.encode(num))
num = 1e-100
assert num == ujson.decode(ujson.encode(num))
num = -1e-45
assert num == ujson.decode(ujson.encode(num))
num = -1e-145
assert np.allclose(num, ujson.decode(ujson.encode(num)))
@pytest.mark.parametrize("unicode_key", [
u("key1"), u("بن")
])
def test_encode_dict_with_unicode_keys(self, unicode_key):
unicode_dict = {unicode_key: u("value1")}
assert unicode_dict == ujson.decode(ujson.encode(unicode_dict))
@pytest.mark.parametrize("double_input", [
math.pi,
-math.pi # Should work with negatives too.
])
def test_encode_double_conversion(self, double_input):
output = ujson.encode(double_input)
assert round(double_input, 5) == round(json.loads(output), 5)
assert round(double_input, 5) == round(ujson.decode(output), 5)
def test_encode_with_decimal(self):
decimal_input = 1.0
output = ujson.encode(decimal_input)
assert output == "1.0"
def test_encode_array_of_nested_arrays(self):
nested_input = [[[[]]]] * 20
output = ujson.encode(nested_input)
assert nested_input == json.loads(output)
assert nested_input == ujson.decode(output)
nested_input = np.array(nested_input)
tm.assert_numpy_array_equal(nested_input, ujson.decode(
output, numpy=True, dtype=nested_input.dtype))
def test_encode_array_of_doubles(self):
doubles_input = [31337.31337, 31337.31337,
31337.31337, 31337.31337] * 10
output = ujson.encode(doubles_input)
assert doubles_input == json.loads(output)
assert doubles_input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(doubles_input),
ujson.decode(output, numpy=True))
def test_double_precision(self):
double_input = 30.012345678901234
output = ujson.encode(double_input, double_precision=15)
assert double_input == json.loads(output)
assert double_input == ujson.decode(output)
for double_precision in (3, 9):
output = ujson.encode(double_input,
double_precision=double_precision)
rounded_input = round(double_input, double_precision)
assert rounded_input == json.loads(output)
assert rounded_input == ujson.decode(output)
@pytest.mark.parametrize("invalid_val", [
20, -1, "9", None
])
def test_invalid_double_precision(self, invalid_val):
double_input = 30.12345678901234567890
expected_exception = (ValueError if isinstance(invalid_val, int)
else TypeError)
with pytest.raises(expected_exception):
ujson.encode(double_input, double_precision=invalid_val)
def test_encode_string_conversion2(self):
string_input = "A string \\ / \b \f \n \r \t"
output = ujson.encode(string_input)
assert string_input == json.loads(output)
assert string_input == ujson.decode(output)
assert output == '"A string \\\\ \\/ \\b \\f \\n \\r \\t"'
@pytest.mark.parametrize("unicode_input", [
"Räksmörgås اسامة بن محمد بن عوض بن لادن",
"\xe6\x97\xa5\xd1\x88"
])
def test_encode_unicode_conversion(self, unicode_input):
enc = ujson.encode(unicode_input)
dec = ujson.decode(enc)
assert enc == json_unicode(unicode_input)
assert dec == json.loads(enc)
def test_encode_control_escaping(self):
escaped_input = "\x19"
enc = ujson.encode(escaped_input)
dec = ujson.decode(enc)
assert escaped_input == dec
assert enc == json_unicode(escaped_input)
def test_encode_unicode_surrogate_pair(self):
surrogate_input = "\xf0\x90\x8d\x86"
enc = ujson.encode(surrogate_input)
dec = ujson.decode(enc)
assert enc == json_unicode(surrogate_input)
assert dec == json.loads(enc)
def test_encode_unicode_4bytes_utf8(self):
four_bytes_input = "\xf0\x91\x80\xb0TRAILINGNORMAL"
enc = ujson.encode(four_bytes_input)
dec = ujson.decode(enc)
assert enc == json_unicode(four_bytes_input)
assert dec == json.loads(enc)
def test_encode_unicode_4bytes_utf8highest(self):
four_bytes_input = "\xf3\xbf\xbf\xbfTRAILINGNORMAL"
enc = ujson.encode(four_bytes_input)
dec = ujson.decode(enc)
assert enc == json_unicode(four_bytes_input)
assert dec == json.loads(enc)
def test_encode_array_in_array(self):
arr_in_arr_input = [[[[]]]]
output = ujson.encode(arr_in_arr_input)
assert arr_in_arr_input == json.loads(output)
assert output == json.dumps(arr_in_arr_input)
assert arr_in_arr_input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(arr_in_arr_input),
ujson.decode(output, numpy=True))
@pytest.mark.parametrize("num_input", [
31337,
-31337, # Negative number.
-9223372036854775808 # Large negative number.
])
def test_encode_num_conversion(self, num_input):
output = ujson.encode(num_input)
assert num_input == json.loads(output)
assert output == json.dumps(num_input)
assert num_input == ujson.decode(output)
def test_encode_list_conversion(self):
list_input = [1, 2, 3, 4]
output = ujson.encode(list_input)
assert list_input == json.loads(output)
assert list_input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(list_input),
ujson.decode(output, numpy=True))
def test_encode_dict_conversion(self):
dict_input = {"k1": 1, "k2": 2, "k3": 3, "k4": 4}
output = ujson.encode(dict_input)
assert dict_input == json.loads(output)
assert dict_input == ujson.decode(output)
@pytest.mark.parametrize("builtin_value", [None, True, False])
def test_encode_builtin_values_conversion(self, builtin_value):
output = ujson.encode(builtin_value)
assert builtin_value == json.loads(output)
assert output == json.dumps(builtin_value)
assert builtin_value == ujson.decode(output)
def test_encode_datetime_conversion(self):
datetime_input = datetime.datetime.fromtimestamp(time.time())
output = ujson.encode(datetime_input, date_unit="s")
expected = calendar.timegm(datetime_input.utctimetuple())
assert int(expected) == json.loads(output)
assert int(expected) == ujson.decode(output)
def test_encode_date_conversion(self):
date_input = datetime.date.fromtimestamp(time.time())
output = ujson.encode(date_input, date_unit="s")
tup = (date_input.year, date_input.month, date_input.day, 0, 0, 0)
expected = calendar.timegm(tup)
assert int(expected) == json.loads(output)
assert int(expected) == ujson.decode(output)
@pytest.mark.parametrize("test", [
datetime.time(),
datetime.time(1, 2, 3),
datetime.time(10, 12, 15, 343243),
])
def test_encode_time_conversion_basic(self, test):
output = ujson.encode(test)
expected = '"{iso}"'.format(iso=test.isoformat())
assert expected == output
def test_encode_time_conversion_pytz(self):
# see gh-11473: to_json segfaults with timezone-aware datetimes
test = datetime.time(10, 12, 15, 343243, pytz.utc)
output = ujson.encode(test)
expected = '"{iso}"'.format(iso=test.isoformat())
assert expected == output
def test_encode_time_conversion_dateutil(self):
# see gh-11473: to_json segfaults with timezone-aware datetimes
test = datetime.time(10, 12, 15, 343243, dateutil.tz.tzutc())
output = ujson.encode(test)
expected = '"{iso}"'.format(iso=test.isoformat())
assert expected == output
@pytest.mark.parametrize("decoded_input", [
NaT,
np.datetime64("NaT"),
np.nan,
np.inf,
-np.inf
])
def test_encode_as_null(self, decoded_input):
assert ujson.encode(decoded_input) == "null", "Expected null"
def test_datetime_units(self):
val = datetime.datetime(2013, 8, 17, 21, 17, 12, 215504)
stamp = Timestamp(val)
roundtrip = ujson.decode(ujson.encode(val, date_unit='s'))
assert roundtrip == stamp.value // 10**9
roundtrip = ujson.decode(ujson.encode(val, date_unit='ms'))
assert roundtrip == stamp.value // 10**6
roundtrip = ujson.decode(ujson.encode(val, date_unit='us'))
assert roundtrip == stamp.value // 10**3
roundtrip = ujson.decode(ujson.encode(val, date_unit='ns'))
assert roundtrip == stamp.value
pytest.raises(ValueError, ujson.encode, val, date_unit='foo')
def test_encode_to_utf8(self):
unencoded = "\xe6\x97\xa5\xd1\x88"
enc = ujson.encode(unencoded, ensure_ascii=False)
dec = ujson.decode(enc)
assert enc == json_unicode(unencoded, ensure_ascii=False)
assert dec == json.loads(enc)
def test_decode_from_unicode(self):
unicode_input = u("{\"obj\": 31337}")
dec1 = ujson.decode(unicode_input)
dec2 = ujson.decode(str(unicode_input))
assert dec1 == dec2
def test_encode_recursion_max(self):
# 8 is the max recursion depth
class O2(object):
member = 0
pass
class O1(object):
member = 0
pass
decoded_input = O1()
decoded_input.member = O2()
decoded_input.member.member = decoded_input
with pytest.raises(OverflowError):
ujson.encode(decoded_input)
def test_decode_jibberish(self):
jibberish = "fdsa sda v9sa fdsa"
with pytest.raises(ValueError):
ujson.decode(jibberish)
@pytest.mark.parametrize("broken_json", [
"[", # Broken array start.
"{", # Broken object start.
"]", # Broken array end.
"}", # Broken object end.
])
def test_decode_broken_json(self, broken_json):
with pytest.raises(ValueError):
ujson.decode(broken_json)
@pytest.mark.parametrize("too_big_char", [
"[",
"{",
])
def test_decode_depth_too_big(self, too_big_char):
with pytest.raises(ValueError):
ujson.decode(too_big_char * (1024 * 1024))
@pytest.mark.parametrize("bad_string", [
"\"TESTING", # Unterminated.
"\"TESTING\\\"", # Unterminated escape.
"tru", # Broken True.
"fa", # Broken False.
"n", # Broken None.
])
def test_decode_bad_string(self, bad_string):
with pytest.raises(ValueError):
ujson.decode(bad_string)
@pytest.mark.parametrize("broken_json", [
'{{1337:""}}',
'{{"key":"}',
'[[[true',
])
def test_decode_broken_json_leak(self, broken_json):
for _ in range(1000):
with pytest.raises(ValueError):
ujson.decode(broken_json)
@pytest.mark.parametrize("invalid_dict", [
"{{{{31337}}}}", # No key.
"{{{{\"key\":}}}}", # No value.
"{{{{\"key\"}}}}", # No colon or value.
])
def test_decode_invalid_dict(self, invalid_dict):
with pytest.raises(ValueError):
ujson.decode(invalid_dict)
@pytest.mark.parametrize("numeric_int_as_str", [
"31337", "-31337" # Should work with negatives.
])
def test_decode_numeric_int(self, numeric_int_as_str):
assert int(numeric_int_as_str) == ujson.decode(numeric_int_as_str)
@pytest.mark.skipif(compat.PY3, reason="only PY2")
def test_encode_unicode_4bytes_utf8_fail(self):
with pytest.raises(OverflowError):
ujson.encode("\xfd\xbf\xbf\xbf\xbf\xbf")
def test_encode_null_character(self):
wrapped_input = "31337 \x00 1337"
output = ujson.encode(wrapped_input)
assert wrapped_input == json.loads(output)
assert output == json.dumps(wrapped_input)
assert wrapped_input == ujson.decode(output)
alone_input = "\x00"
output = ujson.encode(alone_input)
assert alone_input == json.loads(output)
assert output == json.dumps(alone_input)
assert alone_input == ujson.decode(output)
assert '" \\u0000\\r\\n "' == ujson.dumps(u(" \u0000\r\n "))
def test_decode_null_character(self):
wrapped_input = "\"31337 \\u0000 31337\""
assert ujson.decode(wrapped_input) == json.loads(wrapped_input)
def test_encode_list_long_conversion(self):
long_input = [9223372036854775807, 9223372036854775807,
9223372036854775807, 9223372036854775807,
9223372036854775807, 9223372036854775807]
output = ujson.encode(long_input)
assert long_input == json.loads(output)
assert long_input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(long_input),
ujson.decode(output, numpy=True,
dtype=np.int64))
def test_encode_long_conversion(self):
long_input = 9223372036854775807
output = ujson.encode(long_input)
assert long_input == json.loads(output)
assert output == json.dumps(long_input)
assert long_input == ujson.decode(output)
@pytest.mark.parametrize("int_exp", [
"1337E40", "1.337E40", "1337E+9", "1.337e+40", "1.337E-4"
])
def test_decode_numeric_int_exp(self, int_exp):
assert ujson.decode(int_exp) == json.loads(int_exp)
def test_dump_to_file(self):
f = StringIO()
ujson.dump([1, 2, 3], f)
assert "[1,2,3]" == f.getvalue()
def test_dump_to_file_like(self):
class FileLike(object):
def __init__(self):
self.bytes = ''
def write(self, data_bytes):
self.bytes += data_bytes
f = FileLike()
ujson.dump([1, 2, 3], f)
assert "[1,2,3]" == f.bytes
def test_dump_file_args_error(self):
with pytest.raises(TypeError):
ujson.dump([], "")
def test_load_file(self):
data = "[1,2,3,4]"
exp_data = [1, 2, 3, 4]
f = StringIO(data)
assert exp_data == ujson.load(f)
f = StringIO(data)
tm.assert_numpy_array_equal(np.array(exp_data),
ujson.load(f, numpy=True))
def test_load_file_like(self):
class FileLike(object):
def read(self):
try:
self.end
except AttributeError:
self.end = True
return "[1,2,3,4]"
exp_data = [1, 2, 3, 4]
f = FileLike()
assert exp_data == ujson.load(f)
f = FileLike()
tm.assert_numpy_array_equal(np.array(exp_data),
ujson.load(f, numpy=True))
def test_load_file_args_error(self):
with pytest.raises(TypeError):
ujson.load("[]")
def test_version(self):
assert re.match(r'^\d+\.\d+(\.\d+)?$', ujson.__version__), \
"ujson.__version__ must be a string like '1.4.0'"
def test_encode_numeric_overflow(self):
with pytest.raises(OverflowError):
ujson.encode(12839128391289382193812939)
def test_encode_numeric_overflow_nested(self):
class Nested(object):
x = 12839128391289382193812939
for _ in range(0, 100):
with pytest.raises(OverflowError):
ujson.encode(Nested())
@pytest.mark.parametrize("val", [
3590016419, 2**31, 2**32, (2**32) - 1
])
def test_decode_number_with_32bit_sign_bit(self, val):
# Test that numbers that fit within 32 bits but would have the
# sign bit set (2**31 <= x < 2**32) are decoded properly.
doc = '{{"id": {val}}}'.format(val=val)
assert ujson.decode(doc)["id"] == val
def test_encode_big_escape(self):
# Make sure no Exception is raised.
for _ in range(10):
base = '\u00e5'.encode("utf-8") if compat.PY3 else "\xc3\xa5"
escape_input = base * 1024 * 1024 * 2
ujson.encode(escape_input)
def test_decode_big_escape(self):
# Make sure no Exception is raised.
for _ in range(10):
base = '\u00e5'.encode("utf-8") if compat.PY3 else "\xc3\xa5"
quote = compat.str_to_bytes("\"")
escape_input = quote + (base * 1024 * 1024 * 2) + quote
ujson.decode(escape_input)
def test_to_dict(self):
d = {u("key"): 31337}
class DictTest(object):
def toDict(self):
return d
o = DictTest()
output = ujson.encode(o)
dec = ujson.decode(output)
assert dec == d
def test_default_handler(self):
class _TestObject(object):
def __init__(self, val):
self.val = val
@property
def recursive_attr(self):
return _TestObject("recursive_attr")
def __str__(self):
return str(self.val)
pytest.raises(OverflowError, ujson.encode, _TestObject("foo"))
assert '"foo"' == ujson.encode(_TestObject("foo"),
default_handler=str)
def my_handler(_):
return "foobar"
assert '"foobar"' == ujson.encode(_TestObject("foo"),
default_handler=my_handler)
def my_handler_raises(_):
raise TypeError("I raise for anything")
with pytest.raises(TypeError, match="I raise for anything"):
ujson.encode(_TestObject("foo"), default_handler=my_handler_raises)
def my_int_handler(_):
return 42
assert ujson.decode(ujson.encode(_TestObject("foo"),
default_handler=my_int_handler)) == 42
def my_obj_handler(_):
return datetime.datetime(2013, 2, 3)
assert (ujson.decode(ujson.encode(datetime.datetime(2013, 2, 3))) ==
ujson.decode(ujson.encode(_TestObject("foo"),
default_handler=my_obj_handler)))
obj_list = [_TestObject("foo"), _TestObject("bar")]
assert (json.loads(json.dumps(obj_list, default=str)) ==
ujson.decode(ujson.encode(obj_list, default_handler=str)))
class TestNumpyJSONTests(object):
@pytest.mark.parametrize("bool_input", [True, False])
def test_bool(self, bool_input):
b = np.bool(bool_input)
assert ujson.decode(ujson.encode(b)) == b
def test_bool_array(self):
bool_array = np.array([
True, False, True, True,
False, True, False, False], dtype=np.bool)
output = np.array(ujson.decode(
ujson.encode(bool_array)), dtype=np.bool)
tm.assert_numpy_array_equal(bool_array, output)
def test_int(self, any_int_dtype):
klass = np.dtype(any_int_dtype).type
num = klass(1)
assert klass(ujson.decode(ujson.encode(num))) == num
def test_int_array(self, any_int_dtype):
arr = np.arange(100, dtype=np.int)
arr_input = arr.astype(any_int_dtype)
arr_output = np.array(ujson.decode(ujson.encode(arr_input)),
dtype=any_int_dtype)
tm.assert_numpy_array_equal(arr_input, arr_output)
def test_int_max(self, any_int_dtype):
if any_int_dtype in ("int64", "uint64") and compat.is_platform_32bit():
pytest.skip("Cannot test 64-bit integer on 32-bit platform")
klass = np.dtype(any_int_dtype).type
# uint64 max will always overflow,
# as it's encoded to signed.
if any_int_dtype == "uint64":
num = np.iinfo("int64").max
else:
num = np.iinfo(any_int_dtype).max
assert klass(ujson.decode(ujson.encode(num))) == num
def test_float(self, float_dtype):
klass = np.dtype(float_dtype).type
num = klass(256.2013)
assert klass(ujson.decode(ujson.encode(num))) == num
def test_float_array(self, float_dtype):
arr = np.arange(12.5, 185.72, 1.7322, dtype=np.float)
float_input = arr.astype(float_dtype)
float_output = np.array(ujson.decode(
ujson.encode(float_input, double_precision=15)),
dtype=float_dtype)
tm.assert_almost_equal(float_input, float_output)
def test_float_max(self, float_dtype):
klass = np.dtype(float_dtype).type
num = klass(np.finfo(float_dtype).max / 10)
tm.assert_almost_equal(klass(ujson.decode(
ujson.encode(num, double_precision=15))), num)
def test_array_basic(self):
arr = np.arange(96)
arr = arr.reshape((2, 2, 2, 2, 3, 2))
tm.assert_numpy_array_equal(
np.array(ujson.decode(ujson.encode(arr))), arr)
tm.assert_numpy_array_equal(ujson.decode(
ujson.encode(arr), numpy=True), arr)
@pytest.mark.parametrize("shape", [
(10, 10),
(5, 5, 4),
(100, 1),
])
def test_array_reshaped(self, shape):
arr = np.arange(100)
arr = arr.reshape(shape)
tm.assert_numpy_array_equal(
np.array(ujson.decode(ujson.encode(arr))), arr)
tm.assert_numpy_array_equal(ujson.decode(
ujson.encode(arr), numpy=True), arr)
def test_array_list(self):
arr_list = ["a", list(), dict(), dict(), list(),
42, 97.8, ["a", "b"], {"key": "val"}]
arr = np.array(arr_list)
tm.assert_numpy_array_equal(
np.array(ujson.decode(ujson.encode(arr))), arr)
def test_array_float(self):
dtype = np.float32
arr = np.arange(100.202, 200.202, 1, dtype=dtype)
arr = arr.reshape((5, 5, 4))
arr_out = np.array(ujson.decode(ujson.encode(arr)), dtype=dtype)
tm.assert_almost_equal(arr, arr_out)
arr_out = ujson.decode(ujson.encode(arr), numpy=True, dtype=dtype)
tm.assert_almost_equal(arr, arr_out)
def test_0d_array(self):
with pytest.raises(TypeError):
ujson.encode(np.array(1))
@pytest.mark.parametrize("bad_input,exc_type,kwargs", [
([{}, []], ValueError, {}),
([42, None], TypeError, {}),
([["a"], 42], ValueError, {}),
([42, {}, "a"], TypeError, {}),
([42, ["a"], 42], ValueError, {}),
(["a", "b", [], "c"], ValueError, {}),
([{"a": "b"}], ValueError, dict(labelled=True)),
({"a": {"b": {"c": 42}}}, ValueError, dict(labelled=True)),
([{"a": 42, "b": 23}, {"c": 17}], ValueError, dict(labelled=True))
])
def test_array_numpy_except(self, bad_input, exc_type, kwargs):
with pytest.raises(exc_type):
ujson.decode(ujson.dumps(bad_input), numpy=True, **kwargs)
def test_array_numpy_labelled(self):
labelled_input = {"a": []}
output = ujson.loads(ujson.dumps(labelled_input),
numpy=True, labelled=True)
assert (np.empty((1, 0)) == output[0]).all()
assert (np.array(["a"]) == output[1]).all()
assert output[2] is None
labelled_input = [{"a": 42}]
output = ujson.loads(ujson.dumps(labelled_input),
numpy=True, labelled=True)
assert (np.array([u("a")]) == output[2]).all()
assert (np.array([42]) == output[0]).all()
assert output[1] is None
# see gh-10837: write out the dump explicitly
# so there is no dependency on iteration order
input_dumps = ('[{"a": 42, "b":31}, {"a": 24, "c": 99}, '
'{"a": 2.4, "b": 78}]')
output = ujson.loads(input_dumps, numpy=True, labelled=True)
expected_vals = np.array(
[42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3, 2))
assert (expected_vals == output[0]).all()
assert output[1] is None
assert (np.array([u("a"), "b"]) == output[2]).all()
input_dumps = ('{"1": {"a": 42, "b":31}, "2": {"a": 24, "c": 99}, '
'"3": {"a": 2.4, "b": 78}}')
output = ujson.loads(input_dumps, numpy=True, labelled=True)
expected_vals = np.array(
[42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3, 2))
assert (expected_vals == output[0]).all()
assert (np.array(["1", "2", "3"]) == output[1]).all()
assert (np.array(["a", "b"]) == output[2]).all()
class TestPandasJSONTests(object):
def test_dataframe(self, orient, numpy):
if orient == "records" and numpy:
pytest.skip("Not idiomatic pandas")
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[
"a", "b"], columns=["x", "y", "z"])
encode_kwargs = {} if orient is None else dict(orient=orient)
decode_kwargs = {} if numpy is None else dict(numpy=numpy)
output = ujson.decode(ujson.encode(df, **encode_kwargs),
**decode_kwargs)
# Ensure proper DataFrame initialization.
if orient == "split":
dec = _clean_dict(output)
output = DataFrame(**dec)
else:
output = DataFrame(output)
# Corrections to enable DataFrame comparison.
if orient == "values":
df.columns = [0, 1, 2]
df.index = [0, 1]
elif orient == "records":
df.index = [0, 1]
elif orient == "index":
df = df.transpose()
tm.assert_frame_equal(output, df, check_dtype=False)
def test_dataframe_nested(self, orient):
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[
"a", "b"], columns=["x", "y", "z"])
nested = {"df1": df, "df2": df.copy()}
kwargs = {} if orient is None else dict(orient=orient)
exp = {"df1": ujson.decode(ujson.encode(df, **kwargs)),
"df2": ujson.decode(ujson.encode(df, **kwargs))}
assert ujson.decode(ujson.encode(nested, **kwargs)) == exp
def test_dataframe_numpy_labelled(self, orient):
if orient in ("split", "values"):
pytest.skip("Incompatible with labelled=True")
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[
"a", "b"], columns=["x", "y", "z"], dtype=np.int)
kwargs = {} if orient is None else dict(orient=orient)
output = DataFrame(*ujson.decode(ujson.encode(df, **kwargs),
numpy=True, labelled=True))
if orient is None:
df = df.T
elif orient == "records":
df.index = [0, 1]
tm.assert_frame_equal(output, df)
def test_series(self, orient, numpy):
s = Series([10, 20, 30, 40, 50, 60], name="series",
index=[6, 7, 8, 9, 10, 15]).sort_values()
encode_kwargs = {} if orient is None else dict(orient=orient)
decode_kwargs = {} if numpy is None else dict(numpy=numpy)
output = ujson.decode(ujson.encode(s, **encode_kwargs),
**decode_kwargs)
if orient == "split":
dec = _clean_dict(output)
output = Series(**dec)
else:
output = Series(output)
if orient in (None, "index"):
s.name = None
output = output.sort_values()
s.index = ["6", "7", "8", "9", "10", "15"]
elif orient in ("records", "values"):
s.name = None
s.index = [0, 1, 2, 3, 4, 5]
tm.assert_series_equal(output, s, check_dtype=False)
def test_series_nested(self, orient):
s = Series([10, 20, 30, 40, 50, 60], name="series",
index=[6, 7, 8, 9, 10, 15]).sort_values()
nested = {"s1": s, "s2": s.copy()}
kwargs = {} if orient is None else dict(orient=orient)
exp = {"s1": ujson.decode(ujson.encode(s, **kwargs)),
"s2": ujson.decode(ujson.encode(s, **kwargs))}
assert ujson.decode(ujson.encode(nested, **kwargs)) == exp
def test_index(self):
i = Index([23, 45, 18, 98, 43, 11], name="index")
# Column indexed.
output = Index(ujson.decode(ujson.encode(i)), name="index")
tm.assert_index_equal(i, output)
output = Index(ujson.decode(ujson.encode(i), numpy=True), name="index")
tm.assert_index_equal(i, output)
dec = _clean_dict(ujson.decode(ujson.encode(i, orient="split")))
output = Index(**dec)
tm.assert_index_equal(i, output)
assert i.name == output.name
dec = _clean_dict(ujson.decode(ujson.encode(i, orient="split"),
numpy=True))
output = Index(**dec)
tm.assert_index_equal(i, output)
assert i.name == output.name
output = Index(ujson.decode( | ujson.encode(i, orient="values") | pandas._libs.json.encode |
from datetime import datetime
import operator
import numpy as np
import pytest
from pandas import DataFrame, Index, Series, bdate_range
import pandas._testing as tm
from pandas.core import ops
class TestSeriesLogicalOps:
@pytest.mark.parametrize("bool_op", [operator.and_, operator.or_, operator.xor])
def test_bool_operators_with_nas(self, bool_op):
# boolean &, |, ^ should work with object arrays and propagate NAs
ser = Series(bdate_range("1/1/2000", periods=10), dtype=object)
ser[::2] = np.nan
mask = ser.isna()
filled = ser.fillna(ser[0])
result = bool_op(ser < ser[9], ser > ser[3])
expected = bool_op(filled < filled[9], filled > filled[3])
expected[mask] = False
tm.assert_series_equal(result, expected)
def test_logical_operators_bool_dtype_with_empty(self):
# GH#9016: support bitwise op for integer types
index = list("bca")
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
s_empty = Series([], dtype=object)
res = s_tft & s_empty
expected = s_fff
tm.assert_series_equal(res, expected)
res = s_tft | s_empty
expected = s_tft
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_int_dtype(self):
# GH#9016: support bitwise op for integer types
# TODO: unused
# s_0101 = Series([0, 1, 0, 1])
s_0123 = Series(range(4), dtype="int64")
s_3333 = Series([3] * 4)
s_4444 = Series([4] * 4)
res = s_0123 & s_3333
expected = Series(range(4), dtype="int64")
tm.assert_series_equal(res, expected)
res = s_0123 | s_4444
expected = Series(range(4, 8), dtype="int64")
tm.assert_series_equal(res, expected)
s_1111 = Series([1] * 4, dtype="int8")
res = s_0123 & s_1111
expected = Series([0, 1, 0, 1], dtype="int64")
tm.assert_series_equal(res, expected)
res = s_0123.astype(np.int16) | s_1111.astype(np.int32)
expected = Series([1, 1, 3, 3], dtype="int32")
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_int_scalar(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
res = s_0123 & 0
expected = Series([0] * 4)
tm.assert_series_equal(res, expected)
res = s_0123 & 1
expected = Series([0, 1, 0, 1])
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_float(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
msg = "Cannot perform.+with a dtyped.+array and scalar of type"
with pytest.raises(TypeError, match=msg):
s_0123 & np.NaN
with pytest.raises(TypeError, match=msg):
s_0123 & 3.14
msg = "unsupported operand type.+for &:"
with pytest.raises(TypeError, match=msg):
s_0123 & [0.1, 4, 3.14, 2]
with pytest.raises(TypeError, match=msg):
s_0123 & np.array([0.1, 4, 3.14, 2])
with pytest.raises(TypeError, match=msg):
s_0123 & Series([0.1, 4, -3.14, 2])
def test_logical_operators_int_dtype_with_str(self):
s_1111 = Series([1] * 4, dtype="int8")
msg = "Cannot perform 'and_' with a dtyped.+array and scalar of type"
with pytest.raises(TypeError, match=msg):
s_1111 & "a"
with pytest.raises(TypeError, match="unsupported operand.+for &"):
s_1111 & ["a", "b", "c", "d"]
def test_logical_operators_int_dtype_with_bool(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
expected = Series([False] * 4)
result = s_0123 & False
tm.assert_series_equal(result, expected)
result = s_0123 & [False]
tm.assert_series_equal(result, expected)
result = s_0123 & (False,)
tm.assert_series_equal(result, expected)
result = s_0123 ^ False
expected = Series([False, True, True, True])
tm.assert_series_equal(result, expected)
def test_logical_operators_int_dtype_with_object(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
result = s_0123 & Series([False, np.NaN, False, False])
expected = Series([False] * 4)
tm.assert_series_equal(result, expected)
s_abNd = Series(["a", "b", np.NaN, "d"])
with pytest.raises(TypeError, match="unsupported.* 'int' and 'str'"):
s_0123 & s_abNd
def test_logical_operators_bool_dtype_with_int(self):
index = list("bca")
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
res = s_tft & 0
expected = s_fff
tm.assert_series_equal(res, expected)
res = s_tft & 1
expected = s_tft
tm.assert_series_equal(res, expected)
def test_logical_ops_bool_dtype_with_ndarray(self):
# make sure we operate on ndarray the same as Series
left = Series([True, True, True, False, True])
right = [True, False, None, True, np.nan]
expected = Series([True, False, False, False, False])
result = left & right
tm.assert_series_equal(result, expected)
result = left & np.array(right)
tm.assert_series_equal(result, expected)
result = left & Index(right)
tm.assert_series_equal(result, expected)
result = left & Series(right)
tm.assert_series_equal(result, expected)
expected = Series([True, True, True, True, True])
result = left | right
tm.assert_series_equal(result, expected)
result = left | np.array(right)
tm.assert_series_equal(result, expected)
result = left | Index(right)
tm.assert_series_equal(result, expected)
result = left | Series(right)
tm.assert_series_equal(result, expected)
expected = Series([False, True, True, True, True])
result = left ^ right
tm.assert_series_equal(result, expected)
result = left ^ np.array(right)
tm.assert_series_equal(result, expected)
result = left ^ Index(right)
tm.assert_series_equal(result, expected)
result = left ^ Series(right)
tm.assert_series_equal(result, expected)
def test_logical_operators_int_dtype_with_bool_dtype_and_reindex(self):
# GH#9016: support bitwise op for integer types
# with non-matching indexes, logical operators will cast to object
# before operating
index = list("bca")
s_tft = Series([True, False, True], index=index)
s_tft = Series([True, False, True], index=index)
s_tff = Series([True, False, False], index=index)
s_0123 = Series(range(4), dtype="int64")
# s_0123 will be all false now because of reindexing like s_tft
expected = Series([False] * 7, index=[0, 1, 2, 3, "a", "b", "c"])
result = s_tft & s_0123
tm.assert_series_equal(result, expected)
expected = Series([False] * 7, index=[0, 1, 2, 3, "a", "b", "c"])
result = s_0123 & s_tft
tm.assert_series_equal(result, expected)
s_a0b1c0 = Series([1], list("b"))
res = s_tft & s_a0b1c0
expected = s_tff.reindex(list("abc"))
tm.assert_series_equal(res, expected)
res = s_tft | s_a0b1c0
expected = s_tft.reindex(list("abc"))
tm.assert_series_equal(res, expected)
def test_scalar_na_logical_ops_corners(self):
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
msg = "Cannot perform.+with a dtyped.+array and scalar of type"
with pytest.raises(TypeError, match=msg):
s & datetime(2005, 1, 1)
s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])
s[::2] = np.nan
expected = Series(True, index=s.index)
expected[::2] = False
result = s & list(s)
tm.assert_series_equal(result, expected)
def test_scalar_na_logical_ops_corners_aligns(self):
s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])
s[::2] = np.nan
d = DataFrame({"A": s})
expected = DataFrame(False, index=range(9), columns=["A"] + list(range(9)))
result = s & d
tm.assert_frame_equal(result, expected)
result = d & s
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("op", [operator.and_, operator.or_, operator.xor])
def test_logical_ops_with_index(self, op):
# GH#22092, GH#19792
ser = Series([True, True, False, False])
idx1 = Index([True, False, True, False])
idx2 = Index([1, 0, 1, 0])
expected = Series([op(ser[n], idx1[n]) for n in range(len(ser))])
result = op(ser, idx1)
tm.assert_series_equal(result, expected)
expected = Series([op(ser[n], idx2[n]) for n in range(len(ser))], dtype=bool)
result = op(ser, idx2)
tm.assert_series_equal(result, expected)
def test_reversed_xor_with_index_returns_index(self):
# GH#22092, GH#19792
ser = Series([True, True, False, False])
idx1 = Index([True, False, True, False])
idx2 = Index([1, 0, 1, 0])
expected = Index.symmetric_difference(idx1, ser)
with tm.assert_produces_warning(FutureWarning):
result = idx1 ^ ser
tm.assert_index_equal(result, expected)
expected = Index.symmetric_difference(idx2, ser)
with tm.assert_produces_warning(FutureWarning):
result = idx2 ^ ser
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"op",
[
pytest.param(
ops.rand_,
marks=pytest.mark.xfail(
reason="GH#22092 Index __and__ returns Index intersection",
raises=AssertionError,
strict=True,
),
),
pytest.param(
ops.ror_,
marks=pytest.mark.xfail(
reason="GH#22092 Index __or__ returns Index union",
raises=AssertionError,
strict=True,
),
),
],
)
def test_reversed_logical_op_with_index_returns_series(self, op):
# GH#22092, GH#19792
ser = Series([True, True, False, False])
idx1 = Index([True, False, True, False])
idx2 = Index([1, 0, 1, 0])
expected = Series(op(idx1.values, ser.values))
with tm.assert_produces_warning(FutureWarning):
result = op(ser, idx1)
tm.assert_series_equal(result, expected)
expected = Series(op(idx2.values, ser.values))
with tm.assert_produces_warning(FutureWarning):
result = op(ser, idx2)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"op, expected",
[
(ops.rand_, Index([False, True])),
(ops.ror_, Index([False, True])),
(ops.rxor, Index([])),
],
)
def test_reverse_ops_with_index(self, op, expected):
# https://github.com/pandas-dev/pandas/pull/23628
# multi-set Index ops are buggy, so let's avoid duplicates...
ser = Series([True, False])
idx = Index([False, True])
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# behaving as set ops is deprecated, will become logical ops
result = op(ser, idx)
tm.assert_index_equal(result, expected)
def test_logical_ops_label_based(self):
# GH#4947
# logical ops should be label based
a = Series([True, False, True], list("bca"))
b = Series([False, True, False], list("abc"))
expected = Series([False, True, False], list("abc"))
result = a & b
tm.assert_series_equal(result, expected)
expected = Series([True, True, False], list("abc"))
result = a | b
tm.assert_series_equal(result, expected)
expected = Series([True, False, False], list("abc"))
result = a ^ b
tm.assert_series_equal(result, expected)
# rhs is bigger
a = Series([True, False, True], list("bca"))
b = Series([False, True, False, True], list("abcd"))
expected = Series([False, True, False, False], list("abcd"))
result = a & b
tm.assert_series_equal(result, expected)
expected = Series([True, True, False, False], list("abcd"))
result = a | b
tm.assert_series_equal(result, expected)
# filling
# vs empty
empty = Series([], dtype=object)
result = a & empty.copy()
expected = Series([False, False, False], list("bca"))
tm.assert_series_equal(result, expected)
result = a | empty.copy()
expected = Series([True, False, True], list("bca"))
tm.assert_series_equal(result, expected)
# vs non-matching
result = a & Series([1], ["z"])
expected = Series([False, False, False, False], list("abcz"))
tm.assert_series_equal(result, expected)
result = a | Series([1], ["z"])
expected = Series([True, True, False, False], list("abcz"))
tm.assert_series_equal(result, expected)
# identity
# we would like s[s|e] == s to hold for any e, whether empty or not
for e in [
empty.copy(),
Series([1], ["z"]),
Series(np.nan, b.index),
Series(np.nan, a.index),
]:
result = a[a | e]
tm.assert_series_equal(result, a[a])
for e in [Series(["z"])]:
result = a[a | e]
| tm.assert_series_equal(result, a[a]) | pandas._testing.assert_series_equal |
import logging, os, sys, pickle, json, time, yaml
from datetime import datetime as dt
import warnings
warnings.filterwarnings('ignore')
from tqdm import tqdm
tqdm.pandas()
import pandas as pd
import geopandas as gpd
from geopandas.plotting import _plot_linestring_collection, _plot_point_collection
import numpy as np
from shapely import geometry, wkt, ops
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib as mpl
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.patches import FancyBboxPatch
from ffsc.pipeline.nodes.utils import V_inv
import networkx as nx
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
import multiprocessing as mp
N_WORKERS=6
def visualise_gpd(params, gdfs, ne, logger):
fig, ax = plt.subplots(1,1,figsize=params['figsize'])
ne.plot(ax=ax, color='#{:02x}{:02x}{:02x}'.format(*params['vis_colors']['ne']), **params['type_style']['ne'])
for dd in gdfs:
logger.info(f'plotting {dd["type"]} {dd["color_key"]}')
if dd['type']=='lin_asset':
dd['gdf']['len'] = dd['gdf']['geometry'].apply(lambda geom: geom.length)
dd['gdf'] = dd['gdf'][dd['gdf']['len']<345]
dd['gdf'].plot(
ax=ax,
color='#{:02x}{:02x}{:02x}'.format(*params['vis_colors'][dd['color_key']]),
**params['type_style'][dd['type']]
)
plt.savefig(params['path'])
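# --- Hedged usage sketch for visualise_gpd --------------------------------
# The real `params` dict comes from the project's parameters YAML (not shown
# here), so every concrete value below -- figsize, colours, type_style keys,
# output path and the toy layers themselves -- is an illustrative assumption.
def _example_visualise_gpd():
    example_logger = logging.getLogger('visualise_gpd example')
    # Minimal stand-ins for the natural-earth background and two asset layers.
    ne_demo = gpd.GeoDataFrame(geometry=[geometry.box(-10, -10, 10, 10)])
    pts_demo = gpd.GeoDataFrame({'color_key': ['CITY', 'CITY']},
                                geometry=[geometry.Point(0, 0), geometry.Point(3, 4)])
    lines_demo = gpd.GeoDataFrame({'color_key': ['RAILWAY']},
                                  geometry=[geometry.LineString([(0, 0), (3, 4)])])
    params_demo = {
        'figsize': (8, 6),
        'path': 'example_assets.png',
        'vis_colors': {'ne': (220, 220, 220), 'CITY': (200, 30, 30), 'RAILWAY': (30, 30, 200)},
        'type_style': {'ne': {'zorder': 0}, 'pt_asset': {'markersize': 10}, 'lin_asset': {'linewidth': 0.5}},
    }
    gdfs_demo = [
        {'gdf': pts_demo, 'color_key': 'CITY', 'type': 'pt_asset'},
        {'gdf': lines_demo, 'color_key': 'RAILWAY', 'type': 'lin_asset'},
    ]
    visualise_gpd(params_demo, gdfs_demo, ne_demo, example_logger)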
def visualise_assets_simplified_coal(params, df_edges, refineries, oilfields, oilwells, coalmines, lngterminals, ports, cities, powerstations, df_missing_cities, df_missing_powerstations, railways,shippingroutes, pipelines, ne):
params['path'] = os.path.join(os.getcwd(),'results','figures','assets_simplified_coal.png')
visualise_assets(params, df_edges, refineries, oilfields, oilwells, coalmines, lngterminals, ports, cities, powerstations, df_missing_cities, df_missing_powerstations, railways,shippingroutes, pipelines, ne)
return []
def visualise_assets_simplified_oil(params, df_edges, refineries, oilfields, oilwells, coalmines, lngterminals, ports, cities, powerstations, df_missing_cities, df_missing_powerstations, railways,shippingroutes, pipelines, ne):
params['path'] = os.path.join(os.getcwd(),'results','figures','assets_simplified_oil.png')
visualise_assets(params, df_edges, refineries, oilfields, oilwells, coalmines, lngterminals, ports, cities, powerstations, df_missing_cities, df_missing_powerstations, railways,shippingroutes, pipelines, ne)
return []
def visualise_assets_simplified_gas(params, df_edges, refineries, oilfields, oilwells, coalmines, lngterminals, ports, cities, powerstations, df_missing_cities, df_missing_powerstations, railways,shippingroutes, pipelines, ne):
params['path'] = os.path.join(os.getcwd(),'results','figures','assets_simplified_gas.png')
visualise_assets(params, df_edges, refineries, oilfields, oilwells, coalmines, lngterminals, ports, cities, powerstations, df_missing_cities, df_missing_powerstations, railways,shippingroutes, pipelines, ne)
return []
def visualise_assets_coal(params, df_edges, refineries, oilfields, oilwells, coalmines, lngterminals, ports, cities, powerstations, df_missing_cities, df_missing_powerstations, railways,shippingroutes, pipelines, ne):
params['path'] = os.path.join(os.getcwd(),'results','figures','assets_coal.png')
visualise_assets(params, df_edges, refineries, oilfields, oilwells, coalmines, lngterminals, ports, cities, powerstations, df_missing_cities, df_missing_powerstations, railways,shippingroutes, pipelines, ne)
return []
def visualise_assets_oil(params, df_edges, refineries, oilfields, oilwells, coalmines, lngterminals, ports, cities, powerstations, df_missing_cities, df_missing_powerstations, railways,shippingroutes, pipelines, ne):
params['path'] = os.path.join(os.getcwd(),'results','figures','assets_oil.png')
visualise_assets(params, df_edges, refineries, oilfields, oilwells, coalmines, lngterminals, ports, cities, powerstations, df_missing_cities, df_missing_powerstations, railways,shippingroutes, pipelines, ne)
return []
def visualise_assets_gas(params, df_edges, refineries, oilfields, oilwells, coalmines, lngterminals, ports, cities, powerstations, df_missing_cities, df_missing_powerstations, railways,shippingroutes, pipelines, ne):
params['path'] = os.path.join(os.getcwd(),'results','figures','assets_gas.png')
visualise_assets(params, df_edges, refineries, oilfields, oilwells, coalmines, lngterminals, ports, cities, powerstations, df_missing_cities, df_missing_powerstations, railways,shippingroutes, pipelines, ne)
return []
def visualise_all_assets(vis_params):
ne = kedro_catalog.load('ne')
gdfs = {}
gdfs['SHIPPINGROUTE'] = kedro_catalog.load('raw_shippingroutes_data')
gdfs['PORT'] = kedro_catalog.load('raw_ports_data')
gdfs['LNGTERMINAL'] = kedro_catalog.load('raw_lngterminals_data')
gdfs['COALMINE'] = kedro_catalog.load('raw_coalmines_data')
gdfs['OILFIELD'] = kedro_catalog.load('raw_oilfields_data')
gdfs['OILWELL'] = kedro_catalog.load('raw_oilwells_data')
gdfs['REFINERY'] = kedro_catalog.load('raw_processingplants_data')
gdfs['RAILWAY'] = kedro_catalog.load('raw_railways_data')
gdfs['RAILWAY'] = gpd.GeoDataFrame.from_features(gdfs['RAILWAY']['features'])
gdfs['PIPELINE'] = kedro_catalog.load('raw_pipelines_data')
gdfs['PIPELINE'] = gpd.GeoDataFrame.from_features(gdfs['PIPELINE']['features'])
gdfs['CITY'] = kedro_catalog.load('raw_cities_energy_data')
gdfs['CITY']['orig_geom'] = gdfs['CITY']['geom_gj'].apply(lambda el: geometry.shape(el))
gdfs['CITY'] = gdfs['CITY'].set_geometry('orig_geom')
gdfs['POWERSTATION'] = kedro_catalog.load('raw_pipelines_data')
gdfs['POWERSTATION'] = gpd.GeoDataFrame.from_features(gdfs['POWERSTATION']['features'])
fig, ax = plt.subplots(1,1,figsize=(36,36))
ne.boundary.plot(ax=ax,color='#{:02x}{:02x}{:02x}'.format(*vis_params['vis_colors']['ne']),zorder=0)
gdfs['SHIPPINGROUTE'].plot(ax=ax, color='#{:02x}{:02x}{:02x}'.format(*vis_params['vis_colors']['SHIPPINGROUTE']), lw=0.5, zorder=1)
gdfs['PORT'].plot(ax=ax, color='#{:02x}{:02x}{:02x}'.format(*vis_params['vis_colors']['PORT']), markersize=5, zorder=1)
gdfs['LNGTERMINAL'].plot(ax=ax, color='#{:02x}{:02x}{:02x}'.format(*vis_params['vis_colors']['LNGTERMINAL']), markersize=13, zorder=1)
gdfs['COALMINE'].plot(ax=ax, color='#{:02x}{:02x}{:02x}'.format(*vis_params['vis_colors']['COALMINE']), markersize=8, zorder=1)
gdfs['OILFIELD'].plot(ax=ax, color='#{:02x}{:02x}{:02x}'.format(*vis_params['vis_colors']['OILFIELD']),alpha=0.5, zorder=1)
gdfs['OILWELL'].plot(ax=ax, color='#{:02x}{:02x}{:02x}'.format(*vis_params['vis_colors']['OILWELL']),markersize=5, zorder=1)
gdfs['REFINERY'].plot(ax=ax, color='#{:02x}{:02x}{:02x}'.format(*vis_params['vis_colors']['REFINERY']),markersize=5, zorder=1)
gdfs['PIPELINE'].plot(ax=ax, color='#{:02x}{:02x}{:02x}'.format(*vis_params['vis_colors']['PIPELINE']),lw=0.3, zorder=1)
gdfs['RAILWAY'].plot(ax=ax, color='#{:02x}{:02x}{:02x}'.format(*vis_params['vis_colors']['RAILWAY']),lw=0.5, zorder=1)
gdfs['CITY'].plot(ax=ax, color='#{:02x}{:02x}{:02x}'.format(*vis_params['vis_colors']['CITY']), zorder=2)
gdfs['POWERSTATION'].plot(ax=ax, color='#{:02x}{:02x}{:02x}'.format(*vis_params['vis_colors']['POWERSTATION']),markersize=8,alpha=0.5, zorder=2)
ax.set_aspect(1.2)
plt.savefig('./all_assets_vis.png', bbox_inches='tight')
def prep_assets(params, df_edges, refineries, oilfields, oilwells, coalmines, lngterminals, ports, cities, powerstations, df_missing_cities, df_missing_powerstations, railways,shippingroutes, pipelines, ne):
logger = logging.getLogger('Prep assets')
df_ptassets = pd.concat([refineries, oilfields, oilwells, coalmines, lngterminals, ports, cities, powerstations])
df_linassets = pd.concat([railways, shippingroutes,pipelines])
### filter all dfs
all_nodes = list(set(df_edges['source'].unique().tolist() + df_edges['target'].unique().tolist()))
#all_nodes = all_nodes + [n+'_B' for n in all_nodes]
# drop the lin assets
logger.info('Dropping nodes not in edges')
df_edges['source_type'] = df_edges['source'].str.split('_').str[0]
df_edges['target_type'] = df_edges['target'].str.split('_').str[0]
for lin_asset in ['RAILWAY','PIPELINE','SHIPPINGROUTE']:
df_edges = df_edges[~((df_edges['source_type']==lin_asset) & (df_edges['target_type']==lin_asset))]
df_edges = df_edges[df_edges['source_type']!='supersource']
# drop any '_B' assets
df_edges['source'] = df_edges['source'].str.replace('_B','')
df_edges['target'] = df_edges['target'].str.replace('_B','')
# join geometries for missing
df_missing_cities = pd.merge(df_missing_cities, df_ptassets[['unique_id','geometry']], how='left',on='unique_id')
#print ('missing')
#print (df_missing_cities)
#print (df_missing_powerstations)
# drop non-nodes
df_ptassets = df_ptassets[df_ptassets['unique_id'].isin(all_nodes)]
df_linassets = df_linassets[df_linassets['START'].isin(all_nodes)]
# map geoms on ptassets
logger.info('mapping geometries')
df_ptassets['geometry'] = df_ptassets['geometry'].apply(wkt.loads)
# do polygon assets
df_ptassets.loc[df_ptassets['unique_id'].str.split('_').str[0]=='OILFIELD','geometry'] = df_ptassets.loc[df_ptassets['unique_id'].str.split('_').str[0]=='OILFIELD','geometry'].apply(lambda el: el.representative_point())
df_ptassets.loc[df_ptassets['unique_id'].str.split('_').str[0]=='CITY','geometry'] = df_ptassets.loc[df_ptassets['unique_id'].str.split('_').str[0]=='CITY','geometry'].apply(lambda el: el.representative_point())
# map geoms on remaining edges
df_edges = pd.merge(df_edges, df_ptassets[['unique_id','geometry']], how='left',left_on='source',right_on='unique_id').rename(columns={'geometry':'geometry_source'}).drop(columns=['unique_id'])
df_edges = pd.merge(df_edges, df_ptassets[['unique_id','geometry']], how='left',left_on='target',right_on='unique_id').rename(columns={'geometry':'geometry_target'}).drop(columns=['unique_id'])
df_edges.loc[df_edges['source_type'].isin(['RAILWAY','PIPELINE','SHIPPINGROUTE']),'geometry_source'] = df_edges.loc[df_edges['source_type'].isin(['RAILWAY','PIPELINE','SHIPPINGROUTE']),'source'].apply(lambda el: geometry.Point([float(cc) for cc in el.split('_')[2:4]]))
df_edges.loc[df_edges['target_type'].isin(['RAILWAY','PIPELINE','SHIPPINGROUTE']),'geometry_target'] = df_edges.loc[df_edges['target_type'].isin(['RAILWAY','PIPELINE','SHIPPINGROUTE']),'target'].apply(lambda el: geometry.Point([float(cc) for cc in el.split('_')[2:4]]))
print ('bork')
print (df_edges.loc[df_edges['target_type'].isin(['RAILWAY','PIPELINE','SHIPPINGROUTE'])])
df_edges['geometry'] = df_edges.apply(lambda row: geometry.LineString([row['geometry_source'],row['geometry_target']]), axis=1)
print ('IDL')
pos_idl = ((df_linassets['START'].str.split('_').str[0]=='SHIPPINGROUTE') &(df_linassets['END'].str.split('_').str[0]=='SHIPPINGROUTE')&(df_linassets['START'].str.split('_').str[2].astype(float)<-175)&(df_linassets['END'].str.split('_').str[2].astype(float)>175))
neg_idl =((df_linassets['START'].str.split('_').str[0]=='SHIPPINGROUTE') &(df_linassets['END'].str.split('_').str[0]=='SHIPPINGROUTE')&(df_linassets['START'].str.split('_').str[2].astype(float)>175)&(df_linassets['END'].str.split('_').str[2].astype(float)<-175))
print (pos_idl.sum(), neg_idl.sum())
# remove IDL from linassets
df_linassets = df_linassets[~pos_idl]
df_linassets = df_linassets[~neg_idl]
# map geoms on linassets (LSS)
df_linassets['start_geometry'] = df_linassets['START'].apply(lambda el: geometry.Point([float(cc) for cc in el.split('_')[2:4]]))
df_linassets['end_geometry'] = df_linassets['END'].apply(lambda el: geometry.Point([float(cc) for cc in el.split('_')[2:4]]))
df_linassets['geometry'] = df_linassets.apply(lambda row: geometry.LineString([row['start_geometry'],row['end_geometry']]),axis=1)
# map geoms on missing
df_missing_cities['geometry'] = df_missing_cities['geometry'].apply(wkt.loads)
df_missing_cities['geometry'] = df_missing_cities['geometry'].apply(lambda el: el.representative_point())
df_missing_powerstations['geometry'] = df_missing_powerstations['geometry'].apply(wkt.loads)
print ('edges')
print (df_edges)
print ('assets')
print (df_ptassets)
print ('linassets')
print (df_linassets)
print ('tuples')
print (set([tuple(el) for el in df_edges[['source_type','target_type']].values.tolist()]))
# get color keys
df_edges['color_key'] = 'FINALMILE'
for kk in ['RAILWAY','PIPELINE','SHIPPINGROUTE']:
df_edges.loc[((df_edges['source_type']==kk) | (df_edges['target_type']==kk)),'color_key'] = kk
df_linassets['color_key'] = df_linassets['START'].str.split('_').str[0]
df_ptassets['color_key'] = df_ptassets['unique_id'].str.split('_').str[0]
return df_edges, df_linassets, df_ptassets, df_missing_cities, df_missing_powerstations
def visualise_assets(params, df_edges, refineries, oilfields, oilwells, coalmines, lngterminals, ports, cities, powerstations, df_missing_cities, df_missing_powerstations, railways,shippingroutes, pipelines, ne):
logger=logging.getLogger('Visualising')
df_edges, df_linassets, df_ptassets, df_missing_cities, df_missing_powerstations = prep_assets(params, df_edges, refineries, oilfields, oilwells, coalmines, lngterminals, ports, cities, powerstations, df_missing_cities, df_missing_powerstations, railways, shippingroutes, pipelines, ne)
# prep gdfs
logger.info('Prepping geodataframes')
gdfs = []
for kk in df_ptassets['color_key'].unique():
gdfs.append(
{
'gdf':gpd.GeoDataFrame(df_ptassets[df_ptassets['color_key']==kk], geometry='geometry'),
'color_key':kk,
'type':'pt_asset'
}
)
for kk in df_linassets['color_key'].unique():
gdfs.append(
{
'gdf':gpd.GeoDataFrame(df_linassets[df_linassets['color_key']==kk], geometry='geometry'),
'color_key':kk,
'type':'lin_asset'
}
)
for kk in df_edges['color_key'].unique():
gdfs.append(
{
'gdf':gpd.GeoDataFrame(df_edges[df_edges['color_key']==kk], geometry='geometry'),
'color_key':kk,
'type':'edges'
}
)
# missing
gdfs += [
{
'gdf':gpd.GeoDataFrame(df_missing_cities, geometry='geometry'),
'color_key':'MISSING_CITY',
'type':'missing_city',
},
{
'gdf':gpd.GeoDataFrame(df_missing_powerstations, geometry='geometry'),
'color_key':'MISSING_POWERSTATION',
'type':'missing_powerstation',
},
]
params['figsize'] = (72,48)
    logger.info('Calling matplotlib')
visualise_gpd(params, gdfs, ne, logger)
return []
def visualise_flow(params, ne, df_flow, df_community_edges, df_community_nodes):
# get carrier
if 'COALMINE' in df_community_nodes['NODETYPE'].unique():
carrier='coal'
carrier_supplytypes = ['COALMINE']
elif 'LNGTERMINAL' in df_community_nodes['NODETYPE'].unique():
carrier='gas'
carrier_supplytypes = ['OILFIELD','OILWELL']
else:
carrier='oil'
carrier_supplytypes = ['OILFIELD','OILWELL']
logger = logging.getLogger(f'visualise flow: {carrier}')
logger.info('prepping DFs')
df_community_nodes = df_community_nodes[~df_community_nodes['NODETYPE'].isin(['RAILWAY','PIPELINE','SHIPPINGROUTE'])]
print ('nodes')
print (df_community_nodes)
df_flow = df_flow.rename(columns={'SOURCE':'source','TARGET':'target'})
df_flow = df_flow.set_index(['source','target'])
print ('df_flow')
print (df_flow)
df_community_edges['source_type'] = df_community_edges['source'].str.split('_').str[0]
df_community_edges['target_type'] = df_community_edges['target'].str.split('_').str[0]
df_community_edges = df_community_edges.set_index(['source','target'])
print ('df edges')
print (df_community_edges)
df_community_edges = pd.merge(df_community_edges, df_flow[['flow']], how='left', left_index=True, right_index=True)
logger.info('mapping geometries')
df_community_edges['geometry'] = df_community_edges['geometry'].apply(wkt.loads)
df_community_nodes['geometry'] = df_community_nodes['geometry'].apply(wkt.loads)
logger.info('doing colors and weights')
#df_colors = pd.DataFrame.from_dict({kk:"#{:02x}{:02x}{:02x}".format(*vv) for kk,vv in params['vis_colors'].items()}, orient='index').rename(columns={0:'hex'})
colormap = {kk:"#{:02x}{:02x}{:02x}".format(*vv) for kk,vv in params['vis_colors'].items()}
df_community_edges['color_key'] = 'FINALMILE'
for kk in ['RAILWAY','PIPELINE','SHIPPINGROUTE']:
df_community_edges.loc[((df_community_edges['source_type']==kk) | (df_community_edges['target_type']==kk)),'color_key'] = kk
df_community_edges['color_hex'] = df_community_edges['color_key'].map(colormap)
df_community_nodes['color_hex'] = df_community_nodes['NODETYPE'].map(colormap)
MIN_EDGE = 1
MAX_EDGE = 10
MIN_NODE = 1
MAX_NODE = 25
df_community_nodes = pd.merge(df_community_nodes, df_flow.reset_index()[['target','flow']], how='left',left_on='NODE',right_on='target')
# do demand and supply separately
df_community_nodes['s'] = (np.log10(df_community_nodes['D']+1) - np.log10(df_community_nodes['D']+1).min())/(np.log10(df_community_nodes['D']+1).max() - np.log10(df_community_nodes['D']+1).min())*(MAX_NODE-MIN_NODE)+MIN_NODE
df_community_nodes['s_flow'] = (np.log10(df_community_nodes['flow']+1) - np.log10(df_community_nodes['D']+1).min())/(np.log10(df_community_nodes['flow']+1).max() - np.log10(df_community_nodes['flow']+1).min())*(MAX_NODE-MIN_NODE)+MIN_NODE
df_community_nodes.loc[df_community_nodes['NODETYPE'].isin(carrier_supplytypes),'s'] = df_community_nodes.loc[df_community_nodes['NODETYPE'].isin(carrier_supplytypes),'s_flow']
#df_community_edges['s'] = (np.log(df_community_edges['flow']+1) - np.log(df_community_edges['flow']+1).min())/(np.log(df_community_edges['flow']+1).max() - np.log(df_community_edges['flow']+1).min())*(MAX_EDGE-MIN_EDGE)+MIN_EDGE
df_community_edges['s'] = (df_community_edges['flow'] - df_community_edges['flow'].min())/(df_community_edges['flow'].max() - df_community_edges['flow'].min())*(MAX_EDGE-MIN_EDGE)+MIN_EDGE
df_community_edges = df_community_edges[df_community_edges['flow']>0]
# get rid of the ones that are super long
df_community_edges['len'] = df_community_edges['geometry'].apply(lambda geom: geom.length)
df_community_edges = df_community_edges[df_community_edges['len']<350]
#cast to gdf
df_community_nodes = gpd.GeoDataFrame(df_community_nodes, geometry='geometry')
df_community_edges = gpd.GeoDataFrame(df_community_edges, geometry='geometry')
fig, ax = plt.subplots(1,1,figsize=(48,60))
ne.plot(ax=ax, color='#{:02x}{:02x}{:02x}'.format(*params['vis_colors']['ne']), **params['type_style']['ne'])
_plot_point_collection(
ax=ax,
geoms=df_community_nodes['geometry'],
color=df_community_nodes['color_hex'].values.tolist(),
markersize=df_community_nodes['s'].values.tolist()
)
_plot_linestring_collection(
ax=ax,
geoms=df_community_edges['geometry'],
color=df_community_edges['color_hex'].values.tolist(),
linewidth=df_community_edges['s'].values.tolist()
)
ax.set_aspect(1.5)
#ax.set_position([0,0,1,1])
plt.savefig(os.path.join(os.getcwd(),'results','figures',f'flow_{carrier}.png'))
return []
def compare_flow(params, ne, df_flow_bl, df_flow_cf, df_community_edges, df_community_nodes):
# get carrier
if 'COALMINE' in df_community_nodes['NODETYPE'].unique():
carrier='coal'
carrier_supplytypes = ['COALMINE']
elif 'LNGTERMINAL' in df_community_nodes['NODETYPE'].unique():
carrier='gas'
carrier_supplytypes = ['OILFIELD','OILWELL']
else:
carrier='oil'
carrier_supplytypes = ['OILFIELD','OILWELL']
logger = logging.getLogger(f'visualise flow: {carrier}')
writer = logging.getLogger(f'writer_{carrier}')
fh = logging.FileHandler(f'compare_{carrier}.log')
fh.setLevel(logging.INFO)
writer.addHandler(fh)
logger.info('prepping DFs')
df_community_nodes = df_community_nodes[~df_community_nodes['NODETYPE'].isin(['RAILWAY','PIPELINE','SHIPPINGROUTE'])]
df_flow_bl = df_flow_bl.rename(columns={'SOURCE':'source','TARGET':'target'})
df_flow_bl = df_flow_bl.set_index(['source','target'])
df_flow_cf = df_flow_cf.rename(columns={'SOURCE':'source','TARGET':'target'})
df_flow_cf = df_flow_cf.set_index(['source','target'])
df_community_edges['source_type'] = df_community_edges['source'].str.split('_').str[0]
df_community_edges['target_type'] = df_community_edges['target'].str.split('_').str[0]
df_community_edges = df_community_edges.set_index(['source','target'])
print ('edges')
print (df_community_edges)
print ('flow_bl')
print(df_flow_bl)
print ('flow_cf')
print(df_flow_cf)
df_community_edges = pd.merge(df_community_edges, df_flow_bl[['flow']], how='left', left_index=True, right_index=True).rename(columns={'flow':'bl_flow'})
df_community_edges = pd.merge(df_community_edges, df_flow_cf[['flow']], how='left', left_index=True, right_index=True).rename(columns={'flow':'cf_flow'})
logger.info('mapping geometries')
df_community_edges['geometry'] = df_community_edges['geometry'].apply(wkt.loads)
df_community_nodes['geometry'] = df_community_nodes['geometry'].apply(wkt.loads)
logger.info('doing colors and weights')
#df_colors = pd.DataFrame.from_dict({kk:"#{:02x}{:02x}{:02x}".format(*vv) for kk,vv in params['vis_colors'].items()}, orient='index').rename(columns={0:'hex'})
colormap = {kk:"#{:02x}{:02x}{:02x}".format(*vv) for kk,vv in params['vis_colors'].items()}
#df_community_edges['color_key'] = 'FINALMILE'
#for kk in ['RAILWAY','PIPELINE','SHIPPINGROUTE']:
# df_community_edges.loc[((df_community_edges['source_type']==kk) | (df_community_edges['target_type']==kk)),'color_key'] = kk
#df_community_edges['color_hex'] = df_community_edges['color_key'].map(colormap)
df_community_nodes['color_hex'] = df_community_nodes['NODETYPE'].map(colormap)
MIN_EDGE = 1
MAX_EDGE = 10
MIN_NODE = 1
MAX_NODE = 25
df_community_nodes = pd.merge(df_community_nodes, df_flow_bl.reset_index()[['target','flow']], how='left',left_on='NODE',right_on='target')
# do demand and supply separately
df_community_nodes['s'] = (np.log10(df_community_nodes['D']+1) - np.log10(df_community_nodes['D']+1).min())/(np.log10(df_community_nodes['D']+1).max() - np.log10(df_community_nodes['D']+1).min())*(MAX_NODE-MIN_NODE)+MIN_NODE
df_community_nodes['s_flow'] = (np.log10(df_community_nodes['flow']+1) - np.log10(df_community_nodes['D']+1).min())/(np.log10(df_community_nodes['flow']+1).max() - np.log10(df_community_nodes['flow']+1).min())*(MAX_NODE-MIN_NODE)+MIN_NODE
df_community_nodes.loc[df_community_nodes['NODETYPE'].isin(carrier_supplytypes),'s'] = df_community_nodes.loc[df_community_nodes['NODETYPE'].isin(carrier_supplytypes),'s_flow']
#df_community_edges['s'] = (np.log(df_community_edges['flow']+1) - np.log(df_community_edges['flow']+1).min())/(np.log(df_community_edges['flow']+1).max() - np.log(df_community_edges['flow']+1).min())*(MAX_EDGE-MIN_EDGE)+MIN_EDGE
df_community_edges['s'] = (df_community_edges['bl_flow'] - df_community_edges['bl_flow'].min())/(df_community_edges['bl_flow'].max() - df_community_edges['bl_flow'].min())*(MAX_EDGE-MIN_EDGE)+MIN_EDGE
print('new edges')
print (df_community_edges.loc[(df_community_edges['cf_flow']>0) & (df_community_edges['bl_flow']==0)])
df_community_edges['difference'] = df_community_edges['bl_flow'] - df_community_edges['cf_flow']
df_community_edges['reduction'] = df_community_edges['difference']/df_community_edges['bl_flow']
cm = LinearSegmentedColormap.from_list('GrayRd', [(0,1,0),(.63,.63,.63),(1, 0, 0)], N=255)
df_community_edges = df_community_edges[(df_community_edges['bl_flow']>0) | (df_community_edges['cf_flow'])>0]
def apply_colmap(row):
if row['bl_flow']>0:
cm_val = (row['reduction']+1.)*128 # between 0 and 255 with 128 as neutral
return '#{:02x}{:02x}{:02x}'.format(*[int(il*255) for il in cm(int(cm_val))[0:3]])
else:
return '#{:02x}{:02x}{:02x}'.format(0,0,255)
df_community_edges['color_hex'] = df_community_edges.apply(lambda row: apply_colmap(row), axis=1)
# filter IDL -> just use euclidean length
logger.info('remove idl edges')
df_community_edges['len'] = df_community_edges['geometry'].apply(lambda el: el.length)
df_community_edges = df_community_edges[df_community_edges['len']<=350]
df_community_edges = df_community_edges.reset_index()
# get top changes and add write them to file
# want: top/bottom 10 reduced sources
logger.info('writing differences to file')
for idx, val in df_community_edges.loc[df_community_edges['source_type'].isin(carrier_supplytypes),['source','difference','bl_flow']].groupby('source').sum().sort_values('difference').iloc[:10].iterrows():
writer.info(f'idx:{idx}\t difference:{val["difference"]}\t bl_flow:{val["bl_flow"]}')
for idx, val in df_community_edges.loc[df_community_edges['source_type'].isin(carrier_supplytypes),['source','difference','bl_flow']].groupby('source').sum().sort_values('difference').iloc[-10:].iterrows():
writer.info(f'idx:{idx}\t difference:{val["difference"]}\t bl_flow:{val["bl_flow"]}')
# want: top/bottom 10 reduced transmission
for idx, val in df_community_edges.loc[~df_community_edges['source_type'].isin(carrier_supplytypes),['source','target','difference', 'reduction']].sort_values('difference').iloc[:10].iterrows():
writer.info(f'src:{val["source"]}\t target:{val["target"]}\t difference:{val["difference"]}\t reduction: {val["reduction"]}')
for idx, val in df_community_edges.loc[~df_community_edges['source_type'].isin(carrier_supplytypes),['source','target','difference','reduction']].sort_values('difference').iloc[-10:].iterrows():
writer.info(f'src:{val["source"]}\t target:{val["target"]}\t difference:{val["difference"]}\t reduction: {val["reduction"]}')
#cast to gdf
df_community_nodes = gpd.GeoDataFrame(df_community_nodes, geometry='geometry')
df_community_edges = gpd.GeoDataFrame(df_community_edges, geometry='geometry')
fig, ax = plt.subplots(1,1,figsize=(48,60))
ne.plot(ax=ax, color='#{:02x}{:02x}{:02x}'.format(*params['vis_colors']['ne']), **params['type_style']['ne'])
_plot_point_collection(
ax=ax,
geoms=df_community_nodes['geometry'],
color=df_community_nodes['color_hex'].values.tolist(),
markersize=df_community_nodes['s'].values.tolist()
)
_plot_linestring_collection(
ax=ax,
geoms=df_community_edges['geometry'],
color=df_community_edges['color_hex'].values.tolist(),
linewidth=df_community_edges['s'].values.tolist()
)
ax.set_aspect(1.5)
#ax.set_position([0,0,1,1])
plt.savefig(os.path.join(os.getcwd(),'results','figures',f'flow_sds_{carrier}.png'))
return []
def node_iso2(iso2,ne,df_coal_nodes,df_oil_nodes,df_gas_nodes,df_raw_oilfields,df_raw_oilwells):
logger=logging.getLogger('do iso2s')
logger.info('Doing oilfields and oilwells which might be offshore')
map_dict = iso2[['country','iso2']].set_index('country').to_dict()['iso2']
oilwells_map = {'congo':'CG',"cote d''ivoire":'CI','iran (islamic republic of)':'IR'}
oilfields_map = {
'venezuela, bolivarian republic of':'VE',
'syrian arab republic':'SY',
'democratic republic of the congo':'CD',
'republic of korea':'KR',
"democratic people''s republic of korea":'KP',
'united republic of tanzania':'TZ',
'republic of korea':'KR',
'guinea bissau':'GW',
'bolivia (plurinational state of)':'BO',
'republic of moldova':'MD',
'the former yugoslav republic of macedonia':'MK',
"lao people''s democratic republic":'LA',
}
map_dict.update(oilwells_map)
map_dict.update(oilfields_map)
df_raw_oilfields['md_country'] = df_raw_oilfields['md_country'].str.lower()
df_raw_oilwells['md_country'] = df_raw_oilwells['md_country'].str.lower()
iso2['country'] = iso2['country'].str.lower()
df_raw_oilfields = pd.merge(df_raw_oilfields, iso2[['country','iso2']], how='left',left_on='md_country',right_on='country')
#df_raw_oilfields.loc[df_raw_oilfields['iso2'].isna(),'iso2'] = df_raw_oilfields.loc[df_raw_oilfields['iso2'].isna(),'md_country'].str.split(';').apply(lambda ll: ','.join([map_dict[el.strip()] if el.strip() in map_dict.keys() else el for el in ll]))
df_raw_oilwells = | pd.merge(df_raw_oilwells, iso2[['country','iso2']], how='left',left_on='md_country',right_on='country') | pandas.merge |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Description"""
import io
import logging
import flask
import numpy as np
import pandas as pd
logger = logging.getLogger(__name__)
serve_app = flask.Flask(__name__)
@serve_app.route("/ping", methods=["GET"])
def ping():
    return flask.Response(response="\n", status=200, mimetype="application/json")
@serve_app.route("/invocations", methods=["POST"])
def invocations():
    if flask.request.content_type == "text/csv":
data = flask.request.data.decode("utf-8")
s = io.StringIO(data)
data = | pd.read_csv(s, header=None) | pandas.read_csv |
import pandas as pd
import seaborn as sns
from etherscan import Etherscan
import streamlit as st
import matplotlib
import matplotlib.pyplot as plt
matplotlib.style.use('ggplot')
def without_hue( plot, feature, title, criteria, x_axis_rotation=0, _format=None):
sns.set(rc={'figure.figsize':(11.7,8.27)})
plot.set_xticklabels(plot.get_xticklabels(), rotation=x_axis_rotation)
for p in plot.patches:
if p.get_height() > 0.01:
if _format:
final_p = "{:.2f}".format(p.get_height())
else:
final_p = p.get_height()
x = p.get_x() + p.get_width() / 2 - 0.1
y = p.get_y() + p.get_height() + 0.50
plot.annotate(final_p, (x, y), size = 12)
plot.set_title(title, fontsize=20, weight='bold')
plot.set_xlabel(criteria, fontsize=14)
plot.set_ylabel("Events Total", fontsize=14)
plt.show()
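# --- Hedged usage sketch for without_hue ----------------------------------
# The column name 'nft_event' and the tiny frame below are invented for the
# example; the real dashboards annotate plots built from the CSVs loaded below.
def _example_without_hue():
    demo = pd.DataFrame({'nft_event': ['Transfer', 'Transfer', 'Mint', 'Sale']})
    ax = sns.countplot(x='nft_event', data=demo)
    without_hue(ax, demo['nft_event'], 'Events by type', 'nft_event')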
def fmt(x):
print(x)
total = len(values)
return '{:.4f}%\n({:.0f})'.format(x, total*x/100)
st.title('Piano king NFT Dashboards')
st.subheader('Distribution of events over 1,000 Piano King NFTs')
path_raw_data = "resources/output/pianoking_data.csv"
path_transac_history = "resources/output/transac_history_by_PK_NFT_ID.csv"
st.set_option('deprecation.showPyplotGlobalUse', False)
df = pd.read_csv(path_raw_data , sep=',')
# Import du csv en ométtant la première colonne d'index
df_transac_hist = | pd.read_csv(path_transac_history, index_col=[0]) | pandas.read_csv |
# from warnings import warn
# from faps.alogsumexp import alogsumexp
from operator import pos
import numpy as np
import pandas as pd
import faps as fp
from glob import glob
from tqdm import tqdm
import os
def import_mcmc(folder, burnin):
"""
Import files with MCMC output for A. majus mating parameters
Glob a set of files with the output of amajusmating.mating.run_MCMC(), import
each and remove the first rows of each as burn-in, then concatenate into a
single dataframe.
Parameters
==========
folder: str
Path to a directory containing one or more files from
amajusmating.mating.run_MCMC ending with the suffix `.out`
burnin: int
Integer number of rows to remove from each chain as burnin
Returns
=======
A single dataframe concatenating data from the imported chains. Since we are
interested in dispersal parameters, the `mixture` parameter is set to 1 for
all rows.
"""
# Import MCMC chains
chains = glob(folder+"/*out")
posterior = {}
for chain in chains:
k = os.path.basename(chain)
posterior[k] = pd.read_csv(chain, sep="\t").loc[lambda x: x.iter >= burnin]
posterior = pd.concat(posterior).reset_index()
# For this script we are only interested in the generalised-Gaussian part
# of the dispersal kernel, so set `mixture` to 1 for all rows.
posterior['mixture'] = 1
return posterior
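# --- Hedged usage sketch for import_mcmc -----------------------------------
# The folder name and burn-in below are illustrative assumptions; point the
# call at whichever directory run_MCMC wrote its *.out chains into.
def _example_import_mcmc(folder = "output/dispersal_mcmc", burnin = 500):
    posterior = import_mcmc(folder, burnin = burnin)
    # Summarise every sampled parameter across the concatenated chains.
    return posterior.describe()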
def simulate_mating(data, model, ndraws = 1000, max_distance = np.inf):
"""
Posterior samples of mating events.
Draw a sample of mating events from the posterior distributions of sibship
stuctures and paternity, given observed genotype and dispersal data. Also
draw a sample of mating events based on dispersal only, to simulate what
would be 'expected' if mating was random, and based only on distance.
Finally, merge the two datasets with GPS and phenotype data.
Parameters
==========
data: `faps_data` class object
Data about the population.
model: dict
Dictionary of starting model parameters. Keys should be a subset of
['missing', 'shape', 'scale', 'mixture', 'assortment'] and values
floats giving initial values for those parameters, within appropriate
boundaries.
ndraws: int, optional
Number of Monte Carlo draws to perform for sibship clustering. Defaults
to 1000.
max_distance: float, int
Maximum distance from the mother a candidate may be. Candidates further than
this value will have their posterior probability of paternity set to zero.
This is equivalent to setting a threshold prior on distance.
Returns
=======
A dictionary of two dataframes showing the output of faps.posterior_mating()
for observed data ('obs'; based on genotypic and covariates) and expected
data ('exp'; based on covariates only). This includes columns for flower
colours and GPS.
"""
# Update data with parameter values
data.update_covariate_probs(model = model, max_distance = max_distance)
# We need to set dispersal probabilities for mothers to zero
# Otherwise the mother would be drawn many times when drawing from the dispersal
# kernel only, and cause many invalid partitions.
jx = [np.where(x == np.array(data.candidates))[0][0] for x in data.mothers]
for i,j in zip(range(len(data.paternity)), jx):
data.covariates['dispersal'][i, j] = -np.inf
# Cluster into sibships, if not already done.
data.sibship_clustering(ndraws = 1000, use_covariates = True)
# Draw a posterior data set for observed data, and expected under dispersal only
obs = fp.posterior_mating(data.sibships, covariates_only=False, ndraws=ndraws)
exp = fp.posterior_mating(data.sibships, covariates_only=True, ndraws=ndraws)
# Add data on distance between mother and father and flower colours.
# Turn distance matrix into a data frame so we can use .loc on it.
distance_df = pd.DataFrame({
'mother' : np.repeat(list(data.mothers), data.n_candidates),
'father' : np.tile(data.candidates, len(data.mothers)),
'distance' : data.distances.flatten()
})
# Merge siring events with distances and phenotypes.
# Observed mating events
obs = obs.\
merge(data.gps['Easting'], how='left', left_on='mother', right_index=True) .\
merge(distance_df, how="left", on=['mother', 'father']).\
merge(data.flower_colours, how="left", left_on="mother", right_index=True).\
merge(data.flower_colours, how="left", left_on="father", right_index=True, suffixes = ['_mother', "_father"])
# Random mating
exp = exp.\
merge(data.gps['Easting'], how='left', left_on='mother', right_index=True).\
merge(distance_df, how="left", on=['mother', 'father']).\
merge(data.flower_colours, how="left", left_on="mother", right_index=True).\
merge(data.flower_colours, how="left", left_on="father", right_index=True, suffixes = ['_mother', "_father"])
return {
'obs' : obs,
'exp' : exp
}
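# --- Hedged calling-pattern sketch for simulate_mating ---------------------
# A `faps_data` object is built elsewhere in the package, so this only shows
# the call signature; the parameter values in `model` are placeholders, not
# fitted estimates.
def _example_simulate_mating(data):
    model = {'missing': 0.2, 'shape': 0.7, 'scale': 10.0, 'mixture': 1.0}
    mating = simulate_mating(data, model, ndraws = 1000, max_distance = np.inf)
    # Two data frames: observed mating events vs. dispersal-only expectation.
    return mating['obs'], mating['exp']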
def summarise_families(obs):
"""
Summarise mating events and dispersal
Parameters
==========
obs: pandas.DataFrame
Observed mating events given dispersal and genetic data.
This should be the output from `amajus.mating.simulate_mating()`.
Returns
=======
A list giving:
* Number of mating events, excluding those with missing fathers.
* Number of mating events with unsampled candidates. Note that if several
sires for a half-sib array are missing FAPS lumps these together, so the
actual number could be higher.
* Estimated number of 'orphans'; offspring with an unsampled father.
* Number of half-sibships for which all fathers were unsampled.
* Median pollen dispersal distance
* Number of dispersal events > 100m
* Number of dispersal events > 500m
* Number of dispersal events > 1000m
"""
return [
np.sum(obs['father'] != "missing"),
np.sum(obs['father'] == "missing"),
obs.loc[obs['father'] == "missing"]['offspring_sired'].sum(),
(obs.loc[obs['father'] == "missing"]['frequency'] == 1).sum(),
np.median(obs['distance'].dropna()),
np.sum(obs['distance'] > 100),
np.sum(obs['distance'] > 500),
np.sum(obs['distance'] > 1000),
]
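# --- Hedged toy example for summarise_families -----------------------------
# The tiny table below is invented purely to show the shape of the summary;
# real input is the 'obs' frame returned by simulate_mating().
def _example_summarise_families():
    toy_obs = pd.DataFrame({
        'father':          ['M0001', 'M0002', 'missing', 'missing'],
        'offspring_sired': [3, 1, 2, 1],
        'frequency':       [0.9, 0.4, 1.0, 0.5],
        'distance':        [12.5, 640.0, np.nan, np.nan],
    })
    # Returns counts of sampled/missing sires, orphans, all-missing sibships,
    # the median dispersal distance, and events beyond 100 m, 500 m and 1000 m.
    return summarise_families(toy_obs)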
def relative_fitness(obs, exp, column, geno, boundaries = None):
"""
Relative fitness of genotypes across the whole hybrid zone, and in
individual spatial bins.
Relative fitness for the whole population is the absolute number of mating
events sired by males of each genotype, divided by the maximum of those
counts. This is run for the whole population. In addition, if a list of
Easting positions is given, the mothers are divided into spatial bins, and
relative fitnesses of sires is calculated separately for each bin. This is
done separately for observed and expected datasets
Parameters
==========
obs: pandas.DataFrame
Observed mating events given dispersal and genetic data.
This should be the output from `amajus.mating.simulate_mating()`.
exp: pandas.DataFrame
Expected mating events under dispersal only.
This should be the output from `amajus.mating.simulate_mating()`.
column: str
Column name shared by data.mating['obs'] and data.mating['exp'] giving
genotypes for which relative fitnesses should be estimated
geno: list
List of possible genotypes to identify in `column`.
boundaries: list or int, optional
List giving Easting positions to divide mothers into spatial bins. This
is passed to the `bins` argument of `pandas.cut()`; see that function
for details.
Returns
=======
A dataframe giving relative fitnesses for each genotype overall and in each
spatial bin in observed and expected datasets. Columns indicate:
- 'dataset': Observed vs expected datasets
- 'bin': Label for the spatial bin. 'all' indicates rows for the whole hybrid zone
- 'start': Western-most boundary of the bin
- 'stop' Eastern-most boundary of the bin
- 'n_sires': Number of sires
- Subsequent columns : relative fitnesses of each genotype
"""
# Fathers with missing data mess up relative fitnesses
# Pull out only those rows with phenotype data
obs = obs.query(column + ' in ' + str(geno))
exp = exp.query(column + ' in ' + str(geno))
# Divide mothers into spatial bins
if boundaries:
# Create a Series with an integer label for each bin. I used integers
# instead of the standard output of pd.cut because it was easier to loop over.
region_obs = pd.cut(obs['Easting'], bins = boundaries, labels = np.arange(1, len(boundaries)))
region_exp = pd.cut(exp['Easting'], bins = boundaries, labels = np.arange(1, len(boundaries)))
# Empty list to store relative fitnesses
rel_fitness = []
# Absolute fitness each genotype across the whole hybrid zone.
# Sum mating events from sires of each genotype
abs_obs = [(obs[column] == g).sum() for g in geno]
abs_exp = [(exp[column] == g).sum() for g in geno]
# Relative fitness of each genotype
rel_obs = abs_obs / np.max(abs_obs)
rel_exp = abs_exp / np.max(abs_exp)
# Send to rel_fitness. NaNs are for bin start and stop, which aren't relevant here
rel_fitness = rel_fitness + [['obs', 'all', np.nan, np.nan, np.sum(abs_obs)] + list(rel_obs)]
rel_fitness = rel_fitness + [['exp', 'all', np.nan, np.nan, np.sum(abs_exp)] + list(rel_exp)]
if boundaries:
# Relative fitnesses in each spatial bin.
for r in range(1, len(boundaries)):
# Absolute fitness each genotype (= sum mating events from sires of each genotype)
abs_obs = [(obs.loc[region_obs == r][column] == g).sum() for g in geno]
abs_exp = [(exp.loc[region_exp == r][column] == g).sum() for g in geno]
# Relative fitness of each genotype
rel_obs = abs_obs / np.max(abs_obs)
rel_exp = abs_exp / np.max(abs_exp)
# Send to rel_fitness, along with bin label, and start and stop for each bin
rel_fitness = rel_fitness + [['obs', r, boundaries[r-1], boundaries[r], np.sum(abs_obs)] + list(rel_obs)]
rel_fitness = rel_fitness + [['exp', r, boundaries[r-1], boundaries[r], np.sum(abs_exp)] + list(rel_exp)]
return pd.DataFrame(rel_fitness, columns=['dataset', 'bin', 'start', 'stop', 'n_sires'] + geno)
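# --- Hedged toy example for relative_fitness -------------------------------
# Invented genotype and Easting values, purely to show the output layout; real
# inputs are the 'obs' and 'exp' frames produced by simulate_mating().
def _example_relative_fitness():
    toy_obs = pd.DataFrame({
        'simple_colour_father': ['FR', 'FR', 'hybrid', 'Ye'],
        'Easting': [100.0, 150.0, 420.0, 480.0],
    })
    toy_exp = pd.DataFrame({
        'simple_colour_father': ['FR', 'hybrid', 'hybrid', 'Ye'],
        'Easting': [110.0, 160.0, 430.0, 470.0],
    })
    return relative_fitness(
        toy_obs, toy_exp,
        column = 'simple_colour_father',
        geno = ['FR', 'hybrid', 'Ye'],
        boundaries = [0, 300, 600]
    )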
def assortative_mating(obs, exp, col_x, col_y, boundaries = None):
"""
Assortative mating between genotypes across the hybrid zone, and within
spatial bins.
Assortment probabilities for the whole population are calculated as the mean
number of mating events between individuals of the same genotype. This is
run for the whole population. In addition, if a list of Easting positions is
given, the mothers are divided into spatial bins, and assortment
probabilities are calculated separately for each bin. This is done
separately for observed and expected datasets.
Parameters
==========
obs: pandas.DataFrame
Observed mating events given dispersal and genetic data.
This should be the output from `amajus.mating.simulate_mating()`.
exp: pandas.DataFrame
Expected mating events under dispersal only.
This should be the output from `amajus.mating.simulate_mating()`.
col_x, col_y: str
The pair of column names shared by data.mating['obs'] and
data.mating['exp'] giving genotypes for which assortment probabilities
should be estimated.
boundaries: list or int, optional
List giving Easting positions to divide mothers into spatial bins. This
is passed to the `bins` argument of `pandas.cut()`; see that function
for details.
Returns
=======
A dataframe giving relative fitnesses for each genotype overall and in each
spatial bin in observed and expected datasets. Columns indicate:
- 'dataset': Observed vs expected datasets
- 'bin': Label for the spatial bin. 'all' indicates rows for the whole
hybrid zone
- 'start': Western-most boundary of the bin
- 'stop' Eastern-most boundary of the bin
- 'n_sires': Number of sires
    - 'assortment': Proportion of mating events between individuals of the same genotype.
"""
# Remove rows with missing data for one or both parents
obs = obs.copy().loc[obs[col_x].notna() & obs[col_y].notna()]
exp = exp.copy().loc[exp[col_x].notna() & exp[col_y].notna()]
# Create a column stating whether genotypes match or not
obs['match'] = obs[col_x] == obs[col_y]
exp['match'] = exp[col_x] == exp[col_y]
# Divide mothers into spatial bins
if boundaries:
# Create a Series with an integer label for each bin. I used integers
# instead of the standard output of pd.cut because it was easier to loop
# over.
region_obs = pd.cut(obs['Easting'], bins = boundaries, labels = np.arange(1, len(boundaries)))
region_exp = pd.cut(exp['Easting'], bins = boundaries, labels = np.arange(1, len(boundaries)))
# Empty list to store assortment probabilities
assortment = []
# Assortative mating for the whole hybrid zone
ass_obs = obs['match'].mean()
ass_exp = exp['match'].mean()
# Send to assortment. NaNs are for bin start and stop, which aren't relevant here
assortment = assortment + [['obs', 'all', np.nan, np.nan, obs.shape[0], ass_obs]]
assortment = assortment + [['exp', 'all', np.nan, np.nan, exp.shape[0], ass_exp]]
# Assortative mating in each spatial bin.
if boundaries:
for r in range(1, len(boundaries)):
# Assortment probabilities in each dataset
ass_obs = obs.loc[region_obs == r]['match'].mean()
ass_exp = exp.loc[region_exp == r]['match'].mean()
# Send to assortment, along with bin label, start and stop, and sample sizes for each bin
assortment = assortment + [['obs', r, boundaries[r-1], boundaries[r], (region_obs == r).sum(), ass_obs]]
assortment = assortment + [['exp', r, boundaries[r-1], boundaries[r], (region_exp == r).sum(), ass_exp]]
return pd.DataFrame(
assortment,
columns=['dataset', 'bin', 'start', 'stop', 'n_sires', 'assortment']
)
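# --- Hedged toy example for assortative_mating -----------------------------
# Invented mother/father colour pairs; real inputs come from simulate_mating().
def _example_assortative_mating():
    toy_obs = pd.DataFrame({
        'simple_colour_mother': ['FR', 'FR', 'Ye', 'hybrid'],
        'simple_colour_father': ['FR', 'Ye', 'Ye', 'FR'],
        'Easting': [100.0, 150.0, 420.0, 480.0],
    })
    toy_exp = pd.DataFrame({
        'simple_colour_mother': ['FR', 'Ye', 'Ye', 'hybrid'],
        'simple_colour_father': ['hybrid', 'Ye', 'FR', 'hybrid'],
        'Easting': [110.0, 160.0, 430.0, 470.0],
    })
    return assortative_mating(
        toy_obs, toy_exp,
        col_x = 'simple_colour_mother',
        col_y = 'simple_colour_father',
        boundaries = [0, 300, 600]
    )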
def mating_over_chains(data, folder, boundaries, burnin = 500, ndraws = 1000):
"""
Summarise number of mating events, relative fitness and assortment for each
iteration of a set of MCMC files.
Parameters
==========
data: `faps_data` class object
Data about the population.
folder: str
Path to a directory containing one or more files from
amajusmating.mating.run_MCMC ending with the suffix `.out`
boundaries: list or int, optional
List giving Easting positions to divide mothers into spatial bins. This
is passed to the `bins` argument of `pandas.cut()`; see that function
for details.
burnin: int
Integer number of rows to remove from each chain as burnin
ndraws: int, optional
Number of Monte Carlo draws to perform for sibship clustering. Defaults
to 1000.
Returns
=======
Saves CSV files for the outputs of
* mating.summarise_mating()
* mating.relative_fitness() for phenotype data (full red, hybrid, yellow),
plus Rosea and Sulfurea genotypes
* assortment() for phenotypes (full red, hybrid, yellow)
Also saves all mating events as `sires.csv`
"""
# Import MCMC results
mcmc = import_mcmc(folder, burnin = burnin)
# Empty dictionaries to store the output of each iteration
sires = {}
summarise = {}
phenotype = {}
rosea = {}
sulf = {}
assortment = {}
# Loop over steps in the MCMC chain and get siring events for each.
for i in tqdm(mcmc.index):
model = mcmc.loc[i]
# Simulate mating events, and include GPS and phenotype information about mother and sire
data.mating = simulate_mating(data, model, ndraws=ndraws)
# Full list of mothers, fathers and distances between them.
sires[i] = data.mating['obs'][['mother','father','distance']]
# Summarise mating events and missing fathers.
summarise[i] = summarise_families(data.mating['obs'])
# Relative fitness of full red, hybrid and yellow plants
phenotype[i] = relative_fitness(
obs = data.mating['obs'],
exp = data.mating['exp'],
column = "simple_colour_father",
geno = ["FR", "hybrid", "Ye"],
boundaries = boundaries
)
# Relative fitness of Rosea genotypes
rosea[i] = relative_fitness(
obs = data.mating['obs'],
exp = data.mating['exp'],
column = "rosea_father",
geno = ["R/R", "R/r", "r/r"],
boundaries = boundaries
)
# Relative fitness of Sulfurea genotypes
sulf[i] = relative_fitness(
obs = data.mating['obs'],
exp = data.mating['exp'],
column = "sulfurea_father",
geno = ["S/+", "s/s"],
boundaries = boundaries
)
# Assortative mating.
assortment[i] = assortative_mating(
obs = data.mating['obs'],
exp = data.mating['exp'],
col_x = 'simple_colour_mother',
col_y = 'simple_colour_father',
boundaries = boundaries
)
# Concatenate output for each iteration into single dataframes
sires = pd.concat(sires)
phenotype = pd.concat(phenotype)
rosea = pd.concat(rosea)
sulf = pd.concat(sulf)
assortment = pd.concat(assortment)
# Concatenate `summarise`, and also add column names.
summarise = | pd.DataFrame(summarise) | pandas.DataFrame |
import pandas as pd
import numpy as np
from prettytable import PrettyTable
def delete_na(dataframes, dtypes):
'''
Objective:
- Delete all NA's from the dataframes passed
Input:
- dataframes : List of dataframes to clean
- dtypes : Numerical dtypes to keep (passed to `select_dtypes`)
Output:
- List of dataframes restricted to their numerical columns, with rows containing NA's removed.
'''
for i in range(len(dataframes)):
dataframe_numerical = dataframes[i].select_dtypes(include=dtypes)
total_rows = dataframe_numerical.shape[0]
# Delete rows that contain na's
dataframe_numerical = dataframe_numerical.dropna()
dataframes[i] = dataframe_numerical
return dataframes
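# Hedged usage sketch with made-up dataframes (not from the original project),
# showing how `delete_na` is expected to be called.
def _example_delete_na():
    df_a = pd.DataFrame({'x': [1.0, None, 3.0], 'label': ['a', 'b', 'c']})
    df_b = pd.DataFrame({'y': [10, 20, None]})
    cleaned = delete_na([df_a, df_b], dtypes=['int64', 'float64'])
    # Each returned dataframe keeps only numerical columns and drops rows with NA's.
    return [df.shape for df in cleaned]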
def analyse_categorical_variables(table_names, variable_names, dataframes):
'''
Objective:
- Analyse the categorical variables to be encoded
Input:
- table_names : List of table names
- variable_names : List of lists with the categorical variables of each table
- dataframes : List of dataframes corresponding to the table names
Output:
- Table with the categorical variables levels
["Table name", "Variable name", "Number of levels", "Types"]
'''
table = PrettyTable()
table.field_names = ["Table name", "Variable name", "Number of levels", "Types"]
for i in range(len(table_names)):
for j in range(len(variable_names[i])):
table.add_row([table_names[i], variable_names[i][j], len(dataframes[i][variable_names[i][j]].unique()), dataframes[i][variable_names[i][j]].unique()])
print(table)
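# Hedged usage sketch (hypothetical tables): illustrates the expected argument
# shapes -- parallel lists of table names, per-table categorical columns, and
# the dataframes themselves.
def _example_analyse_categoricals():
    customers = pd.DataFrame({'segment': ['A', 'B', 'A'], 'country': ['ES', 'FR', 'ES']})
    orders = pd.DataFrame({'channel': ['web', 'shop', 'web']})
    analyse_categorical_variables(
        table_names=['customers', 'orders'],
        variable_names=[['segment', 'country'], ['channel']],
        dataframes=[customers, orders],
    )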
def one_hot_encoding(table, variable):
'''
Objective:
- Encode the categorical variable passed from the table to one-hot encoding.
If the categorical variable only has one level, the column is deleted.
Input:
- table : Dataframe containing the categorical variable
- variable : Name of the categorical column to encode
Output:
For categorical variables with more than one level:
- Table with the categorical variable encoded to one-hot
["Table name", "Variable name", "Number of levels", "Types"]
For categorical variables with a single level:
- String indicating so.
'''
# The column contain more than one level.
if len(table[variable].unique()) > 1:
variable_dummies = | pd.get_dummies(table[variable]) | pandas.get_dummies |
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import librosa
import keras
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Embedding
from keras.layers import LSTM
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.layers import Input, Flatten, Dropout, Activation
from keras.layers import Conv1D, MaxPooling1D, AveragePooling1D
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras.callbacks import ModelCheckpoint
from keras import regularizers
from keras.utils import np_utils
from keras.optimizers import Adam
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelBinarizer, normalize
from sklearn.metrics import confusion_matrix
from sklearn.utils import shuffle
from models.conv1d import Conv1DNN
import argparse
import pickle
import os
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True,
help="path to input dataset (i.e., directory of sound files)")
ap.add_argument("-m", "--model", default="nn.model",
help="path to output model")
ap.add_argument("-l", "--labelbin", default="lb.pickle",
help="path to output label binarizer")
args = vars(ap.parse_args())
KERNEL_SIZE = 12
BATCH_SIZE = 32
EPOCHS = 500
LR = 1.e-6
DATASET = args['dataset']
OPTIMIZER = 'ADAM'
SOUND_PICKLE = 'sound_data.pickle'
EMOTION_LABEL_PICKLE = 'emotion_labels.pickle'
fileList = os.listdir(args['dataset'])
if (not os.path.isfile(SOUND_PICKLE)) or (not os.path.isfile(EMOTION_LABEL_PICKLE)):
feeling_list=[]
for item in fileList:
if item[6:8]=='03' and int(item[18:20])%2==0 or (item[:3]=='gio' and item[4]=='f'):
feeling_list.append('female_happy')
elif (item[6:8]=='03' and int(item[18:20])%2==1) or item[:1]=='h' or (item[:3]=='gio' and item[4]=='m'):
feeling_list.append('male_happy')
elif item[6:8]=='04' and int(item[18:20])%2==0 or (item[:3]=='tri' and item[4]=='f'):
feeling_list.append('female_sad')
elif (item[6:8]=='04' and int(item[18:20])%2==1) or item[:2]=='sa' or (item[:3]=='tri' and item[4]=='m'):
feeling_list.append('male_sad')
elif (item[6:8]=='05' and int(item[18:20])%2==0) or (item[:3]=='rab' and item[4]=='f'):
feeling_list.append('female_angry')
elif (item[6:8]=='05' and int(item[18:20])%2==1) or item[:1]=='a' or (item[:3]=='rab' and item[4]=='m'):
feeling_list.append('male_angry')
elif (item[6:8]=='06' and int(item[18:20])%2==0) or (item[:3]=='pau' and item[4]=='f'):
feeling_list.append('female_fearful')
elif (item[6:8]=='06' and int(item[18:20])%2==1) or item[:1]=='f' or (item[:3]=='pau' and item[4]=='m'):
feeling_list.append('male_fearful')
elif ((item[6:8]=='02' or item[6:8]=='01') and int(item[18:20])%2==1) or item[:1]=='n' or (item[:3]=='neu' and item[4]=='m'):
feeling_list.append('male_neutral')
elif ((item[6:8]=='02' or item[6:8]=='01') and int(item[18:20])%2==0) or (item[:3]=='neu' and item[4]=='f'):
feeling_list.append('female_neutral')
elif (item[6:8]=='08' and int(item[18:20])%2==1) or item[:2] == 'su' or (item[:3]=='sor' and item[4]=='m'):
feeling_list.append('male_surprised')
elif (item[6:8]=='08' and int(item[18:20])%2==0) or (item[:3]=='sor' and item[4]=='f'):
feeling_list.append('female_surprised')
elif item[:1] == 'd' or (item[:3]=='dis' and item[4]=='m') or (item[6:8]=='07' and int(item[18:20])%2==1):
feeling_list.append('male_disgust')
elif (item[:3]=='dis' and item[4]=='f') or (item[6:8]=='07' and int(item[18:20])%2==0):
feeling_list.append('female_disgust')
labels = pd.DataFrame(feeling_list)
labels.to_pickle(EMOTION_LABEL_PICKLE)
soundData = pd.DataFrame(columns=['feature'])
bookmark=0
for index,y in enumerate(fileList):
X, sample_rate = librosa.load(args['dataset'] + '/' +y, res_type='kaiser_fast',sr=22050*2, duration=3.5)
sample_rate = np.array(sample_rate)
mfccs = np.mean(librosa.feature.mfcc(y=X,
sr=sample_rate,
n_mfcc=13),
axis=0)
feature = mfccs
normalized = normalize([feature], norm='l1')
soundData.loc[bookmark] = [normalized[0]]
bookmark=bookmark+1
soundData.to_pickle(SOUND_PICKLE)
else:
soundData = pd.read_pickle(SOUND_PICKLE)
labels = | pd.read_pickle(EMOTION_LABEL_PICKLE) | pandas.read_pickle |
"""
The BIGMACC script.
"""
import os
import pandas as pd
import numpy as np
import logging
import xarray as xr
import zarr
from itertools import repeat
import time
import cea.utilities.parallel
logging.getLogger('numba').setLevel(logging.WARNING)
import cea.config
import cea.utilities
import cea.inputlocator
import cea.demand.demand_main
import cea.resources.radiation_daysim.radiation_main
import cea.bigmacc.bigmacc_rules
import cea.bigmacc.wesbrook_DH_single
import cea.bigmacc.wesbrook_DH_multi
import cea.utilities.dbf
import cea.datamanagement.archetypes_mapper
import cea.datamanagement.data_initializer
import cea.analysis.costs.system_costs
import cea.analysis.lca.main
import cea.bigmacc.bigmacc_util as util
__author__ = "<NAME>"
__copyright__ = ""
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = ""
__email__ = ""
__status__ = ""
def generate_building_list(config):
locator = cea.inputlocator.InputLocator(config.scenario)
data = pd.read_csv(locator.get_total_demand())
return np.array(data['Name'])
def hourly_xr_get_hourly_results(config, bldg):
locator = cea.inputlocator.InputLocator(config.scenario)
return pd.read_csv(locator.get_demand_results_file(bldg))
def hourly_xr_create_hourly_results_df(config):
buildings = generate_building_list(config)
interior_temp_dict = dict()
pv_gen_dict = dict()
operative_temp_dict = dict()
district_dhw_dict = dict()
district_heat_dict = dict()
district_cool_dict = dict()
electrical_aux_dict = dict()
electrical_dhw_dict = dict()
electrical_heat_dict = dict()
electrical_cool_dict = dict()
heatloss_rad_dict = dict()
heatgain_solar_dict = dict()
grid_dict = dict()
electrical_appliances_dict = dict()
electrical_ev_dict = dict()
electrical_refrig_dict = dict()
electrical_data_cool_dict = dict()
electrical_ind_process_dict = dict()
electrical_data_dict = dict()
ng_dhw_dict = dict()
ng_heat_dict = dict()
heat_enduse_sys_dict = dict()
heat_enduse_dict = dict()
dhw_enduse_sys_dict = dict()
dhw_enduse_dict = dict()
cool_enduse_sys_dict = dict()
cool_enduse_dict = dict()
for bldg in buildings.tolist():
print(f' - Adding {bldg} to the dataarray.')
data = hourly_xr_get_hourly_results(config, bldg)
interior_temp_dict[bldg] = data['T_int_C'] # 0
pv_gen_dict[bldg] = data['PV_kWh'] # 1
operative_temp_dict[bldg] = data['theta_o_C'] # 2
district_dhw_dict[bldg] = data['DH_ww_kWh'] # 3
district_heat_dict[bldg] = data['DH_hs_kWh'] # 4
district_cool_dict[bldg] = data['DC_cs_kWh'] # 5
electrical_aux_dict[bldg] = data['Eaux_kWh'] # 6
electrical_dhw_dict[bldg] = data['E_ww_kWh'] # 7
electrical_heat_dict[bldg] = data['E_hs_kWh'] # 8
electrical_cool_dict[bldg] = data['E_cs_kWh'] # 9
heatloss_rad_dict[bldg] = data['I_rad_kWh'] # 10
heatgain_solar_dict[bldg] = data['I_sol_kWh'] # 11
grid_dict[bldg] = data['GRID_kWh'] # 12
electrical_appliances_dict[bldg] = data['Eal_kWh'] # 13
electrical_ev_dict[bldg] = data['Ev_kWh'] # 14
electrical_refrig_dict[bldg] = data['E_cre_kWh'] # 15
electrical_data_cool_dict[bldg] = data['E_cdata_kWh'] # 16
electrical_ind_process_dict[bldg] = data['Epro_kWh'] # 17
electrical_data_dict[bldg] = data['Edata_kWh'] # 18
ng_dhw_dict[bldg] = data['NG_ww_kWh'] # 19
ng_heat_dict[bldg] = data['NG_hs_kWh'] # 20
heat_enduse_sys_dict[bldg] = data['Qhs_sys_kWh'] # 21
heat_enduse_dict[bldg] = data['Qhs_kWh'] # 22
dhw_enduse_sys_dict[bldg] = data['Qww_sys_kWh'] # 23
dhw_enduse_dict[bldg] = data['Qww_kWh'] # 24
cool_enduse_sys_dict[bldg] = data['Qcs_sys_kWh'] # 25
cool_enduse_dict[bldg] = data['Qcs_kWh'] # 26
return [interior_temp_dict, pv_gen_dict, operative_temp_dict, district_dhw_dict,
district_heat_dict, district_cool_dict, electrical_aux_dict, electrical_dhw_dict, electrical_heat_dict,
electrical_cool_dict, heatloss_rad_dict, heatgain_solar_dict, grid_dict, electrical_appliances_dict,
electrical_ev_dict, electrical_refrig_dict, electrical_data_cool_dict,
electrical_ind_process_dict, electrical_data_dict, ng_dhw_dict, ng_heat_dict,
heat_enduse_sys_dict, heat_enduse_dict, dhw_enduse_sys_dict, dhw_enduse_dict,
cool_enduse_sys_dict, cool_enduse_dict]
def hourly_xr_get_annual_results(config):
locator = cea.inputlocator.InputLocator(config.scenario)
embodied_carbon_path = locator.get_lca_embodied()
operational_carbon_path = locator.get_lca_operation()
building_tac_path = locator.get_building_tac_file()
supply_syst_path = locator.get_costs_operation_file()
emb_carbon = pd.read_csv(embodied_carbon_path)['GHG_sys_embodied_tonCO2'].sum()
op_carbon_district = pd.read_csv(operational_carbon_path)['GHG_sys_district_scale_tonCO2'].sum()
op_carbon_building = pd.read_csv(operational_carbon_path)['GHG_sys_building_scale_tonCO2'].sum()
build_costs_opex = pd.read_csv(building_tac_path)['opex_building_systems'].sum()
build_costs_capex = pd.read_csv(building_tac_path)['capex_building_systems'].sum()
supply_costs_opex = pd.read_csv(supply_syst_path)['Opex_sys_USD'].sum()
supply_costs_capex = pd.read_csv(supply_syst_path)['Capex_total_sys_USD'].sum()
return [emb_carbon, op_carbon_district, op_carbon_building, build_costs_opex, build_costs_capex, supply_costs_opex,
supply_costs_capex]
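# Hedged sketch (not in the original script): attach explicit labels to the
# positional list returned above, which makes downstream bookkeeping clearer.
def _example_label_annual_results(annual_results):
    return pd.Series(
        annual_results,
        index=['emb_carbon', 'op_carbon_district', 'op_carbon_building',
               'build_costs_opex', 'build_costs_capex',
               'supply_costs_opex', 'supply_costs_capex'],
    )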
def hourly_xr_create_hourly_dataset(config):
scenario = config.general.parent
strategy = config.bigmacc.key
time_arr = pd.date_range("{}-01-01".format(scenario.split('_')[1]), periods=8760, freq="h")
data = hourly_xr_create_hourly_results_df(config)
annual_results = hourly_xr_get_annual_results(config)
print(' - Creating dataset.')
d = xr.Dataset(
data_vars=dict(
interior_temp_C=(["times", "buildings"], pd.DataFrame.from_dict(data[0]).to_numpy()),
pv_generated_kwh=(["times", "buildings"], | pd.DataFrame.from_dict(data[1]) | pandas.DataFrame.from_dict |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from constants import *
from datetime import datetime
import lightgbm as lgb
import numpy as np
from sklearn.model_selection import KFold
import pandas as pd
import utils
import os
import ndcg_tools
import math
import gc
import sys
seed = SEED
cur_stage = CUR_STAGE
version = datetime.now().strftime("%m%d%H%M%S")
print('Version: ', version)
weights = [6.5,1]
print('Now using weight: ', weights)
LR = '0.005'
load_model = ""
if len(sys.argv) > 2:
LR = sys.argv[1]
load_model = sys.argv[2]
print('LR Status: ', LR, ' load_model: ', load_model)
def modeling(train_X, train_Y, test_X, test_Y, categoricals, mode, OPT_ROUNDS=600, weight=None):
EARLY_STOP = 300
OPT_ROUNDS = OPT_ROUNDS
MAX_ROUNDS = 10000
params = {
'boosting': 'gbdt',
'metric' : 'binary_logloss',
#'metric' : 'auc',
'objective': 'binary',
'learning_rate': float(LR),
'max_depth': -1,
'min_child_samples': 20,
'max_bin': 255,
'subsample': 0.85,
'subsample_freq': 10,
'colsample_bytree': 0.8,
'min_child_weight': 0.001,
'subsample_for_bin': 200000,
'min_split_gain': 0,
'reg_alpha': 0,
'reg_lambda': 0,
'num_leaves':63,
'seed': seed,
'nthread': 16,
'scale_pos_weight': 1.5
#'is_unbalance': True,
}
print(f'Now Version {version}')
if mode == 'valid':
print('Start train and validate...')
print('feature number:', len(train_X.columns))
feat_cols = list(train_X.columns)
dtrain = lgb.Dataset(data=train_X, label=train_Y, feature_name=feat_cols,weight=weight)
dvalid = lgb.Dataset(data=test_X, label=test_Y, feature_name=feat_cols)
model = lgb.train(params,
dtrain,
categorical_feature=categoricals,
num_boost_round=MAX_ROUNDS,
early_stopping_rounds=EARLY_STOP,
verbose_eval=50,
valid_sets=[dtrain, dvalid],
valid_names=['train', 'valid']
)
importances = pd.DataFrame({'features':model.feature_name(),
'importances':model.feature_importance()})
importances.sort_values('importances',ascending=False,inplace=True)
importances.to_csv( (feat_imp_dir+'{}_imp.csv').format(version), index=False )
return model
else:
print('Start training... Please set OPT-ROUNDS.')
feat_cols = list(train_X.columns)
dtrain = lgb.Dataset(data=train_X, label=train_Y, feature_name=feat_cols,weight=weight)
print('feature number:', len(train_X.columns))
print('feature :', train_X.columns)
model = lgb.train(params,
dtrain,
categorical_feature=categoricals,
num_boost_round=OPT_ROUNDS,
verbose_eval=50,
valid_sets=[dtrain],
valid_names='train'
)
importances = pd.DataFrame({'features':model.feature_name(),
'importances':model.feature_importance()})
importances.sort_values('importances',ascending=False,inplace=True)
importances.to_csv( (feat_imp_dir+'{}_imp.csv').format(version), index=False )
model.save_model( lgb_model_dir+'{}.model'.format(version) )
return model
def predict(test_X, model):
print('Start Predict ...')
print('Num of features: ', len(test_X.columns))
print(test_X.columns)
block_len = len(test_X)//block_num
predicts = []
for block_id in range(block_num):
l = block_id * block_len
r = (block_id+1) * block_len
if block_id == block_num - 1:
predict = model.predict( test_X.iloc[l:], num_iteration=model.best_iteration)
else:
predict = model.predict( test_X.iloc[l:r], num_iteration=model.best_iteration)
predicts.append(predict)
predict = np.concatenate( predicts )
return predict
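# Hedged usage sketch with synthetic data and hypothetical column names: shows
# the intended flow of `modeling` in 'valid' mode followed by `predict`. The
# real features are built elsewhere in this repository, and `feat_imp_dir` /
# `block_num` still come from constants.py.
def _example_modeling_flow():
    rng = np.random.RandomState(seed)
    feats = pd.DataFrame(rng.rand(2000, 5), columns=[f'f{i}' for i in range(5)])
    label = (feats['f0'] + rng.rand(2000) > 1.0).astype(int)
    tr_X, va_X = feats.iloc[:1500], feats.iloc[1500:]
    tr_Y, va_Y = label.iloc[:1500], label.iloc[1500:]
    model = modeling(tr_X, tr_Y, va_X, va_Y, categoricals=[], mode='valid')
    return predict(va_X, model)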
def get_scores(ans=None,shift=0.0,bottom=0.25,after_deal=True):
phase_item_degree = utils.load_pickle(phase_full_item_degree_path.format(cur_stage))
df_valid_stage = utils.load_pickle(all_valid_stage_data_path.format(cur_stage))
df_valid = utils.load_pickle(all_valid_stage_data_path.format(cur_stage))
phase2valid_item_degree = {}
phase2median = {}
for sta in range(cur_stage+1):
cur_df_valid = df_valid[df_valid['stage']==sta]
items = cur_df_valid['item_id'].values
item_degree = phase_item_degree[sta]
list_item_degress = []
for item_id in items:
list_item_degress.append(item_degree[item_id])
list_item_degress.sort()
median_item_degree = list_item_degress[len(list_item_degress) // 2]
phase2median[sta] = median_item_degree
for item in items:
phase2valid_item_degree[(sta,item)] = item_degree[item]
old = False
if after_deal:
ans = ans.groupby( ['user', 'item'] )['label'].max().reset_index()
if old:
user_item_label = ans[ ['user','item','label'] ].values
user2stage = df_valid_stage[ ['user_id','stage'] ]
user2stage['user'] = user2stage['user_id']
user2stage = user2stage.drop('user_id', axis=1)
ans = pd.merge( ans, user2stage, how='left', on='user' )
sta_list = []
item_list = []
degree_list = []
for sta in range(cur_stage+1):
item_degrees = phase_item_degree[sta]
for item in item_degrees.keys():
sta_list.append(sta)
item_list.append(item)
degree_list.append( item_degrees[item] )
df_degree = pd.DataFrame( {'stage':sta_list, 'item':item_list, 'degree':degree_list} )
ans = pd.merge( ans, df_degree, how='left', on=['stage','item'] )
phase_median = ans.groupby('stage')['degree'].median().reset_index()
phase_median['median_degree'] = phase_median['degree']
phase_median = phase_median.drop('degree', axis=1)
ans = pd.merge(ans, phase_median, how='left', on ='stage')
ans['is_rare'] = ans['degree'] <= (ans['median_degree']+shift)
else:
user2stage = df_valid_stage[ ['user_id','stage'] ]
user2stage['user'] = user2stage['user_id']
user2stage = user2stage.drop('user_id', axis=1)
ans = pd.merge( ans, user2stage, how='left', on='user' )
vals = ans[ ['item','stage'] ].values
is_rare = []
for val in vals:
is_rare.append( phase_item_degree[ val[1] ][ val[0] ] <= phase2median[ val[1] ] )
ans['is_rare'] = is_rare
ans['is_rare'] = ans['is_rare'].astype('float') / bottom
ans['is_rare'] = ans['is_rare']+1.0
ans['label'] = ans['label'] * ans['is_rare']
else:
ans = ans.groupby( ['user', 'item'] )['label'].max().reset_index()
ans['label'] = -ans['label']
ans = ans.sort_values( by=['user','label'] )
user2recall = ans.groupby('user')['item'].agg(list)
user2pos = df_valid_stage[ ['user_id','item_id'] ].set_index('user_id')
all_scores = []
all_pred_items = {}
pickup = 500
for sta in range(cur_stage+1):
predictions = []
item_degree = phase_item_degree[sta]
now_users = df_valid_stage[ df_valid_stage['stage']==sta ]['user_id'].tolist()
answers = []
for now_user in now_users:
pos = user2pos.loc[now_user].values[0]
pred = user2recall.loc[now_user]
new_pred = []
for j in pred:
if len(new_pred) < pickup:
flag = 0
for k in new_pred:
if j == k:
flag = 1
break
if flag==0:
new_pred.append( j )
answers.append( ( pos, item_degree[ pos ] ) )
all_pred_items[now_user] = []
for pred in new_pred[:pickup]:
all_pred_items[now_user].append( pred )
predictions.append(new_pred[:50]+[0]*(50-len(new_pred)))
scores = ndcg_tools.evaluate_each_phase(predictions, answers, at=50)
all_scores.append(scores)
utils.dump_pickle(all_pred_items, rerank_path.format(pickup, mode))
for scores in all_scores:
print(scores)
print('all_scores_sum',np.array(all_scores).sum(axis=0))
print('7_9_all_scores_sum',np.array(all_scores[-3:]).sum(axis=0))
print('0_6_all_scores_sum',np.array(all_scores[0:7]).sum(axis=0))
return all_scores
def get_result(ans=None,shift=0.0,bottom=0.7,after_deal=True):
print(f'using bottom: {bottom}')
phase_item_degree = utils.load_pickle(phase_full_item_degree_path.format(cur_stage))
df_test_stage = utils.load_pickle(online_all_test_data_path.format(cur_stage))
df_valid = utils.load_pickle(all_valid_stage_data_path.format(cur_stage))
phase2valid_item_degree = {}
phase2median = {}
for sta in range(cur_stage+1):
cur_df_valid = df_valid[df_valid['stage']==sta]
items = cur_df_valid['item_id'].values
item_degree = phase_item_degree[sta]
list_item_degress = []
for item_id in items:
list_item_degress.append(item_degree[item_id])
list_item_degress.sort()
median_item_degree = list_item_degress[len(list_item_degress) // 2]
phase2median[sta] = median_item_degree
for item in items:
phase2valid_item_degree[(sta,item)] = item_degree[item]
old = False
if after_deal:
ans = ans.groupby( ['user', 'item'] )['label'].max().reset_index()
if old:
user_item_label = ans[ ['user','item','label'] ].values
user2stage = df_test_stage[ ['user_id','stage'] ]
user2stage['user'] = user2stage['user_id']
user2stage = user2stage.drop('user_id', axis=1)
ans = pd.merge( ans, user2stage, how='left', on='user' )
sta_list = []
item_list = []
degree_list = []
for sta in range(cur_stage+1):
item_degrees = phase_item_degree[sta]
for item in item_degrees.keys():
sta_list.append(sta)
item_list.append(item)
degree_list.append( item_degrees[item] )
df_degree = pd.DataFrame( {'stage':sta_list, 'item':item_list, 'degree':degree_list} )
ans = pd.merge( ans, df_degree, how='left', on=['stage','item'] )
phase_median = ans.groupby('stage')['degree'].median().reset_index()
phase_median['median_degree'] = phase_median['degree']
phase_median = phase_median.drop('degree', axis=1)
ans = pd.merge(ans, phase_median, how='left', on ='stage')
ans['is_rare'] = ans['degree'] <= (ans['median_degree']+shift)
else:
user2stage = df_test_stage[ ['user_id','stage'] ]
user2stage['user'] = user2stage['user_id']
user2stage = user2stage.drop('user_id', axis=1)
ans = pd.merge( ans, user2stage, how='left', on='user' )
vals = ans[ ['item','stage'] ].values
is_rare = []
for val in vals:
is_rare.append( phase_item_degree[ val[1] ][ val[0] ] <= phase2median[ val[1] ] )
ans['is_rare'] = is_rare
ans['is_rare'] = ans['is_rare'].astype('float') / bottom
ans['is_rare'] = ans['is_rare']+1.0
ans['label'] = ans['label'] * ans['is_rare']
else:
ans = ans.groupby( ['user', 'item'] )['label'].max().reset_index()
ans['label'] = -ans['label']
ans = ans.sort_values( by=['user','label'] )
user2recall = ans.groupby('user')['item'].agg(list)
df_train_stage = utils.load_pickle(online_all_train_stage_data_path.format(cur_stage))
all_scores = []
all_pred_items = {}
pickup = 500
predictions = {}
for sta in range(cur_stage+1):
now_users = df_test_stage[ df_test_stage['stage'] == sta ]['user_id'].tolist()
df_train = df_train_stage[ df_train_stage['stage'] == sta ]
hot_items = df_train['item_id'].value_counts().index.tolist()
answers = []
for now_user in now_users:
pred = user2recall.loc[now_user]
new_pred = []
for j in pred:
if (len(new_pred) < pickup) and (j not in new_pred):
new_pred.append( j )
all_pred_items[now_user] = []
for pred in new_pred[:pickup]:
all_pred_items[now_user].append(pred)
new_pred = new_pred[:50]
for j in hot_items:
if (len(new_pred) < 50) and (j not in new_pred):
new_pred.append( j )
predictions[now_user] = new_pred
utils.dump_pickle(all_pred_items, rerank_path.format(pickup, mode))
#check
with open(prediction_result+f'{version}_{LR}_result.csv','w') as file:
for idx,user in enumerate(predictions.keys()):
file.write(str(user)+','+','.join([str(p) for p in predictions[user]])+'\n')
def debug_scores(ans,shift=0.0, bottom=0.25,after_deal=True):
#1)
#count_info = ans.groupby( ['user', 'item'] )['label'].count().reset_index()
#def func(s):
# return math.log(1 + s)
#count_info['label'] = count_info['label'].apply( func )
#ans = ans.groupby( ['user', 'item'] )['label'].sum().reset_index()
#ans['label'] = ans['label'] / count_info['label']
#2)
#ans = ans.groupby( ['user', 'item'] )['label'].max().reset_index()
#3)
#ans = ans.groupby( ['user', 'item'] )['label'].mean().reset_index()
#4)
#ans = ans.groupby( ['user', 'item'] )['label'].sum().reset_index()
#5)
df_valid_stage = utils.load_pickle(all_valid_stage_data_path.format(cur_stage))
phase_item_degree = utils.load_pickle(phase_full_item_degree_path.format(cur_stage))
df_valid = utils.load_pickle(all_valid_stage_data_path.format(cur_stage))
phase2valid_item_degree = {}
phase2median = {}
for sta in range(cur_stage+1):
cur_df_valid = df_valid[df_valid['stage']==sta]
items = cur_df_valid['item_id'].values
item_degree = phase_item_degree[sta]
list_item_degress = []
for item_id in items:
list_item_degress.append(item_degree[item_id])
list_item_degress.sort()
median_item_degree = list_item_degress[len(list_item_degress) // 2]
phase2median[sta] = median_item_degree
for item in items:
phase2valid_item_degree[(sta,item)] = item_degree[item]
old = False
if after_deal:
ans = ans.groupby( ['user', 'item'] )['label'].max().reset_index()
if old:
user_item_label = ans[ ['user','item','label'] ].values
user2stage = df_valid_stage[ ['user_id','stage'] ]
user2stage['user'] = user2stage['user_id']
user2stage = user2stage.drop('user_id', axis=1)
ans = pd.merge( ans, user2stage, how='left', on='user' )
sta_list = []
item_list = []
degree_list = []
for sta in range(cur_stage+1):
item_degrees = phase_item_degree[sta]
for item in item_degrees.keys():
sta_list.append(sta)
item_list.append(item)
degree_list.append( item_degrees[item] )
df_degree = pd.DataFrame( {'stage':sta_list, 'item':item_list, 'degree':degree_list} )
ans = pd.merge( ans, df_degree, how='left', on=['stage','item'] )
phase_median = ans.groupby('stage')['degree'].median().reset_index()
phase_median['median_degree'] = phase_median['degree']
phase_median = phase_median.drop('degree', axis=1)
ans = pd.merge(ans, phase_median, how='left', on ='stage')
ans['is_rare'] = ans['degree'] <= (ans['median_degree']+shift)
else:
user2stage = df_valid_stage[ ['user_id','stage'] ]
user2stage['user'] = user2stage['user_id']
user2stage = user2stage.drop('user_id', axis=1)
ans = pd.merge( ans, user2stage, how='left', on='user' )
vals = ans[ ['item','stage'] ].values
is_rare = []
for val in vals:
is_rare.append( phase_item_degree[ val[1] ][ val[0] ] <= phase2median[ val[1] ] )
ans['is_rare'] = is_rare
ans['is_rare'] = ans['is_rare'].astype('float') / bottom
ans['is_rare'] = ans['is_rare']+1.0
ans['label'] = ans['label'] * ans['is_rare']
else:
ans = ans.groupby( ['user', 'item'] )['label'].max().reset_index()
ans['label'] = -ans['label']
ans = ans.sort_values( by=['user','label'] )
user2recall = ans.groupby('user')['item'].agg(list)
user2pos = df_valid_stage[ ['user_id','item_id'] ].set_index('user_id')
all_scores = []
for sta in range(cur_stage+1):
predictions = []
item_degree = phase_item_degree[sta]
now_users = df_valid_stage[ df_valid_stage['stage']==sta ]['user_id'].tolist()
answers = []
for now_user in now_users:
pos = user2pos.loc[now_user].values[0]
pred = user2recall.loc[now_user]
new_pred = []
for j in pred:
if len(new_pred) < 50:
flag = 0
for k in new_pred:
if j == k:
flag = 1
break
if flag==0:
new_pred.append( j )
answers.append( ( pos, item_degree[ pos ] ) )
predictions.append(new_pred+[0]*(50-len(new_pred)))
scores = ndcg_tools.evaluate_each_phase(predictions, answers, at=50)
all_scores.append(scores)
for scores in all_scores:
print(scores)
print('all_scores_sum',np.array(all_scores).sum(axis=0))
print('7_9_all_scores_sum',np.array(all_scores[-3:]).sum(axis=0))
print('0_6_all_scores_sum',np.array(all_scores[0:7]).sum(axis=0))
return all_scores
def calculate_user2degree( ans, df_valid_stage, phase_item_degree, top=50 ):
count_info = ans.groupby( ['user', 'item'] )['label'].count().reset_index()
def func(s):
return s-0.2
count_info['label'] = count_info['label'].apply( func )
ans = ans.groupby( ['user', 'item'] )['label'].sum().reset_index()
ans['label'] = ans['label'] / count_info['label']
ans['label'] = -ans['label']
ans = ans.sort_values( by=['user','label'] )
user2recall = ans.groupby('user')['item'].agg(list)
user2pos = df_valid_stage[ ['user_id','item_id'] ].set_index('user_id')
all_scores = []
user2degree = {}
user2stage = {}
for sta in range(cur_stage+1):
predictions = []
item_degree = phase_item_degree[sta]
now_users = df_valid_stage[ df_valid_stage['stage']==sta ]['user_id'].tolist()
answers = []
for now_user in now_users:
pos = user2pos.loc[now_user].values[0]
pred = user2recall.loc[now_user]
new_pred = []
for j in pred:
if len(new_pred) < 50:
flag = 0
for k in new_pred:
if j == k:
flag = 1
break
if flag==0:
new_pred.append( j )
answers.append( ( pos, item_degree[ pos ] ) )
degrees = []
for j in new_pred[:50]:
if j in item_degree:
degrees.append( item_degree[j] )
else:
print('no')
print(1/0)
user2degree[now_user] = np.mean( degrees )
user2stage[now_user] = sta
predictions.append(new_pred+[0]*(50-len(new_pred)))
return user2degree, user2stage
def model_merge_scores():
full = utils.load_pickle( lgb_ans_dir+ ('{}_{}_{}_ans.pkl').format('0529094924', mode, cur_stage ) )
rare = utils.load_pickle( lgb_ans_dir+ ('{}_{}_{}_ans.pkl').format('0529095003', mode, cur_stage ) )
df_valid_stage = utils.load_pickle(all_valid_stage_data_path.format(cur_stage))
phase_item_degree = utils.load_pickle(phase_full_item_degree_path.format(cur_stage))
use_gt = False
if use_gt:
user2degree = {}
for i in range( df_valid_stage.shape[0] ):
r = df_valid_stage.iloc[i]
user2degree[ r['user_id'] ] = phase_item_degree[r['stage']][ r['item_id'] ]
data = df_valid_stage.copy()
data['degree'] = data['user_id'].map( user2degree )
data['median_degree'] = data['stage'].map( data.groupby('stage')['degree'].quantile(0.5) )
data['is_rare'] = data['degree']<=data['median_degree']
data['user'] = data['user_id']
full = pd.merge( full, data[ ['user', 'is_rare'] ], how='left', on='user' )
rare = pd.merge( rare, data[ ['user', 'is_rare'] ], how='left', on='user' )
ans = full.copy()
#ans = ans.groupby( ['user', 'item'] )['label'].quantile(0.25).reset_index()
ans['label'] = full['label']*(1-full['is_rare']) + rare['label']*rare['is_rare']
else:
user2degree, user2stage = calculate_user2degree( rare.copy(), df_valid_stage, phase_item_degree, 50 )
data = pd.concat( [pd.Series(user2degree), | pd.Series(user2stage) | pandas.Series |
import string
import pandas as pd
import numpy as np
import doctest
from texthero import preprocessing, stopwords
from . import PandasTestCase
"""
Test doctest
"""
def load_tests(loader, tests, ignore):
tests.addTests(doctest.DocTestSuite(preprocessing))
return tests
class TestPreprocessing(PandasTestCase):
"""
Test remove digits.
"""
def test_remove_digits_only_block(self):
s = pd.Series("remove block of digits 1234 h1n1")
s_true = pd.Series("remove block of digits h1n1")
self.assertEqual(preprocessing.remove_digits(s), s_true)
def test_remove_digits_any(self):
s = pd.Series("remove block of digits 1234 h1n1")
s_true = pd.Series("remove block of digits h n ")
self.assertEqual(preprocessing.remove_digits(s, only_blocks=False), s_true)
def test_remove_digits_brackets(self):
s = pd.Series("Digits in bracket (123 $) needs to be cleaned out")
s_true = pd.Series("Digits in bracket ( $) needs to be cleaned out")
self.assertEqual(preprocessing.remove_digits(s), s_true)
def test_remove_digits_start(self):
s = pd.Series("123 starting digits needs to be cleaned out")
s_true = pd.Series(" starting digits needs to be cleaned out")
self.assertEqual(preprocessing.remove_digits(s), s_true)
def test_remove_digits_end(self):
s = pd.Series("end digits needs to be cleaned out 123")
s_true = pd.Series("end digits needs to be cleaned out ")
self.assertEqual(preprocessing.remove_digits(s), s_true)
def test_remove_digits_phone(self):
s = pd.Series("+41 1234 5678")
s_true = pd.Series("+ ")
self.assertEqual(preprocessing.remove_digits(s), s_true)
def test_remove_digits_punctuation(self):
s = pd.Series(string.punctuation)
s_true = pd.Series(string.punctuation)
self.assertEqual(preprocessing.remove_digits(s), s_true)
"""
Test replace digits
"""
def test_replace_digits(self):
s = pd.Series("1234 falcon9")
s_true = pd.Series("X falcon9")
self.assertEqual(preprocessing.replace_digits(s, "X"), s_true)
def test_replace_digits_any(self):
s = pd.Series("1234 falcon9")
s_true = pd.Series("X falconX")
self.assertEqual(
preprocessing.replace_digits(s, "X", only_blocks=False), s_true
)
"""
Remove punctuation.
"""
def test_remove_punctation(self):
s = pd.Series("Remove all! punctuation!! ()")
s_true = pd.Series(
"Remove all punctuation "
) # TODO maybe just remove space?
self.assertEqual(preprocessing.remove_punctuation(s), s_true)
"""
Remove diacritics.
"""
def test_remove_diactitics(self):
s = pd.Series("Montréal, über, 12.89, Mère, Françoise, noël, 889, اِس, اُس")
s_true = pd.Series("Montreal, uber, 12.89, Mere, Francoise, noel, 889, اس, اس")
self.assertEqual(preprocessing.remove_diacritics(s), s_true)
"""
Remove whitespace.
"""
def test_remove_whitespace(self):
s = pd.Series("hello world hello world ")
s_true = pd.Series("hello world hello world")
self.assertEqual(preprocessing.remove_whitespace(s), s_true)
"""
Test pipeline.
"""
def test_pipeline_stopwords(self):
s = pd.Series("E-I-E-I-O\nAnd on")
s_true = pd.Series("e-i-e-i-o\n ")
pipeline = [preprocessing.lowercase, preprocessing.remove_stopwords]
self.assertEqual(preprocessing.clean(s, pipeline=pipeline), s_true)
"""
Test stopwords.
"""
def test_remove_stopwords(self):
text = "i am quite intrigued"
text_default_preprocessed = " quite intrigued"
text_spacy_preprocessed = " intrigued"
text_custom_preprocessed = "i quite "
self.assertEqual(
preprocessing.remove_stopwords(pd.Series(text)),
pd.Series(text_default_preprocessed),
)
self.assertEqual(
preprocessing.remove_stopwords(
pd.Series(text), stopwords=stopwords.SPACY_EN
),
pd.Series(text_spacy_preprocessed),
)
self.assertEqual(
preprocessing.remove_stopwords(
pd.Series(text), stopwords={"am", "intrigued"}
),
pd.Series(text_custom_preprocessed),
)
def test_stopwords_are_set(self):
self.assertEqual(type(stopwords.DEFAULT), set)
self.assertEqual(type(stopwords.NLTK_EN), set)
self.assertEqual(type(stopwords.SPACY_EN), set)
"""
Test remove html tags
"""
def test_remove_html_tags(self):
s = pd.Series("<html>remove <br>html</br> tags<html> ")
s_true = pd.Series("remove html tags ")
self.assertEqual(preprocessing.remove_html_tags(s), s_true)
"""
Text tokenization
"""
def test_tokenize(self):
s = pd.Series("text to tokenize")
s_true = pd.Series([["text", "to", "tokenize"]])
self.assertEqual(preprocessing.tokenize(s), s_true)
def test_tokenize_multirows(self):
s = pd.Series(["first row", "second row"])
s_true = pd.Series([["first", "row"], ["second", "row"]])
self.assertEqual(preprocessing.tokenize(s), s_true)
def test_tokenize_split_punctuation(self):
s = pd.Series(["ready. set, go!"])
s_true = pd.Series([["ready", ".", "set", ",", "go", "!"]])
self.assertEqual(preprocessing.tokenize(s), s_true)
def test_tokenize_not_split_in_between_punctuation(self):
s = pd.Series(["don't say hello-world"])
s_true = pd.Series([["don't", "say", "hello-world"]])
self.assertEqual(preprocessing.tokenize(s), s_true)
"""
Has content
"""
def test_has_content(self):
s = pd.Series(["c", np.nan, "\t\n", " ", "", "has content", None])
s_true = pd.Series([True, False, False, False, False, True, False])
self.assertEqual(preprocessing.has_content(s), s_true)
"""
Test remove urls
"""
def test_remove_urls(self):
s = pd.Series("http://tests.com http://www.tests.com")
s_true = pd.Series(" ")
self.assertEqual(preprocessing.remove_urls(s), s_true)
def test_remove_urls_https(self):
s = pd.Series("https://tests.com https://www.tests.com")
s_true = pd.Series(" ")
self.assertEqual(preprocessing.remove_urls(s), s_true)
def test_remove_urls_multiline(self):
s = | pd.Series("https://tests.com \n https://tests.com") | pandas.Series |
# -*- coding: UTF-8 -*-
"""
Created by louis at 2021/9/13
Description:
"""
import os
import gc
import glob
import torch
from torch import nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import pandas as pd
import time
from itertools import islice
from torch.utils.data import Dataset, DataLoader
from multiprocessing import Pool
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.model_selection import train_test_split
from torch.utils.tensorboard import SummaryWriter
from tqdm.auto import tqdm
import logging
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (2048, rlimit[1]))
datefmt = '%Y-%m-%d %H:%M:%S'
logging.basicConfig(filename='pytorch-baseline.log', filemode='w', format='%(asctime)s - %(levelname)s - %(message)s',
datefmt=datefmt, level=logging.DEBUG)
# import tqdm
tqdm.pandas()
import warnings
from multiprocessing import cpu_count
def get_path_dict(f, v):
f_dict = {}
for i in tqdm(v):
fpath = f'{f}/stock_id={i}'
flist = glob.glob(os.path.join(fpath, '*.parquet'))
if len(flist) > 0:
f_dict[i] = flist[0]
return f_dict
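# Hedged usage sketch (placeholder paths): builds the stock_id -> parquet file
# maps for the book and trade partitions, mirroring how the dictionaries used
# below are expected to be constructed.
def _example_build_path_dicts(data_path, stock_ids):
    book_dict = get_path_dict(f'{data_path}/book_train.parquet', stock_ids)
    trade_dict = get_path_dict(f'{data_path}/trade_train.parquet', stock_ids)
    return book_dict, trade_dict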
# train_idx, valid_idx = train_test_split(train_ds['row_id'], shuffle=True, test_size=0.1, random_state=SEED)
# ds: train.csv里面的数据 f_dict:是 book_train.parquet 里面的数据
def process_optiver_ds(ds, f_dict, skip_cols, t_dict):
x = []
y = []
full_seconds_in_bucket = {'seconds_in_bucket': np.arange(600)}
full_seconds_in_bucket = pd.DataFrame(full_seconds_in_bucket)
for stock_id, stock_fnmame in tqdm(f_dict.items()):
trade_train_ = t_dict.get(stock_id)
trade_train_ = pd.read_parquet(trade_train_)
optiver_ds = pd.read_parquet(stock_fnmame)
time_ids = optiver_ds['time_id'].unique()
for time_id in time_ids:
optiver_ds_ = optiver_ds[optiver_ds['time_id'] == time_id]
optiver_ds_ = pd.merge(full_seconds_in_bucket, optiver_ds_, how='left', on='seconds_in_bucket')
optiver_ds_ = pd.merge(optiver_ds_, trade_train_[trade_train_['time_id'] == time_id], how='left',
on='seconds_in_bucket')
# optiver_ds_.drop(skip_cols)
optiver_ds_.drop(['time_id_x', 'time_id_y'], axis=1)
optiver_ds_ = np.nan_to_num(optiver_ds_)
row_id = str(stock_id) + '-' + time_id.astype(str)
r = ds[ds['row_id'] == row_id]['target']
x.append(optiver_ds_)
y.append(r)
return x, y
def chunks(data, SIZE=10000):
it = iter(data)
for i in range(0, len(data), SIZE):
yield {k: data[k] for k in islice(it, SIZE)}
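# Hedged sketch: split a dict of per-stock file paths into roughly equal chunks,
# one per worker process, as the commented-out multiprocessing code further down
# does with NTHREADS.
def _example_chunked(path_dict, n_workers=4):
    size = max(1, int(len(path_dict) / n_workers))
    return [c for c in chunks(path_dict, size)]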
def process_book_train_chunk(chunk_ds):
return process_optiver_ds(train_ds, chunk_ds, book_skip_columns, trade_train_dict)
def process_book_test_chunk(chunk_ds):
return process_optiver_ds(test_ds, chunk_ds, book_skip_columns, trade_test_dict)
'''
# Split the samples into 4 chunks, 28 entries each
book_train_chunks = [i for i in chunks(book_train_dict, int(len(book_train_dict) / NTHREADS))]
# trade_train_chunks = [i for i in chunks(trade_train_dict, int(len(trade_train_dict) / NTHREADS))]
z = 1 if len(book_test_dict) < NTHREADS else NTHREADS
book_test_chunks = [i for i in chunks(book_test_dict, int(len(book_test_dict) / z))]
# trade_test_chunks = [i for i in chunks(trade_test_dict, int(len(trade_test_dict) / z))]
pool = Pool(NTHREADS) # create a process pool with at most NTHREADS processes
r = pool.map(process_book_train_chunk, book_train_chunks)
pool.close()
a1, a2 = zip(*r)
pool = Pool(NTHREADS) # create a process pool with at most NTHREADS processes
r = pool.map(process_book_test_chunk, book_test_chunks)
pool.close()
t_a1, t_a2 = zip(*r)
np_train = a1
np_target = a2'''
# Scaler
# transformers = []
# for i in tqdm(range(np_train.shape[1])):
# a = np.nan_to_num(np_train[train_idx])
# b = np.nan_to_num(np_train[valid_idx])
#
# transformer = StandardScaler() # StandardScaler is very useful!
# np_train[train_idx] = transformer.fit_transform(a)
# np_train[valid_idx] = transformer.transform(b)
# transformers.append(transformer) # Save Scalers for the inference stage
class LSTMModel(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder."""
def __init__(self, ntoken, ninp, nhid, nlayers, dropout=0.1):
super(LSTMModel, self).__init__()
# self.drop = nn.Dropout(dropout)
# self.encoder = nn.Embedding(ntoken, ninp)
self.rnn = nn.LSTM(ninp + input_features_num, nhid + input_features_num, nlayers, dropout=dropout,
batch_first=True, bidirectional=True)
self.regress_rnn = nn.Sequential(
nn.BatchNorm1d(2 * nhid + 2 * input_features_num),
nn.Linear(2 * nhid + 2 * input_features_num, 1),
nn.Sigmoid()
)
self.decoder = nn.Sequential(
nn.BatchNorm1d(3 * nhid + 2 * input_features_num),
nn.Linear(3 * nhid + 2 * input_features_num, nhid + input_features_num),
nn.ReLU(),
nn.Dropout(0.2),
nn.BatchNorm1d(nhid + input_features_num),
nn.Linear(nhid + input_features_num, ntoken),
nn.ReLU(),
nn.Dropout(0.1),
nn.BatchNorm1d(ntoken),
nn.Linear(ntoken, 1),
nn.Sigmoid()
)
self.self_attention = nn.Sequential(
nn.Linear(3 * nhid + 2 * input_features_num, 10 * (nhid + input_features_num)),
nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(10 * (nhid + input_features_num), 10 * (nhid + input_features_num)),
nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(10 * (nhid + input_features_num), 3 * nhid + 2 * input_features_num),
nn.Softmax(dim=1)
)
# self.decoder_1 = nn.Linear(nhid, ntoken)
# self.decoder_2 = nn.Linear(ntoken, 1)
self.conv1d_relu_stack = nn.Sequential(
nn.Conv1d(in_channels=600, out_channels=1200, kernel_size=3),
nn.Dropout(0.1),
nn.ReLU(), # 9
nn.Conv1d(in_channels=1200, out_channels=1200, kernel_size=3),
nn.Dropout(0.2),
nn.ReLU(), # 7
nn.Conv1d(in_channels=1200, out_channels=1200, kernel_size=3),
nn.Dropout(0.2),
nn.ReLU(), # 5
nn.Conv1d(in_channels=1200, out_channels=600, kernel_size=3),
nn.Dropout(0.1),
nn.ReLU(), # 3
nn.Conv1d(in_channels=600, out_channels=nhid, kernel_size=3),
nn.ReLU(), # 1
)
self.regress_conv = nn.Sequential(
nn.BatchNorm1d(nhid),
nn.Linear(nhid, 1),
nn.Sigmoid()
)
self.linear_relu_stack = nn.Sequential(
nn.Linear(input_features_num, ntoken),
nn.Dropout(0.1),
nn.ReLU(),
nn.Linear(ntoken, ninp),
nn.Dropout(0.2),
nn.ReLU(),
nn.Linear(ninp, ninp),
nn.Dropout(0.2),
nn.ReLU(),
)
self.ninp = ninp
self.nhid = nhid
self.nlayers = nlayers
def forward(self, input):
# emb = self.drop(self.encoder(input))
cov_logits = self.conv1d_relu_stack(input)
cov_logits = cov_logits.view(cov_logits.shape[0], cov_logits.shape[1])
regress_conv_out = self.regress_conv(cov_logits)
logits = self.linear_relu_stack(input)
logits = torch.cat((logits, input), 2)
# logits = logits.view(1, len(logits), -1)
output, hidden = self.rnn(logits)
output = output[:, -1, :]
regress_rnn_out = self.regress_rnn(output)
new_logits = torch.cat((cov_logits, output), 1)
# attention_output = self.self_attention(new_logits)
# output = self.drop(output)
new_logits = torch.mul(new_logits, self.self_attention(new_logits))
# decoded_out = self.decoder(new_logits)
decoded_out = self.decoder(new_logits)
# decoded_2 = self.decoder_2(decoded_1)
return regress_conv_out, regress_rnn_out, decoded_out
def init_hidden(self, bsz):
weight = next(self.parameters())
return (weight.new_zeros(self.nlayers, bsz, self.nhid),
weight.new_zeros(self.nlayers, bsz, self.nhid))
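# Hedged smoke-test sketch (not part of the original script): pushes one dummy
# batch through the network. It relies on the module-level input_features_num;
# note the Conv1d stack shrinks the feature axis by 10, so the view() inside
# forward() only works when input_features_num is 11 (e.g. 8 book + 3 trade columns).
def _example_lstm_forward():
    model = LSTMModel(ntoken=64, ninp=32, nhid=128, nlayers=2)
    dummy = torch.randn(8, 600, input_features_num)  # (batch, seconds_in_bucket, features)
    conv_out, rnn_out, decoded = model(dummy)
    # Each head emits one prediction per sample.
    return conv_out.shape, rnn_out.shape, decoded.shape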
# dataloader = DataLoader(transformed_dataset, batch_size=4,
# shuffle=True, num_workers=0)
def rmspe(y_pred,y_true):
rms = np.sqrt(np.mean(np.square((y_true - y_pred) / y_true)))
return rms
def RMSPELoss(y_pred, y_true):
return torch.sqrt(torch.mean(((y_true - y_pred) / y_true) ** 2)).clone()
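# Hedged sanity check: the numpy metric and the torch loss implement the same
# root mean squared percentage error, so they should agree on identical inputs.
def _example_check_rmspe():
    y_true = np.array([0.01, 0.02, 0.03], dtype=np.float32)
    y_pred = np.array([0.012, 0.018, 0.033], dtype=np.float32)
    np_val = rmspe(y_pred, y_true)
    torch_val = RMSPELoss(torch.tensor(y_pred), torch.tensor(y_true)).item()
    assert abs(float(np_val) - torch_val) < 1e-5
    return np_val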
def do_process(optiver_ds, full_seconds_in_bucket, trade__, time_id):
optiver_ds_ = optiver_ds[optiver_ds['time_id'] == time_id]
if optiver_ds_.size == 0:
return None
optiver_ds_ = pd.merge(full_seconds_in_bucket, optiver_ds_, how='left', on='seconds_in_bucket')
optiver_ds_ = pd.merge(optiver_ds_, trade__[trade__['time_id'] == time_id], how='left',
on='seconds_in_bucket')
# optiver_ds_.drop(skip_cols)
optiver_ds_ = optiver_ds_.drop(['time_id_x', 'time_id_y', 'seconds_in_bucket'], axis=1)
optiver_ds_ = np.nan_to_num(optiver_ds_)
# TODO: standardise each column
for i in range(optiver_ds_.shape[1]):
if np.sum(optiver_ds_[:, i]) != 0 and np.std(optiver_ds_[:, i]) != 0:
optiver_ds_[:, i] = (optiver_ds_[:, i] - np.mean(optiver_ds_[:, i])) / np.std(optiver_ds_[:, i])
return optiver_ds_
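# Hedged sketch (illustration only): the per-column loop above amounts to a
# vectorised z-score over the non-constant, non-empty columns.
def _example_zscore(arr):
    mean = arr.mean(axis=0)
    std = arr.std(axis=0)
    keep = (arr.sum(axis=0) != 0) & (std != 0)
    out = arr.copy()
    out[:, keep] = (arr[:, keep] - mean[keep]) / std[keep]
    return out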
def process_train_bach(arg):
# input_0 = []
# target_0 = []
stock_id = arg['stock_id']
time_id = arg['time_id']
# optiver_ds = arg['optiver_ds']
# full_seconds_in_bucket = arg['full_seconds_in_bucket']
# trade_train_ = arg['trade_train_']
path = f"{DATA_PATH}formated_data/{stock_id}/"
optiver_ds_ = pd.read_parquet(f'{path}{time_id}.parquet').to_numpy()
# row_id = str(stock_id) + '-' + time_id.astype(str)
np_target = pd.read_parquet(f'{path}{time_id}_target.parquet')['target'].to_numpy()
return optiver_ds_, np_target[0]
def process_test_bach(time_id, ARGS):
optiver_ds = ARGS['optiver_ds']
full_seconds_in_bucket = ARGS['full_seconds_in_bucket']
trade_test_ = ARGS['trade_test_']
optiver_ds_ = do_process(optiver_ds, full_seconds_in_bucket, trade_test_, time_id)
return optiver_ds_
def train_bach(epoch):
# lstmmodel.load_state_dict(torch.load('train_out/model_weights_240.pth'))
full_seconds_in_bucket = {'seconds_in_bucket': np.arange(600)} # seconds_in_bucket runs up to 600 but is not contiguous in the training data, so build the full index here
full_seconds_in_bucket = pd.DataFrame(full_seconds_in_bucket)
# lstmmodel.zero_grad()
# pool = Pool(30) # create a process pool, max NTHREADS processes
for stock_id, stock_fnmame in book_train_dict.items():
trade_train_parquet = trade_train_dict.get(stock_id)
trade_train_ = pd.read_parquet(trade_train_parquet)
book_train = pd.read_parquet(stock_fnmame)
loss_0_each_stock = []
loss_1_each_stock = []
loss_2_each_stock = []
loss_each_stock = []
output_each_stock = []
target_each_stock = []
each_stock_train_data = {}
time_ids = book_train['time_id'].unique()
params = []
# time_ids=time_ids[1:20]
# Load all of the pre-formatted data for one stock at a time
for time_id in tqdm(time_ids):
ARGS_ = dict(optiver_ds=book_train, full_seconds_in_bucket=full_seconds_in_bucket,
trade_train_=trade_train_, stock_id=stock_id, time_id=time_id)
params.append(ARGS_)
# input_, target_ = process_train_bach(ARGS_)
# each_stock_train_data[time_id] = dict(input_=input_, target_=target_)
with Pool(8) as p:
r = p.map(process_train_bach, params)
input_, target_ = zip(*r)
for i in range(len(time_ids)):
each_stock_train_data[time_ids[i]] = dict(input_=input_[i], target_=target_[i])
# Draw a small batch each iteration, over several iterations
for i in tqdm(range(int(len(time_ids) / 20))):
batch_time_ids = np.random.choice(time_ids, 50) # sample a batch without overwriting the full list of time_ids
input_0 = []
target_0 = []
for time_id in batch_time_ids:
input_0.append(each_stock_train_data[time_id]['input_'])
target_0.append([each_stock_train_data[time_id]['target_']])
input_1 = torch.tensor(input_0, dtype=torch.float32, requires_grad=True).to(device)
target_ = torch.tensor(target_0, dtype=torch.float32).to(device)
conv_out, rnn_out, output_2 = lstmmodel(input_1)
loss_0 = criterion(conv_out, target_)
loss_1 = criterion(rnn_out, target_)
loss_2 = RMSPELoss(output_2, target_)
loss_ = torch.mul(0.1, loss_0) + torch.mul(0.1, loss_1) + loss_2
optimizer_2.zero_grad()
loss_.backward(retain_graph=True)
optimizer_2.step()
output_each_stock.append(output_2.cpu().detach().numpy().ravel())
target_each_stock.append(np.array(target_0).ravel())
loss_0_each_stock.append(loss_0.item())
loss_1_each_stock.append(loss_1.item())
loss_2_each_stock.append(loss_2.item())
loss_each_stock.append(loss_.item())
mean_loss_0 = np.mean(loss_0_each_stock)
mean_loss_1 = np.mean(loss_1_each_stock)
mean_loss_2 = np.mean(loss_2_each_stock)
mean_loss = np.mean(loss_each_stock)
logging.debug(f'epoch = {epoch} , stock_id = {stock_id} , loss_each_stock : {mean_loss}')
rmspe_ = rmspe(np.array(output_each_stock), np.array(target_each_stock))
logging.debug(
f'epoch = {epoch} , stock_id = {stock_id} , rmspe each stock : {rmspe_}')
# loss_all.append(np.mean(loss_each_stock))
writer.add_scalar('V2-LOSS_0', mean_loss_0, writer.count)
writer.add_scalar('V2-LOSS_1', mean_loss_1, writer.count)
writer.add_scalar('V2-LOSS_2', mean_loss_2, writer.count)
writer.add_scalar('V2-LOSS', mean_loss, writer.count)
writer.add_scalar('V2-rmspe', rmspe_, writer.count)
writer.count += 1
torch.save(lstmmodel.state_dict(), 'train_out/model_weights_' + str(epoch) + '.pth')
# Evaluate on the validation set after every epoch
# with torch.no_grad():
# test()
# idx = np.arange(np_train.shape[0])
# train_idx, valid_idx = train_test_split(idx, shuffle=True, test_size=0.1, random_state=SEED)
def start_train():
for epoch in range(1, EPOCH_ACCOUNT):
train_bach(epoch)
def predict():
full_seconds_in_bucket = {'seconds_in_bucket': np.arange(600)}
full_seconds_in_bucket = | pd.DataFrame(full_seconds_in_bucket) | pandas.DataFrame |
# pylint: disable=redefined-outer-name,protected-access
# pylint: disable=missing-function-docstring,missing-module-docstring,missing-class-docstring
"""This module contains tests of the tabulator Data Grid"""
# http://tabulator.info/docs/4.7/quickstart
# https://github.com/paulhodel/jexcel
import pandas as pd
import panel as pn
import param
import pytest
from _pytest._code.code import TerminalRepr
from bokeh.models import ColumnDataSource
from awesome_panel_extensions.developer_tools.designer import Designer
from awesome_panel_extensions.developer_tools.designer.services.component_reloader import (
ComponentReloader,
)
from awesome_panel_extensions.widgets.tabulator import CSS_HREFS, Tabulator, TabulatorStylesheet
def _data_records():
return [
{"id": 1, "name": "<NAME>", "age": 12, "col": "red", "dob": pd.Timestamp("14/05/1982")},
{"id": 2, "name": "<NAME>", "age": 1, "col": "blue", "dob": pd.Timestamp("14/05/1982")},
{
"id": 3,
"name": "<NAME>",
"age": 42,
"col": "green",
"dob": pd.Timestamp("22/05/1982"),
},
{
"id": 4,
"name": "<NAME>",
"age": 125,
"col": "orange",
"dob": pd.Timestamp("01/08/1980"),
},
{
"id": 5,
"name": "<NAME>",
"age": 16,
"col": "yellow",
"dob": pd.Timestamp("31/01/1999"),
},
]
@pytest.fixture()
def data_records():
return _data_records()
@pytest.fixture()
def dataframe(data_records):
return pd.DataFrame(data=data_records)
@pytest.fixture()
def data_list(dataframe):
return dataframe.to_dict("list")
@pytest.fixture()
def column_data_source(data_list):
return ColumnDataSource(data_list)
@pytest.fixture()
def columns():
return [
{
"title": "Id",
"field": "id",
"sorter": "number",
"formatter": "money",
"hozAlign": "right",
},
{
"title": "Name",
"field": "name",
"sorter": "string",
"formatter": "plaintext",
"hozAlign": "left",
},
{
"title": "Age",
"field": "age",
"sorter": "number",
"formatter": "money",
"hozAlign": "right",
},
{
"title": "Col",
"field": "col",
"sorter": "string",
"formatter": "plaintext",
"hozAlign": "left",
},
{
"title": "Dob",
"field": "dob",
"sorter": "datetime",
"formatter": "datetime",
"hozAlign": "left",
},
]
@pytest.fixture()
def configuration():
# http://tabulator.info/docs/4.7/quickstart
return {"autoColumns": True}
@pytest.fixture
def tabulator(configuration, dataframe):
return Tabulator(configuration=configuration, value=dataframe)
def test_constructor():
# When
tabulator = Tabulator()
# Then
assert not tabulator.value
assert isinstance(tabulator._source, ColumnDataSource)
assert tabulator.configuration == {"autoColumns": True}
assert tabulator.selection == []
assert tabulator.selected_values is None
def test_tabulator_from_dataframe(dataframe, configuration):
tabulator = Tabulator(value=dataframe, configuration=configuration)
assert isinstance(tabulator._source, ColumnDataSource)
def test_tabulator_from_column_data_source(column_data_source, configuration):
tabulator = Tabulator(value=column_data_source, configuration=configuration)
assert tabulator._source == tabulator.value
def test_dataframe_to_columns_configuration(dataframe, columns):
# Given
value = dataframe
# When
actual = Tabulator.to_columns_configuration(value)
# Then
assert actual == columns
def test_config_default():
# When
Tabulator.config()
# Then
assert CSS_HREFS["default"] in pn.config.css_files
def test_config_none():
# Given
css_count = len(pn.config.css_files)
pn.config.js_files.clear()
# When
Tabulator.config(css=None)
# Then
assert len(pn.config.css_files) == css_count
def test_config_custom():
# When
Tabulator.config(css="materialize")
# Then
assert CSS_HREFS["materialize"] in pn.config.css_files
def test_selection_dataframe(data_records, dataframe):
# Given
tabulator = Tabulator(value=dataframe)
# When
tabulator.selection = [0, 1, 2]
actual = tabulator.selected_values
# Then
expected = pd.DataFrame(data=data_records[0:3])
pd.testing.assert_frame_equal(actual, expected)
def test_selection_column_data_source(data_records, column_data_source):
# Given
tabulator = Tabulator(value=column_data_source)
# When
tabulator.selection = [0, 1, 2]
actual = tabulator.selected_values
# Then
# I could not find a more direct way to test this.
expected_as_df = pd.DataFrame(data=data_records[0:3])
pd.testing.assert_frame_equal(actual.to_df().drop(columns="index"), expected_as_df)
@pytest.mark.parametrize(
["field", "expected"],
[
("name", "Name"),
("cheese cake", "Cheese Cake"),
("cheese_cake", "Cheese Cake"),
],
)
def test_to_title(field, expected):
assert Tabulator._to_title(field) == expected
def test_tabulator_comms(document, comm, column_data_source, configuration):
# Given
tabulator = Tabulator(value=column_data_source, configuration=configuration)
widget = tabulator.get_root(document, comm=comm)
# Then
assert isinstance(widget, tabulator._widget_type)
assert widget.source == column_data_source
assert widget.configuration == configuration
# When
with param.edit_constant(tabulator):
tabulator._process_events(
{
"configuration": {"a": 1},
}
)
# Then
assert tabulator.configuration == {"a": 1}
def test_selected_change(tabulator):
# When
tabulator.selection = [2, 4, 6]
# Then
assert tabulator._source.selected.indices == [2, 4, 6]
def test_source_selection_change(tabulator):
# When
tabulator._process_events({"indices": [2, 4, 6]})
# Then
assert tabulator.selection == [2, 4, 6]
def test_tabulator_style_sheet():
# When
stylesheet = TabulatorStylesheet(theme="materialize")
# Then
assert stylesheet.object.startswith("<link rel=")
assert CSS_HREFS["materialize"] in stylesheet.object
assert stylesheet.object.endswith(">")
# When
stylesheet.theme = "site"
assert CSS_HREFS["site"] in stylesheet.object
def test_cell_change_when_dataframe():
# Given
value = pd.DataFrame({"x": [1, 2], "y": ["a", "b"]})
tabulator = Tabulator(value=value)
original_data = tabulator._source.data
# When
tabulator._cell_change = {"c": "x", "i": 1, "v": 3}
# Then
assert tabulator.value.loc[1, "x"] == 3
# And the tabulator._source.data shall not have been updated
# We currently use the _pause_cds_updates parameter to avoid reupdating the _source.data
assert tabulator._source.data is original_data
def test_cell_change_when_column_data_source():
# Given
value = ColumnDataSource(pd.DataFrame({"x": [1, 2], "y": ["a", "b"]}))
tabulator = Tabulator(value=value)
# When
tabulator._cell_change = {"c": "x", "i": 1, "v": 3}
# Then we assume the columndatasource has been update on the js side
# and therefore don't update on the python side
assert tabulator.value.to_df().loc[1, "x"] == 2
# region stream
VALUE_CHANGED_COUNT = 0
def test_stream_dataframe_dataframe_value():
# Given
value = pd.DataFrame({"x": [1, 2], "y": ["a", "b"]})
tabulator = Tabulator(value=value)
stream_value = pd.DataFrame({"x": [3, 4], "y": ["c", "d"]})
# Used to test that value event is triggered
global VALUE_CHANGED_COUNT
VALUE_CHANGED_COUNT = 0
@param.depends(tabulator.param.value, watch=True)
def _inc(*events):
global VALUE_CHANGED_COUNT
VALUE_CHANGED_COUNT += 1
# When
tabulator.stream(stream_value)
# Then
tabulator_source_df = tabulator._source.to_df().drop(columns=["index"])
expected = pd.DataFrame({"x": [1, 2, 3, 4], "y": ["a", "b", "c", "d"]})
pd.testing.assert_frame_equal(tabulator.value, expected)
pd.testing.assert_frame_equal(tabulator_source_df, expected)
assert VALUE_CHANGED_COUNT == 1
def test_stream_dataframe_series_value():
# Given
value = pd.DataFrame({"x": [1, 2], "y": ["a", "b"]})
tabulator = Tabulator(value=value)
stream_value = pd.DataFrame({"x": [3, 4], "y": ["c", "d"]}).loc[1]
# Used to test that value event is triggered
global VALUE_CHANGED_COUNT
VALUE_CHANGED_COUNT = 0
@param.depends(tabulator.param.value, watch=True)
def _inc(*events):
global VALUE_CHANGED_COUNT
VALUE_CHANGED_COUNT += 1
# When
tabulator.stream(stream_value)
# Then
tabulator_source_df = tabulator._source.to_df().drop(columns=["index"])
expected = pd.DataFrame({"x": [1, 2, 4], "y": ["a", "b", "d"]})
pd.testing.assert_frame_equal(tabulator.value, expected)
pd.testing.assert_frame_equal(
tabulator_source_df, expected, check_column_type=False, check_dtype=False
)
assert VALUE_CHANGED_COUNT == 1
def test_stream_dataframe_dictionary_value_multi():
# Given
value = pd.DataFrame({"x": [1, 2], "y": ["a", "b"]})
tabulator = Tabulator(value=value)
stream_value = {"x": [3, 4], "y": ["c", "d"]}
# Used to test that value event is triggered
global VALUE_CHANGED_COUNT
VALUE_CHANGED_COUNT = 0
@param.depends(tabulator.param.value, watch=True)
def _inc(*events):
global VALUE_CHANGED_COUNT
VALUE_CHANGED_COUNT += 1
# When PROVIDING A DICTIONARY OF COLUMNS
tabulator.stream(stream_value)
# Then
tabulator_source_df = tabulator._source.to_df().drop(columns=["index"])
expected = pd.DataFrame({"x": [1, 2, 3, 4], "y": ["a", "b", "c", "d"]})
pd.testing.assert_frame_equal(tabulator.value, expected)
pd.testing.assert_frame_equal(
tabulator_source_df, expected, check_column_type=False, check_dtype=False
)
assert VALUE_CHANGED_COUNT == 1
def test_stream_dataframe_dictionary_value_single():
# Given
value = pd.DataFrame({"x": [1, 2], "y": ["a", "b"]})
tabulator = Tabulator(value=value)
stream_value = {"x": 4, "y": "d"}
# Used to test that value event is triggered
global VALUE_CHANGED_COUNT
VALUE_CHANGED_COUNT = 0
@param.depends(tabulator.param.value, watch=True)
def _inc(*events):
global VALUE_CHANGED_COUNT
VALUE_CHANGED_COUNT += 1
# When PROVIDING A DICTIONARY ROW
tabulator.stream(stream_value)
# Then
tabulator_source_df = tabulator._source.to_df().drop(columns=["index"])
expected = pd.DataFrame({"x": [1, 2, 4], "y": ["a", "b", "d"]})
pd.testing.assert_frame_equal(tabulator.value, expected)
pd.testing.assert_frame_equal(
tabulator_source_df, expected, check_column_type=False, check_dtype=False
)
assert VALUE_CHANGED_COUNT == 1
def test_stream_cds_dictionary_value():
# Given
value = ColumnDataSource({"x": [1, 2], "y": ["a", "b"]})
tabulator = Tabulator(value=value)
stream_value = {"x": [3, 4], "y": ["c", "d"]}
# Used to test that value event is triggered
global VALUE_CHANGED_COUNT
VALUE_CHANGED_COUNT = 0
@param.depends(tabulator.param.value, watch=True)
def _inc(*events):
global VALUE_CHANGED_COUNT
VALUE_CHANGED_COUNT += 1
# When
tabulator.stream(stream_value)
# Then
tabulator_source_json = tabulator._source.to_json(include_defaults=False)["data"]
expected = {"x": [1, 2, 3, 4], "y": ["a", "b", "c", "d"]}
assert tabulator.value is value
assert tabulator_source_json == expected
assert VALUE_CHANGED_COUNT == 1
# endregion Stream
# region Patch
def test_stream_dataframe_dataframe_value():
# Given
value = pd.DataFrame({"x": [1, 2], "y": ["a", "b"]})
tabulator = Tabulator(value=value)
stream_value = pd.DataFrame({"x": [3, 4], "y": ["c", "d"]})
# Used to test that value event is triggered
global VALUE_CHANGED_COUNT
VALUE_CHANGED_COUNT = 0
@param.depends(tabulator.param.value, watch=True)
def _inc(*events):
global VALUE_CHANGED_COUNT
VALUE_CHANGED_COUNT += 1
# When
tabulator.stream(stream_value)
# Then
tabulator_source_df = tabulator._source.to_df().drop(columns=["index"])
expected = pd.DataFrame({"x": [1, 2, 3, 4], "y": ["a", "b", "c", "d"]})
pd.testing.assert_frame_equal(tabulator.value, expected)
pd.testing.assert_frame_equal(tabulator_source_df, expected)
assert VALUE_CHANGED_COUNT == 1
# endregion Patch
def test_patch_from_partial_dataframe():
data = pd.DataFrame({"x": [1, 2, 3, 4], "y": ["a", "b", "c", "d"]})
data1 = data.loc[
0:1,
]
data2 = data.loc[2:4]
# When
tabulator = Tabulator(value=data1)
tabulator.value = data2.reset_index(drop=True)
patch_value = tabulator.value["x"] + 2
tabulator.patch(patch_value)
# Then
expected = pd.DataFrame({"x": [5, 6], "y": ["c", "d"]})
pd.testing.assert_frame_equal(tabulator.value, expected)
def test_range_index_of_dataframe_value():
# Given
data = pd.DataFrame({"x": [1, 2, 3, 4], "y": ["a", "b", "c", "d"]})
data2 = data.loc[2:4]
# When
with pytest.raises(ValueError) as e:
Tabulator(value=data2)
assert (
str(e.value) == "Please provide a DataFrame with RangeIndex starting at 0 and with step 1"
)
def test_patch_and_reset():
"""I experienced some strange behaviour which I test below.
The code actually worked as it should. The problem was that I patched the original
data so I could never "reset" back to the original data
"""
# Given
data = pd.DataFrame({"x": [1, 2, 3, 4], "y": ["a", "b", "c", "d"]})
data_copy = data.copy(deep=True)
tabulator = Tabulator(value=data_copy)
patch = tabulator.value["x"] + 2
# When patch Then
tabulator.patch(patch_value=patch)
assert set(tabulator._source.data["x"]) == {3, 4, 5, 6}
# When reset Then
tabulator.value = data
assert set(tabulator._source.data["x"]) == {1, 2, 3, 4}
def test_replace_stream_and_reset():
# Given
data = | pd.DataFrame({"x": [1, 2, 3, 4, 5], "y": ["a", "b", "c", "d", "e"]}) | pandas.DataFrame |
import os
# Reduce CPU load. Need to perform BEFORE import numpy and some other libraries.
os.environ['MKL_NUM_THREADS'] = '2'
os.environ['OMP_NUM_THREADS'] = '2'
os.environ['NUMEXPR_NUM_THREADS'] = '2'
import gc
import math
import copy
import json
import numpy as np
import pandas as pd
import torch as th
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as data_utils
from torch.nn.utils.rnn import pad_sequence
from typing import Optional, Sequence, List, Tuple, Union, Dict
import requests
from tqdm import tqdm
import re
from sklearn.model_selection import train_test_split
from sklearn.metrics import log_loss
# Setup logging
import logging
logging.basicConfig(
format='%(asctime)s [%(levelname)s] %(name)s %(message)s',
datefmt='%y-%m-%d %H:%M:%S',
level=logging.DEBUG,
)
log = logging.getLogger('agro')
RANDOM_SEED = 2021
"""
# Общая идея
Эта задача по смыслу сходна с задачей Sentiment Analysis.
То есть, когда тексту в соответствие ставится один или несколько классов,
например: (положительный, негативный, нейтральный)
В данном случае: несколько классов может быть присвоено одновременно (MultiLabel Classification)
Я решил, что для этой цели подойдёт архитектура Transformers.
Точнее, её первая половина: TransformerEncoder.
На вход вместо слов подаётся последовательность эмбедингов (Embeddings).
То есть, каждому слову ставится в соответствие точка в N-мерном пространстве.
Обычно N: от 100 до 300.
Для каждого `embedding` добавляем информацию о положении слова в тексте: `PositionalEncoding`.
Далее несколько слоёв TransformerEncoder обрабатывают всю последовательность сразу,
усиляя одни блоки и ослабляя другие, выделяя, таким образом, важную информацию.
Затем обработанная последовательность сравнивается с некими целевыми эмбедингами (Target Embeddings),
которые описывают то или иное заболевание.
При сравнении вся последовательность сливается в некий единый эмбединг, по одному для каждого класса.
Финальный этап, получившийся набор эмбеддингов (фиксированного размера) пропускается через Linear слой,
чтобы создать вероятности для каждого заболевания.
"""
"""
# Словарь Embeddings для русского языка
Для работы нам потребуются готовые `embeddings` для русских слов.
Есть некоторые доступные для скачивания словари на
[RusVectores](https://rusvectores.org/ru/)
Но размер словарей в них: от 150 до 300 тысяч слов, что довольно мало.
Также, не совсем понятны условия их лицензии.
Есть проект ["Наташа"](https://github.com/natasha/navec).
Размер словаря: 500k слов.
Существует также другой интересный проект:
[DeepPavlov](https://docs.deeppavlov.ai/en/0.0.7/intro/pretrained_vectors.html),
содержащий около 1.5 млн. слов.
Его лицензия: **Apache 2.0** - позволяет как свободное, так и коммерческое использование.
С последним я и буду работать.
Нам потребуется скачать весь словарь, размером 4.14Гб, а затем загрузить его в память.
"""
class GloveModel():
"""
For a given text returns a list of embeddings
"""
Pat_Split_Text = re.compile(r"[\w']+|[.,!?;]", flags=re.RegexFlag.MULTILINE)
Unk_Tag: int = -1
Num_Tag: int = -1
def __init__(self, substitutions: Optional[str] = None, log: Optional[logging.Logger] = None):
if log is None:
log = logging.getLogger()
# Load Glove Model. Download and convert from text to .feather format (which is much faster)
glove_file_feather = 'ft_native_300_ru_wiki_lenta_lower_case.feather'
if not os.path.exists(glove_file_feather):
glove_file_vec = glove_file_feather.rsplit(os.extsep, 1)[0] + '.vec'
if not os.path.exists(glove_file_vec):
log.info('Downloading glove model for russia language from DeepPavlov...')
self.download_file(
'http://files.deeppavlov.ai/embeddings/ft_native_300_ru_wiki_lenta_lower_case/'
'ft_native_300_ru_wiki_lenta_lower_case.vec'
)
log.info('Done')
# Load model from .vec file
log.info('Loading Glove Model from .vec format...')
self.glove = self.load_glove_model(glove_file_vec, size=300)
log.info(f'{len(self.glove)} words loaded!')
log.info('Saving Glove Model to .feather format...')
self.glove.reset_index().to_feather(glove_file_feather)
else:
log.info('Loading Glove Model from .feather format...')
self.glove = pd.read_feather(glove_file_feather)
log.info(f'{len(self.glove)} words loaded!')
log.info('Sorting glove dataframe by words...')
self.glove.sort_values('word', axis=0, ignore_index=True, inplace=True)
log.info('Done')
self.subs_tab = {}
if isinstance(substitutions, str):
for line in substitutions.splitlines():
words = line.strip().lower().split()
if len(words) < 2:
continue
self.subs_tab[words[0]] = words[1:]
log.info(f'Using the substitutions table of {len(self.subs_tab)} records')
"""
Для неизвестных слов я буду использовать embedding слова 'unk'.
А для чисел - embedding слова 'num'.
Я не уверен, что авторы DeepPavlov именно так и планировали.
Но стандартных '<unk>' или '<num>' я там не обнаружил.
"""
self.Unk_Tag = int(self.glove.word.searchsorted('unk'))
self.Num_Tag = int(self.glove.word.searchsorted('num'))
assert self.glove.word[self.Unk_Tag] == 'unk', 'Failed to find "unk" token in Glove'
assert self.glove.word[self.Num_Tag] == 'num', 'Failed to find "num" token in Glove'
def __len__(self):
return len(self.glove)
def __getitem__(self, text: str) -> List[np.ndarray]:
tags = self.text2tags(text, return_offsets=False)
embeddings = [self.tag2embedding(tag) for tag in tags]
return embeddings
@staticmethod
def download_file(url: str, block_size=4096, file_name: Optional[str] = None):
"""Downloads file and saves it to local file, displays progress bar"""
with requests.get(url, stream=True) as response:
if file_name is None:
if 'Content-Disposition' in response.headers.keys():
file_name = re.findall('filename=(.+)', response.headers['Content-Disposition'])[0]
if file_name is None:
file_name = url.split('/')[-1]
expected_size_in_bytes = int(response.headers.get('content-length', 0))
received_size_in_bytes = 0
with tqdm(total=expected_size_in_bytes, unit='iB', unit_scale=True, position=0, leave=True) as pbar:
with open(file_name, 'wb') as file:
for data in response.iter_content(block_size):
file.write(data)
pbar.update(len(data))
received_size_in_bytes += len(data)
if (expected_size_in_bytes != 0) and (expected_size_in_bytes != received_size_in_bytes):
raise UserWarning(f'Incomplete download: {received_size_in_bytes} of {expected_size_in_bytes}')
@staticmethod
def load_glove_model(file_name: str, encoding: str = 'utf-8', size: Optional[int] = None) -> pd.DataFrame:
"""
Loads glove model from text file into pandas DataFrame
Returns
-------
df : pd.DataFrame
A dataframe with two columns: 'word' and 'embedding'.
The order of words is preserved as in the source file. Thus it may be unsorted!
"""
words, embeddings = [], []
with tqdm(total=os.path.getsize(file_name), unit='iB', unit_scale=True, position=0, leave=True) as pbar:
with open(file_name, 'r', encoding=encoding) as f:
first_line = True
line = f.readline()
while line:
split_line = line.split()
line = f.readline()
if first_line:
first_line = False
if len(split_line) == 2:
if size is None:
size = int(split_line[1])
else:
assert size == int(split_line[1]), \
f'Size specified at the first line: {int(split_line[1])} does not match: {size}'
continue
if size is not None:
word = ' '.join(split_line[0:-size])
embedding = np.array(split_line[-size:], dtype=np.float32)
assert len(embedding) == size, f'{line}'
else:
word = split_line[0]
embedding = np.array(split_line[1:], dtype=np.float32)
size = len(embedding)
words.append(word)
embeddings.append(embedding)
pbar.update(f.tell() - pbar.n)
return | pd.DataFrame({'word': words, 'embedding': embeddings}) | pandas.DataFrame |
"""
Feature extraction for the ego-noise data: split the MAV recordings into
train/val/test sets, extract spectra and state features, and synchronize them.
"""
import os
import shutil
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
import librosa
import matplotlib.pyplot as plt
from aircraft_detector.utils.utils import (
retrieve_files,
get_feature_directory_name,
refresh_directory,
print_verbose,
load_spectrum_settings,
)
import aircraft_detector.utils.feature_helper as fh
import aircraft_detector.utils.plot_helper as ph
class FeatureExtraction:
def __init__(self, root_directory, feature_settings=None):
# set root directory
self._dir_root = root_directory
# set the missing feature settings to their defaults
if feature_settings is None:
feature_settings = {}
self._feature = load_spectrum_settings(feature_settings)
# derive root output directory (feature dataset) from parameters
self._dir_root_set = os.path.join(
self._dir_root,
"Ego-Noise Prediction",
"Parameter Sets",
get_feature_directory_name(self._feature),
)
# verbosity
self.verbose = True # print when method is finished
self.super_verbose = False # print for every single file
def split_mav_data(self, train_test_ratio=0.8, train_val_ratio=0.8):
# get files
dir_audio = os.path.join(self._dir_root, "Raw", "Mav", "Audio")
files_audio = [
os.path.join(dir_audio, f) for f in sorted(os.listdir(dir_audio))
]
dir_states = os.path.join(self._dir_root, "Raw", "Mav", "States")
files_states = [
os.path.join(dir_states, f) for f in sorted(os.listdir(dir_states))
]
# split files into train-val-test
audio_train, audio_test, states_train, states_test = train_test_split(
files_audio, files_states, train_size=train_test_ratio, random_state=42
)
audio_train, audio_val, states_train, states_val = train_test_split(
audio_train, states_train, train_size=train_val_ratio, random_state=42
)
# Group the files
files_train = [audio_train, states_train]
files_val = [audio_val, states_val]
files_test = [audio_test, states_test]
files = [files_train, files_val, files_test]
# Output directory for the split
dir_root_out = os.path.join(self._dir_root, "Ego-Noise Prediction", "Dataset")
# Loop over subsets and data types
for i, subset in enumerate(["Train", "Val", "Test"]):
for j, data in enumerate(["Audio", "States"]):
# Output directory for subset, data
dir_dest = os.path.join(dir_root_out, subset, data)
refresh_directory(dir_dest)
# Copy to destination
for f in files[i][j]:
shutil.copy(f, dir_dest)
def extract_spectra(self, offset=50, scaling=80):
# Loop over subsets
for subset in ["Train", "Val", "Test"]:
# Get audio files
dir_audio = os.path.join(
self._dir_root, "Ego-Noise Prediction", "Dataset", subset, "Audio"
)
files_audio = retrieve_files(dir_audio)
# directory for the unsynchronized spectra
dir_output = os.path.join(
self._dir_root_set, "Unsynchronized", subset, "Spectra"
)
# Refresh directory
refresh_directory(dir_output)
# Loop through files in set
for f in files_audio:
# Extract spectrum
Z = fh.extract_spectrum(f, self._feature)
# Scale spectrum
Z += offset
Z /= scaling
# Save to appropriate directory
fn = os.path.split(f)[-1].replace(".wav", ".csv")
fp = os.path.join(dir_output, fn)
pd.DataFrame(Z).to_csv(fp, index=False, header=False)
print_verbose(
self.super_verbose,
"Finished extracting feature for '%s' set." % subset,
)
def extract_states(self):
# Loop over subsets
for subset in ["Train", "Val", "Test"]:
# Get states files
dir_states = os.path.join(
self._dir_root, "Ego-Noise Prediction", "Dataset", subset, "States"
)
files_states = retrieve_files(dir_states)
# Directory for the unsynchronized states
dir_output = os.path.join(
self._dir_root_set, "Unsynchronized", subset, "States"
)
refresh_directory(dir_output)
# Loop through files in set
for f in files_states: # xyz in NED frame
# Read in as dataframe
df = pd.read_csv(f, header=0)
# Add delta-rpm
df["rpm_1_delta"] = np.diff(df["rpm_1"].to_numpy(), prepend=0)
df["rpm_2_delta"] = np.diff(df["rpm_2"].to_numpy(), prepend=0)
df["rpm_3_delta"] = np.diff(df["rpm_3"].to_numpy(), prepend=0)
df["rpm_4_delta"] = np.diff(df["rpm_4"].to_numpy(), prepend=0)
# Add delta-cmd
df["cmd_thrust_delta"] = np.diff(df["cmd_thrust"].to_numpy(), prepend=0)
df["cmd_roll_delta"] = np.diff(df["cmd_roll"].to_numpy(), prepend=0)
df["cmd_pitch_delta"] = np.diff(df["cmd_pitch"].to_numpy(), prepend=0)
df["cmd_yaw_delta"] = np.diff(df["cmd_yaw"].to_numpy(), prepend=0)
# Prune horizontal position
df.drop(columns=["pos_x", "pos_y"], inplace=True)
# Negate vertical position to get height
df.rename(columns={"pos_z": "height"}, inplace=True)
df["height"] *= -1
# Replace north- and east velocities with magnitude (horizontal)
df["vel_hor"] = np.sqrt(df["vel_x"] ** 2 + df["vel_y"] ** 2)
df.drop(columns=["vel_x", "vel_y"], inplace=True)
# Negate downwards velocity go get vertical velocity
df.rename(columns={"vel_z": "vel_ver"}, inplace=True)
df["vel_ver"] *= -1
# Replace north- and east accelerations with magnitude (horizontal)
df["acc_hor"] = np.sqrt(df["acc_x"] ** 2 + df["acc_y"] ** 2)
df.drop(columns=["acc_x", "acc_y"], inplace=True)
# Negate downwards velocity go get vertical acceleration
df.rename(columns={"acc_z": "acc_ver"}, inplace=True)
df["acc_ver"] *= -1
# Re-order the frame
cols = [
"delta_t",
"rpm_1",
"rpm_2",
"rpm_3",
"rpm_4",
"rpm_1_delta",
"rpm_2_delta",
"rpm_3_delta",
"rpm_4_delta",
"cmd_thrust",
"cmd_roll",
"cmd_pitch",
"cmd_yaw",
"cmd_thrust_delta",
"cmd_roll_delta",
"cmd_pitch_delta",
"cmd_yaw_delta",
"height",
"vel_hor",
"vel_ver",
"acc_hor",
"acc_ver",
"angle_phi",
"angle_theta",
"angle_psi",
"rate_p",
"rate_q",
"rate_r",
]
df = df[cols]
# Export
fn = os.path.split(f)[-1]
df.to_csv(os.path.join(dir_output, fn), header=True, index=False)
def synchronize_data(self, skip_takeoff=True):
# Loop over subsets
for subset in ["Train", "Val", "Test"]:
# list unsynchronized spectra
dir_spectra = os.path.join(
self._dir_root_set, "Unsynchronized", subset, "Spectra"
)
files_spectra = retrieve_files(dir_spectra)
# list unsynchronized states
dir_states = os.path.join(
self._dir_root_set, "Unsynchronized", subset, "States"
)
files_states = retrieve_files(dir_states)
# set the root output directory and refresh the output directories
dir_root_output = os.path.join(self._dir_root_set, "Dataset", subset)
refresh_directory(os.path.join(dir_root_output, "Spectra"))
refresh_directory(os.path.join(dir_root_output, "States"))
# synchronize each pair of files
for i in range(len(files_spectra)):
self._synchronize_pair(
files_spectra[i], files_states[i], dir_root_output, skip_takeoff
)
def _synchronize_pair(
self, file_spectrum, file_states, dir_root_output, skip_takeoff
):
# load spectrum
Z = pd.read_csv(file_spectrum, header=None).to_numpy()
# get time vector from the spectrum
t_mic = librosa.times_like(
Z,
sr=self._feature["fft_sample_rate"],
hop_length=self._feature["stft_hop_length"],
)
# load states
S = | pd.read_csv(file_states, header=0) | pandas.read_csv |
# coding=utf-8
"""
Basic NumPy ndarray and pandas Series examples
"""
import numpy as np
ndarry = np.array([[35, 20, 66], [23, 67, 89], [13, 244, 67]], np.int32)
print(ndarry.shape, ndarry.size)
print(ndarry.dtype)
print(ndarry[1:2, 1:2])
import pandas as pd
stocks = pd.Series([20.1, 100.0, 66.5], index=['tx', 'tobao', 'apple'])
stocks2 = | pd.Series([23.1, 95, 88], index=['tx', 'tobao', 'google']) | pandas.Series |
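# Illustrative continuation (not part of the original snippet): adding the two
# Series demonstrates pandas index alignment. Shared labels ('tx', 'tobao') are
# summed, while labels present in only one Series ('apple', 'google') give NaN.
print(stocks + stocks2)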
#!/usr/bin/env python3
import glob
import os
import pprint
import traceback
import pandas as pd
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
# Extraction function
def tflog2pandas(path: str) -> pd.DataFrame:
"""convert single tensorflow log file to pandas DataFrame
Parameters
----------
path : str
path to tensorflow log file
Returns
-------
pd.DataFrame
converted dataframe
"""
DEFAULT_SIZE_GUIDANCE = {
"compressedHistograms": 1,
"images": 1,
"scalars": 0, # 0 means load all
"histograms": 1,
}
runlog_data = pd.DataFrame({"metric": [], "value": [], "step": []})
try:
event_acc = EventAccumulator(path, DEFAULT_SIZE_GUIDANCE)
event_acc.Reload()
tags = event_acc.Tags()["scalars"]
event_list = event_acc.Scalars('Eval_Reward/Mean')
values = list(map(lambda x: x.value, event_list))
step = list(map(lambda x: x.step, event_list))
r = {"metric": ['Eval_Reward/Mean'] * len(step), "value": values, "step": step}
r = pd.DataFrame(r)
runlog_data = | pd.concat([runlog_data, r]) | pandas.concat |
import scipy.io.wavfile as wav
from python_speech_features import mfcc
import numpy as np
import os
import pandas as pd
CLASSICAL_DIR = "C:\\Users\\<NAME>\\Music\\Classical\\"
METAL_DIR = "C:\\Users\\<NAME>\\Music\\Metal\\"
JAZZ_DIR = "C:\\Users\\<NAME>\\Music\\Jazz\\"
POP_DIR = "C:\\Users\\<NAME>\\Music\\Pop\\"
PATH = "E:\\git\\python_speech_features\\covariance\\"
x = [CLASSICAL_DIR, METAL_DIR, JAZZ_DIR, POP_DIR]
t = 100
columns = ['Feature1', 'Feature2', 'Feature3', 'Feature4', 'Feature5', 'Feature6', 'Feature7', 'Feature8', 'Feature9',
'Feature10', 'Feature11', 'Feature12', 'Feature13']
dataset = []
genre = []
for i in x:
if i == CLASSICAL_DIR:
for index in range(0, t):
genre.append(0)
file_name = "classical.000"+str(index).zfill(2)
file = file_name+".wav"
(rate, signal) = wav.read(CLASSICAL_DIR+file)
mfcc_feat = mfcc(signal, rate)
cov = np.cov(mfcc_feat, rowvar=0)
mean = np.mean(mfcc_feat, axis=0)
# if not os.path.exists(PATH+file_name):
# os.makedirs(PATH+file_name)
| pd.DataFrame(cov) | pandas.DataFrame |
import os
import sys
import datetime
import numpy as np
import scipy.signal
import pandas as pd
import yfinance as yf
from contextlib import contextmanager
from src.utils_date import add_days
from src.utils_date import prev_weekday
#from pandas_datareader.nasdaq_trader import get_nasdaq_symbols
ERROR_NO_MINUTE_DATA_YTD = 'Skip: Missing minute-level data for yesterday'
ERROR_NO_MINUTE_DATA_TDY = 'Skip: Missing minute-level data for today'
ERROR_CANDLES_PER_DAY = 'Skip: Insufficient candles today ({} less than {})'
ERROR_NULL_COL = 'Skip: NULL value in df_i columns ({})'
ERROR_NULL_DAY_LEVEL_IND = 'Skip: NULL value in day-level indicators'
ERROR_PRICES_D_NOT_UPDATE = 'Error: prices_d not updated, latest date found: {}'
@contextmanager
def suppress_stdout():
'''Decorator to supress function output to sys.stdout'''
with open(os.devnull, "w") as devnull:
old_stdout = sys.stdout
sys.stdout = devnull
try:
yield
finally:
sys.stdout = old_stdout
def get_ls_sym():
'''Returns list of tickers from nasdaqtrader.com
Duplicates and strings with length > 5 are removed
Returns:
ls_sym (List of str)
'''
#df_symbols = get_nasdaq_symbols()
#ls_sym = df_symbols.index.to_list()
ls_urls = [
'http://ftp.nasdaqtrader.com/dynamic/SymDir/nasdaqlisted.txt'
,'http://ftp.nasdaqtrader.com/dynamic/SymDir/otherlisted.txt'
]
ls_sym = []
for i, url in enumerate(ls_urls):
df = pd.read_csv(url, sep='|')
for col in list(df):
if col in ['ACT Symbol', 'Symbol']: df['sym'] = df[col]
ls_sym+=df[df['sym'].str.len()<=5]['sym'].to_list()
ls_sym = list(set(ls_sym)) # remove duplicates
return ls_sym
def get_df_prices(sym, start_str, end_str):
'''Return dataframe with minute-level stock price data
from start date to end date (inclusive).
Args:
sym (str): Ticker symbol e.g. 'BYND'
start_str (str): Start date string e.g. '2020-07-18'
end_str (str): End date string e.g. '2020-07-18'
Returns:
df (pandas.Dataframe)
'''
assert start_str <= end_str
end_str_mod=add_days(end_str, 3)
with suppress_stdout():
df = yf.download(sym,
start=start_str,
end=end_str_mod,
interval='1m',
progress=0,
prepost=True).reset_index()
is_date_range = ((df['Datetime'].dt.date.astype('str')>=start_str)
&(df['Datetime'].dt.date.astype('str')<=end_str))
df = df[is_date_range]
df['Datetime'] = df['Datetime'].dt.tz_localize(None) #remove timezone
is_reg_hours = ((df['Datetime'].dt.time.astype('str')>='09:30:00')
&(df['Datetime'].dt.time.astype('str')<='15:59:00'))
df['is_reg_hours'] = np.where(is_reg_hours, 1, 0)
df['sym'] = sym
df = df.rename(columns={
'Datetime':'datetime',
'Open':'open',
'High':'high',
'Low':'low',
'Adj Close':'adj_close',
'Volume':'volume'
})
ls_col = [
'sym',
'datetime',
'open',
'high',
'low',
'adj_close',
'volume',
'is_reg_hours',
]
return df[ls_col]
def add_rsi(df, rsi_period):
'''Returns dataframe with additional columns:
rsi (float)
Args:
df (pandas.DataFrame): Must be index sorted by datetime:
adj_close (float)
rsi_period (int): Number of rsi periods
Returns:
df (pandas.DataFrame)
'''
chg = df['adj_close'].diff(1)
gain = chg.mask(chg<0,0)
loss = chg.mask(chg>0,0)
avg_gain = gain.ewm(com=rsi_period-1, min_periods=rsi_period).mean()
avg_loss = loss.ewm(com=rsi_period-1, min_periods=rsi_period).mean()
rs = abs(avg_gain/avg_loss)
rsi = 100 - (100/(1+rs))
df['rsi14'] = rsi
return df
def add_vwap(df):
'''Returns dataframe with additional columns:
vwap (float): Volume Weighted Average Price
vwap_var (float): % variance of close from vwap
Args:
df (pandas.DataFrame): Dataframe with at least columns:
datetime
open
high
low
adj_close
volume
Returns:
df (pandas.DataFrame)
'''
df['vwap'] = (df['volume']*(df['high']+df['low']+df['adj_close'])/3).cumsum()/df['volume'].cumsum()
df['vwap'] = df['vwap'].fillna(df['adj_close'])
df['vwap_var'] = (df['adj_close']/df['vwap'])-1
return df
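# Illustrative usage of add_rsi / add_vwap (a documentation sketch only: the
# prices below are made up and this helper is never called by the module).
# Note that 'rsi14' stays NaN until at least `rsi_period` observations exist.
def _example_indicator_usage():
    df = pd.DataFrame({
        'datetime': pd.date_range('2020-07-20 09:30', periods=5, freq='1min'),
        'open': [10.0, 10.1, 10.2, 10.1, 10.3],
        'high': [10.2, 10.3, 10.3, 10.2, 10.4],
        'low': [9.9, 10.0, 10.1, 10.0, 10.2],
        'adj_close': [10.1, 10.2, 10.1, 10.2, 10.3],
        'volume': [1000, 1500, 1200, 900, 1100],
    })
    df = add_rsi(df, rsi_period=14)  # adds the 'rsi14' column
    df = add_vwap(df)                # adds 'vwap' and 'vwap_var'
    return df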
def get_df_i(sym, date_str, live_data, db, num_candles_min = 200):
'''Returns interim dataframe with price data and
trading indicators for input symbol and date
Args:
sym (str)
date_str (str)
live_data (int)
db (Database object)
num_candles_min (int)
Returns:
df_i (pandas.Dataframe)
'''
start_str = prev_weekday(date_str) #start 1 day early to get prev day data for rsi etc
end_str = add_days(date_str, 3) #extend end date string due to bug
if live_data:
with suppress_stdout():
df = yf.download(sym,
start=start_str,
end=end_str,
interval='1m',
prepost = False,
progress=0).reset_index()
df['Datetime'] = df['Datetime'].dt.tz_localize(None) #remove timezone
df = df.rename(columns={'Adj Close':'adj_close',
'Datetime':'datetime',
'Open':'open',
'High':'high',
'Low':'low',
'Volume':'volume'})
else:
q = '''
SELECT *
FROM prices_m
WHERE is_reg_hours = 1
AND sym='{}'
AND DATE(datetime)>='{}'
AND DATE(datetime)<='{}'
ORDER BY datetime
'''.format(sym, start_str, date_str)
df = pd.read_sql(q, db.conn)
df['datetime'] = | pd.to_datetime(df['datetime']) | pandas.to_datetime |
import pandas as pd
import pytest
import woodwork as ww
from woodwork.logical_types import Boolean, Double, Integer
from rayml.exceptions import MethodPropertyNotFoundError
from rayml.pipelines.components import (
ComponentBase,
FeatureSelector,
RFClassifierSelectFromModel,
RFRegressorSelectFromModel,
)
def make_rf_feature_selectors():
rf_classifier = RFClassifierSelectFromModel(
number_features=5,
n_estimators=10,
max_depth=7,
percent_features=0.5,
threshold=0,
)
rf_regressor = RFRegressorSelectFromModel(
number_features=5,
n_estimators=10,
max_depth=7,
percent_features=0.5,
threshold=0,
)
return rf_classifier, rf_regressor
def test_init():
rf_classifier, rf_regressor = make_rf_feature_selectors()
assert rf_classifier.name == "RF Classifier Select From Model"
assert rf_regressor.name == "RF Regressor Select From Model"
def test_component_fit(X_y_binary, X_y_multi, X_y_regression):
X_binary, y_binary = X_y_binary
X_multi, y_multi = X_y_multi
X_reg, y_reg = X_y_regression
rf_classifier, rf_regressor = make_rf_feature_selectors()
assert isinstance(rf_classifier.fit(X_binary, y_binary), ComponentBase)
assert isinstance(rf_classifier.fit(X_multi, y_multi), ComponentBase)
assert isinstance(rf_regressor.fit(X_reg, y_reg), ComponentBase)
def test_feature_selector_missing_component_obj():
class MockFeatureSelector(FeatureSelector):
name = "Mock Feature Selector"
def fit(self, X, y):
return self
mock_feature_selector = MockFeatureSelector()
mock_feature_selector.fit(pd.DataFrame(), pd.Series())
with pytest.raises(
MethodPropertyNotFoundError,
match="Feature selector requires a transform method or a component_obj that implements transform",
):
mock_feature_selector.transform(pd.DataFrame())
with pytest.raises(
MethodPropertyNotFoundError,
match="Feature selector requires a transform method or a component_obj that implements transform",
):
mock_feature_selector.fit_transform(pd.DataFrame())
def test_feature_selector_component_obj_missing_transform():
class MockFeatureSelector(FeatureSelector):
name = "Mock Feature Selector"
def __init__(self):
self._component_obj = None
def fit(self, X, y):
return self
mock_feature_selector = MockFeatureSelector()
mock_feature_selector.fit(pd.DataFrame(), pd.Series())
with pytest.raises(
MethodPropertyNotFoundError,
match="Feature selector requires a transform method or a component_obj that implements transform",
):
mock_feature_selector.transform(pd.DataFrame())
with pytest.raises(
MethodPropertyNotFoundError,
match="Feature selector requires a transform method or a component_obj that implements transform",
):
mock_feature_selector.fit_transform(pd.DataFrame())
def test_feature_selectors_drop_columns_maintains_woodwork():
X = pd.DataFrame({"a": [1, 2, 3], "b": [2, 4, 6], "c": [1, 2, 3], "d": [1, 2, 3]})
X.ww.init(logical_types={"a": "double", "b": "categorical"})
y = pd.Series([0, 1, 1])
rf_classifier, rf_regressor = make_rf_feature_selectors()
rf_classifier.fit(X, y)
X_t = rf_classifier.transform(X, y)
assert len(X_t.columns) == 2
rf_regressor.fit(X, y)
X_t = rf_regressor.transform(X, y)
assert len(X_t.columns) == 2
@pytest.mark.parametrize(
"X_df",
[
pd.DataFrame(
pd.to_datetime(["20190902", "20200519", "20190607"], format="%Y%m%d")
),
pd.DataFrame(pd.Series([1, 2, 3], dtype="Int64")),
pd.DataFrame(pd.Series([1.0, 2.0, 3.0], dtype="float")),
pd.DataFrame(pd.Series(["a", "b", "a"], dtype="category")),
pd.DataFrame(pd.Series([True, False, True], dtype="boolean")),
pd.DataFrame(
pd.Series(
["this will be a natural language column because length", "yay", "hay"],
dtype="string",
)
),
],
)
def test_feature_selectors_woodwork_custom_overrides_returned_by_components(X_df):
rf_classifier, rf_regressor = make_rf_feature_selectors()
y = | pd.Series([1, 2, 1]) | pandas.Series |
# pylint:disable=missing-docstring,redefined-outer-name
import pytest
import pandas as pd
from pandas.testing import assert_series_equal, assert_frame_equal
from survey_toolkit.core import MultipleChoiceQuestion
@pytest.fixture
def question():
return MultipleChoiceQuestion('favouritePhones', 'What are your favourite phone brands?')
def test_add_answer_when_no_choices_given(question):
question.add_answer(['Windows Phone', 'Nokia'])
question.add_answer(None)
assert question.answers[-2] == ['Windows Phone', 'Nokia']
assert question.answers[-1] is None
def test_add_noniterable_answer(question):
question.add_answer('Windows Phone')
question.add_answer(None)
assert question.answers[-2] == ['Windows Phone']
assert question.answers[-1] is None
def test_add_answer_when_choices_given_as_list(question):
question.choices = ['Huawei', 'iPhone', 'Nokia', 'Samsung', 'Xiaomi']
question.add_answer(['Huawei', 'iPhone'])
question.add_answer(None)
assert question.answers[-2] == ['Huawei', 'iPhone']
assert question.answers[-1] is None
def test_add_answer_not_in_choices(question):
with pytest.raises(ValueError):
question.choices = ['Huawei', 'iPhone', 'Nokia', 'Samsung', 'Xiaomi']
question.add_answer('Windows Phone')
def test_add_answer_when_choices_given_as_dict(question):
question.choices = {1: 'iPhone', 2: 'Samsung', 3: 'Huawei', 4: 'Xiaomi', 5: 'Nokia'}
question.add_answer([1, 2])
question.add_answer(None)
assert question.answers[-2] == [1, 2]
assert question.answers[-1] is None
def test_to_series(question):
answers = [['Samsung', 'iPhone'], None, ['Nokia'], ['Huawei', 'Xiaomi']]
question.choices = ['Huawei', 'iPhone', 'Nokia', 'Samsung', 'Xiaomi']
question.answers = answers
series = question.to_series()
expected = | pd.Series(answers, name='favouritePhones') | pandas.Series |
# import libraries that we need
import glob, os, re
import pandas as pd
from lib.export import export_files
from lib.filesearch import find_participants, find_highest_export
# import custom-made functions that we'll need
from lib.sorting import Sorting, process_surfaces, merge_all_dataframes, extract_survey
# set root to current path location
top_root = os.path.join(os.getcwd(), 'data')
# set or create directory for saving logs
savelogs_directory = os.path.join(os.getcwd(), 'data', 'analysis')
if not os.path.exists(savelogs_directory):
    os.makedirs(top_root + "/processed", exist_ok=True)
    os.makedirs(top_root + "/analysis", exist_ok=True)
# keeps track of any issues and saves to file at end
issues = pd.DataFrame(columns=['participant', 'error'])
# read in survey key
survey_key = pd.read_csv('./lib/survey_key-SA.csv')
# figure out the participants in each sub-directory
# each participant = full path to their datafolder
included_participants = find_participants(os.path.join(top_root, "raw"))
# cycle through participants
for next_participant in included_participants:
# set participant's working directories
root = next_participant
containing_directory = os.path.abspath(os.path.join(root, "../"))
# sets participant info for documentation purposes
participant_info = re.sub("_pupil", "",
os.path.split(containing_directory)[1])
# identify log file path
try:
logfile_path = glob.glob(containing_directory + '/*.log')[0]
except IndexError:
# a .log file wasn't found in the participants directory
# aka index [0] doesn't exist, so document issue and continue to next?
        issues = issues.append(
            {'participant': participant_info,
             'error': 'logfile not found/glob list empty'},
            ignore_index=True)
continue
try:
        infofile_path = glob.glob(containing_directory + '/info.csv')[0]
except IndexError:
        issues = issues.append(
            {'participant': participant_info,
             'error': 'info.csv not found/glob list empty'},
            ignore_index=True)
continue
# look for the exports folder
exportfolder_path = find_highest_export(os.path.join(root, 'exports'))
# gaze/position file paths
full_gaze_path = glob.glob(exportfolder_path + '/gaze_positions.csv')[0]
gazesurface_path = glob.glob(exportfolder_path + '/surfaces/gaze_positions*.csv')[0]
surfaceevents_path = os.path.join(exportfolder_path, 'surfaces', 'surface_events.csv')
# initialize the Sort class
sort = Sorting(savelogs_directory)
# process the surface file
processed_surfaces = process_surfaces(surfaceevents_path, full_gaze_path)
# process the logfile
[full_logfile, processed_img_logs] = sort.logsort(logfile_path, infofile_path)
# adjust the timestamps for gaze on recognized surfaces
gaze_surface_df = | pd.read_csv(gazesurface_path) | pandas.read_csv |
from itertools import combinations
import numpy as np
import pandas as pd
import pytest
from synthesized_insight.check import ColumnCheck
from synthesized_insight.metrics import (
CramersV,
DistanceCNCorrelation,
DistanceNNCorrelation,
EarthMoversDistance,
EarthMoversDistanceBinned,
HellingerDistance,
JensenShannonDivergence,
KendallTauCorrelationTest,
KullbackLeiblerDivergence,
Mean,
Norm,
R2Mcfadden,
StandardDeviation,
)
mean = Mean()
std_dev = StandardDeviation()
cramers_v = CramersV()
emd = EarthMoversDistance()
kendell_tau_correlation_test = KendallTauCorrelationTest()
hellinger_distance = HellingerDistance()
distance_nn_corr = DistanceNNCorrelation()
distance_cn_corr = DistanceCNCorrelation()
kl_divergence = KullbackLeiblerDivergence()
js_divergence = JensenShannonDivergence()
r2_mcfadden = R2Mcfadden()
norm = Norm()
norm_ord1 = Norm(ord=1)
@pytest.fixture(scope='module')
def df():
df = pd.read_csv("tests/datasets/mini_compas.csv")
return df
@pytest.fixture(scope='module')
def group1(df):
pred1 = df["Ethnicity"] == "Caucasian"
target_attr = "RawScore"
group1 = df[pred1][target_attr]
return group1
@pytest.fixture(scope='module')
def group2(df):
pred2 = df["Ethnicity"] == "African-American"
target_attr = "RawScore"
group2 = df[pred2][target_attr]
return group2
@pytest.fixture(scope='module')
def group3(group2):
group3 = group2.sort_values()[len(group2) // 2:]
return group3
@pytest.fixture
def data1():
return np.random.normal(0, 1, 1000)
@pytest.fixture
def data2():
return np.random.normal(1, 1, 1000)
def test_mean():
sr_a = pd.Series(np.arange(100), name='a')
val_a = mean(sr=sr_a)
assert val_a == 49.5
sr_b = pd.Series(np.datetime64('2020-01-01') + np.arange(0, 3, step=1).astype('m8[D]'), name='b')
val_b = mean(sr=sr_b)
assert val_b == np.datetime64('2020-01-02')
sr_c = pd.Series(['a', 'b', 'c', 'd'], name='c')
val_c = mean(sr=sr_c)
assert val_c is None
def test_standard_deviation():
sr_a = pd.Series(np.random.normal(0, 1, 100), name='a')
val_a = std_dev(sr=sr_a)
assert val_a is not None
sr_b = pd.Series(np.datetime64('2020-01-01') + np.arange(0, 20, step=1).astype('m8[D]'), name='b')
val_b = std_dev(sr=sr_b)
assert val_b is not None
sr_c = pd.Series(['a', 'b', 'c', 'd'], name='c')
val_c = std_dev(sr=sr_c)
assert val_c is None
def test_em_distance():
sr_a = pd.Series(np.random.normal(0, 1, 100), name='a')
sr_b = pd.Series(['a', 'b', 'c', 'd'], name='b')
assert emd(sr_a, sr_a) is None
assert emd(sr_b, sr_b) is not None
def test_cramers_v_basic():
sr_a = | pd.Series([1, 2, 3, 1, 2, 3, 1, 2, 3] * 100, name='a') | pandas.Series |
import json
import os
import pickle
import boto3
import numpy as np
import pandas as pd
from ticket_closure_lib.transformers import DateColTransformer # noqa
from ticket_closure_lib.transformers import FeatureRemover # noqa
from ticket_closure_lib.transformers import OrdinalConverter # noqa
class TicketPredictor:
def __init__(self, model_file="model.pkl"):
self.mod = pickle.load(open(model_file, "rb"))
self.columns = [
"reassignment_count",
"reopen_count",
"sys_mod_count",
"opened_at",
"sys_created_at",
"sys_updated_at",
"contact_type",
"location",
"category",
"subcategory",
"u_symptom",
"cmdb_ci",
"impact",
"urgency",
"priority",
"assignment_group",
"u_priority_confirmation",
"notify",
"problem_id",
]
def predict_json(self, in_data):
if isinstance(in_data, dict):
in_data = [in_data]
assert isinstance(in_data, list), "Input data should be a list of dictionaries"
in_df = pd.DataFrame.from_records(in_data)
# make sure column order is right
in_df = in_df[self.columns]
for date_col in ["opened_at", "sys_created_at", "sys_updated_at"]:
in_df[date_col] = | pd.to_datetime(in_df[date_col], format="%d/%m/%Y %H:%M") | pandas.to_datetime |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Combined scheduling and planning models (deterministic and robust).
'''
import pyomo.environ as pyomo
from pyomo.opt import SolverStatus, TerminationCondition
import numpy as np
import time
import pandas as pd
import dill
import collections
from stn.deg import degradationModel, calc_p_fail
from stn.blocks import (blockScheduling, blockSchedulingRobust,
blockPlanning, blockPlanningRobust)
class stnModel(object):
"""Deterministic model of the STN."""
def __init__(self, stn=None):
self.Demand = {} # demand for products
if stn is None:
self.stn = stnStruct() # contains STN architecture
else:
self.stn = stn
self.m_list = []
self.gapmin = 100
self.gapmax = 0
self.gapmean = 0
self.alpha = 0.5
self.rid = 0
self.prefix = ''
self.rdir = 'results'
def solve(self, T_list, periods=1, solver='cplex',
solverparams=None,
save=False, trace=False, gantt=True, **kwargs):
"""
Solves the model
        T_list: [Ts, dTs, Tp, dTp] (scheduling horizon and step, planning horizon and step)
periods: number of rolling horizon periods
solver: specifies which solver to use
prefix: added to all file names
rdir: directory for result files
solverparams: dictionary of solver parameters
save: save results as .pyomo?
trace: generate trace?
gantt: generate gantt graphs?
"""
# Initialize solver and set parameters
ts = time.time()
self.solver = pyomo.SolverFactory(solver)
if solverparams is not None:
for key, value in solverparams.items():
self.solver.options[key] = value
# Rolling horizon
for period in range(0, periods):
if periods > 1:
rolling = True
else:
rolling = False
# Build model
self.build(T_list, period=period, rolling=rolling, **kwargs)
logfile = self.prfx + "STN.log"
# Solve model
results = self.solver.solve(self.model,
tee=True,
keepfiles=True,
symbolic_solver_labels=True,
logfile=logfile)
results.write()
# Check if solver exited normally
if ((results.solver.status == SolverStatus.ok) and
(results.solver.termination_condition ==
TerminationCondition.optimal or
results.solver.termination_condition ==
TerminationCondition.maxTimeLimit)):
# Calculate MIP Gap
obj = self.model.Obj()
gap = self.solver._gap
if gap is None:
gap = 0.0
self.gapmin = min(self.gapmin,
gap/obj*100)
self.gapmax = max(self.gapmax,
gap/obj*100)
self.gapmean = (self.gapmean
* period/(period+1)
+ (1 - period/(period + 1))
* gap/obj*100)
# Save results
if save:
with open(self.prfx+'output.txt', 'w') as f:
f.write("STN Output:")
self.model.display(ostream=f)
with open(self.prfx+'STN.pyomo', 'wb') as dill_file:
dill.dump(self.model, dill_file)
if gantt:
self.sb.gantt()
self.pb.gantt()
if trace:
self.sb.trace()
self.pb.trace()
if periods > 1:
self.transfer_next_period(**kwargs)
# Add current model to list
self.m_list.append(self.model)
else:
break
self.ttot = time.time() - ts
def build(self, T_list, objective="terminal", period=None, alpha=0.5,
extend=False, rdir='results', prefix='', rolling=False,
rid=None, **kwargs):
"""Build STN model."""
assert period is not None
self.rdir = rdir
self.prefix = prefix
if rid is not None:
self.rid = rid
else:
try:
df = pd.read_pickle(self.rdir+"/"+self.prefix+"results.pkl")
self.rid = max(df["id"]) + 1
except IOError:
pass
self.prfx = self.rdir + "/" + self.prefix + str(self.rid)
if rolling:
self.prfx += "_" + str(period)
self.model = pyomo.ConcreteModel()
m = self.model
stn = self.stn
m.cons = pyomo.ConstraintList()
m.ptransfer = pyomo.Var(stn.tasks, stn.units, stn.opmodes,
domain=pyomo.NonNegativeReals)
m.Btransfer = pyomo.Var(stn.tasks, stn.units, stn.opmodes,
domain=pyomo.NonNegativeReals)
m.tautransfer = pyomo.Var(stn.units, domain=pyomo.NonNegativeReals)
m.Dslack = pyomo.Var(stn.states, domain=pyomo.NonNegativeReals)
m.TotSlack = pyomo.Var(domain=pyomo.NonNegativeReals)
# replace D by Dmax if alpha != self.alpha
if alpha != self.alpha:
for j in stn.units:
for i in stn.I[j]:
for k in stn.O[j]:
tm = i + "-" + k
p = stn.p[i, j, k]
D = stn.deg[j].get_mu(tm, p)
eps = stn.deg[j].get_eps(alpha, tm, p)
# X = stn.deg[j].get_quantile(alpha, tm, p)
# stn.D[i, j, k] = 2*D - X
stn.D[i, j, k] = D*(1 + eps)
self.alpha = alpha
# scheduling and planning block
Ts = T_list[0]
dTs = T_list[1]
Tp = T_list[2]
dTp = T_list[3]
Ts_start = period * Ts
Tp_start = (period + 1) * Ts
Ts = Ts_start + Ts
if extend:
Tp = Ts_start + Tp
self.add_blocks([Ts_start, Ts, dTs], [Tp_start, Tp, dTp],
objective=objective, **kwargs)
# add continuity constraints to model
self.add_unit_constraints()
self.add_state_constraints()
self.add_deg_constraints(**kwargs)
# add objective function to model
self.add_objective()
def add_blocks(self, TIMEs, TIMEp, **kwargs):
"""Add scheduling and planning block to model."""
stn = self.stn
m = self.model
m.sb = pyomo.Block()
self.sb = blockScheduling(stn, TIMEs,
self.Demand, prfx=self.prfx, **kwargs)
self.sb.define_block(m.sb, **kwargs)
m.pb = pyomo.Block()
self.pb = blockPlanning(stn, TIMEp,
self.Demand, prfx=self.prfx, **kwargs)
self.pb.define_block(m.pb, **kwargs)
def add_unit_constraints(self):
"""Add unit allocation continuity constraints to model."""
m = self.model
stn = self.stn
# continuity of allocation
for j in stn.units:
rhs = 0
for i in stn.I[j]:
for k in stn.O[j]:
# Add processing time of tasks started in scheduling
# horizon
rhs2 = 0
rhs3 = 0
for tprime in self.sb.TIME[self.sb.TIME
>= self.sb.T
- stn.p[i, j, k]
+ self.sb.dT]:
rhs2 += (m.sb.W[i, j, k, tprime]
* (stn.p[i, j, k]
- (self.sb.T - tprime)))
rhs += rhs2
rhs3 += m.sb.B[i, j, k, tprime]
m.cons.add(m.ptransfer[i, j, k] == rhs2)
m.cons.add(m.Btransfer[i, j, k] == rhs3)
# Add time for maintenance started in scheduling horizon
rhs2 = 0
for tprime in self.sb.TIME[(self.sb.TIME
>= self.sb.T
- stn.tau[j]
+ self.sb.dT)]:
rhs2 += m.sb.M[j, tprime]*(stn.tau[j] - (self.sb.T - tprime))
rhs += rhs2
m.cons.add(m.pb.Ntransfer[j] == rhs) # TODO: should this time?
m.cons.add(m.tautransfer[j] == rhs2)
def add_state_constraints(self):
"""Add state continuity constraints to model."""
m = self.model
stn = self.stn
for s in stn.states:
# Calculate states at end of scheduling horizon
rhs = m.sb.S[s, self.sb.T - self.sb.dT]
for i in stn.T_[s]:
for j in stn.K[i]:
for k in stn.O[j]:
rhs += (stn.rho_[(i, s)]
* m.sb.B[i, j, k,
self.sb.T
- stn.p[i, j, k]])
# Subtract demand from last scheduling period
if (s, self.sb.TIME[0]) in self.Demand:
rhs -= self.Demand[s, self.sb.TIME[0]]
rhs += m.Dslack[s]
m.cons.add(m.sb.Sfin[s] == rhs)
m.cons.add(0 <= m.sb.Sfin[s] <= stn.C[s])
# Calculate amounts transfered into planning period
for i in stn.T_[s]:
for j in stn.K[i]:
for k in stn.O[j]:
for tprime in self.sb.TIME[self.sb.TIME
>= self.sb.T
- stn.p[i, j, k]
+ self.sb.dT]:
rhs += stn.rho_[(i, s)] * m.sb.B[i, j, k, tprime]
m.cons.add(m.pb.Stransfer[s] == rhs)
def add_deg_constraints(self, **kwargs):
"""Add residual life continuity constraints to model."""
stn = self.stn
m = self.model
for j in stn.units:
m.cons.add(m.pb.Rtransfer[j] == m.sb.R[j, self.sb.T - self.sb.dT])
def add_objective(self):
"""Add objective function."""
m = self.model
stn = self.stn
totslack = 0
for s in stn.states:
totslack += m.Dslack[s]
for t in m.sb.TIME:
totslack += m.sb.Sslack[s, t]
for t in m.pb.TIME:
totslack += m.pb.Dslack[s, t]
m.cons.add(m.TotSlack == totslack)
m.Obj = pyomo.Objective(expr=m.sb.Cost # cost scheduling blk
+ m.pb.Cost # cost planning blk
+ m.TotSlack*10000, # penalize slack vars
sense=pyomo.minimize)
def demand(self, state, time, Demand):
"""Add demand to model."""
self.Demand[state, time] = Demand
def uncertainty(self, alpha):
"""Set uncertainty set size parameter."""
self.alpha = alpha
def transfer_next_period(self, **kwargs):
"""
Transfer results from previous scheduling period to next (rolling
horizon).
"""
m = self.model
stn = self.stn
for s in stn.states:
stn.init[s] = m.sb.Sfin[s]()
for j in stn.units:
for i in stn.I[j]:
for k in stn.O[j]:
stn.pinit[i, j, k] = m.ptransfer[i, j, k]()
stn.Binit[i, j, k] = m.Btransfer[i, j, k]()
stn.tauinit[j] = m.tautransfer[j]()
if m.ptransfer[i, j, k]() < self.sb.dT/2:
stn.pinit[i, j, k] = 0
stn.Binit[i, j, k] = 0
if m.tautransfer[j]() < self.sb.dT/2:
stn.tauinit[j] = 0
stn.Rinit[j] = m.sb.R[j, self.sb.T - self.sb.dT]()
def loadres(self, prefix="", f="STN.pyomo", periods=0):
if periods == 0:
with open(prefix + f, 'rb') as dill_file:
self.model = dill.load(dill_file)
else:
for period in range(0, periods):
with open(prefix + str(period) + f, 'rb') as dill_file:
m = dill.load(dill_file)
self.m_list.append(m)
self.model = m
self.sb.b = m.sb
self.pb.b = m.pb
def get_gap(self):
return self.gapmax, self.gapmean, self.gapmin
def calc_p_fail(self, units=None, TP=None, periods=0, pb=True, save=True):
assert TP is not None
if units is None:
units = self.stn.units
elif type(units) == str:
units = set([units])
df = | pd.DataFrame(columns=units) | pandas.DataFrame |
from unittest.mock import patch
import pytest
from AWS_AACT_Pipeline.categorize_driver import Driver
from AWS_AACT_Pipeline.mock_db_manager import MockDatabaseManager
from AWS_AACT_Pipeline.categorizer import Categorizer
from AWS_AACT_Pipeline.mock_db import MockDatabase
import pandas as pd
def test_missing_json_file():
categorizer = Categorizer()
pytest.raises(Exception, categorizer.read_file_conditions, "fake_json")
def test_misformatted_json_file():
categorizer = Categorizer()
pytest.raises(Exception, categorizer.read_file_conditions, "misformatted_json")
def test_good_driver_call():
og_df = pd.DataFrame(columns=['color', 'nct_id'], index=['kylie','willy', 'riley', 'ben', 'jonah'])
og_df.loc['kylie'] = pd.Series({'color': "yellow", 'nct_id': 1})
og_df.loc['willy'] = pd.Series({'color': "turquoise", 'nct_id': 2})
og_df.loc['riley'] = pd.Series({'color': "blue", 'nct_id': 3})
og_df.loc['ben'] = pd.Series({'color': "blue", 'nct_id': 4})
og_df.loc['jonah'] = pd.Series({'color': "blue", 'nct_id': 5})
end_df = | pd.DataFrame(columns=['nct_id', 'color_category'], index=['kylie', 'willy', 'riley', 'ben', 'jonah']) | pandas.DataFrame |
# pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
from pandas.compat import range, lrange, lzip, u, zip
import operator
import re
import nose
import warnings
import os
import numpy as np
from numpy.testing import assert_array_equal
from pandas import period_range, date_range
from pandas.core.index import (Index, Float64Index, Int64Index, MultiIndex,
InvalidIndexError, NumericIndex)
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.period import PeriodIndex
from pandas.core.series import Series
from pandas.util.testing import (assert_almost_equal, assertRaisesRegexp,
assert_copy)
from pandas import compat
from pandas.compat import long
import pandas.util.testing as tm
import pandas.core.config as cf
from pandas.tseries.index import _to_m8
import pandas.tseries.offsets as offsets
import pandas as pd
from pandas.lib import Timestamp
class Base(object):
""" base class for index sub-class tests """
_holder = None
_compat_props = ['shape', 'ndim', 'size', 'itemsize', 'nbytes']
def verify_pickle(self,index):
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_pickle_compat_construction(self):
# this is testing for pickle compat
if self._holder is None:
return
# need an object to create with
self.assertRaises(TypeError, self._holder)
def test_numeric_compat(self):
idx = self.create_index()
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : idx * 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : 1 * idx)
div_err = "cannot perform __truediv__" if compat.PY3 else "cannot perform __div__"
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : idx / 1)
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : 1 / idx)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : idx // 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : 1 // idx)
def test_boolean_context_compat(self):
# boolean context compat
idx = self.create_index()
def f():
if idx:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_ndarray_compat_properties(self):
idx = self.create_index()
self.assertTrue(idx.T.equals(idx))
self.assertTrue(idx.transpose().equals(idx))
values = idx.values
for prop in self._compat_props:
self.assertEqual(getattr(idx, prop), getattr(values, prop))
# test for validity
idx.nbytes
idx.values.nbytes
class TestIndex(Base, tm.TestCase):
_holder = Index
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(
unicodeIndex = tm.makeUnicodeIndex(100),
strIndex = tm.makeStringIndex(100),
dateIndex = tm.makeDateIndex(100),
intIndex = tm.makeIntIndex(100),
floatIndex = tm.makeFloatIndex(100),
boolIndex = Index([True,False]),
empty = Index([]),
tuples = MultiIndex.from_tuples(lzip(['foo', 'bar', 'baz'],
[1, 2, 3]))
)
for name, ind in self.indices.items():
setattr(self, name, ind)
def create_index(self):
return Index(list('abcde'))
def test_wrong_number_names(self):
def testit(ind):
ind.names = ["apple", "banana", "carrot"]
for ind in self.indices.values():
assertRaisesRegexp(ValueError, "^Length", testit, ind)
def test_set_name_methods(self):
new_name = "This is the new name for this index"
indices = (self.dateIndex, self.intIndex, self.unicodeIndex,
self.empty)
for ind in indices:
original_name = ind.name
new_ind = ind.set_names([new_name])
self.assertEqual(new_ind.name, new_name)
self.assertEqual(ind.name, original_name)
res = ind.rename(new_name, inplace=True)
# should return None
self.assertIsNone(res)
self.assertEqual(ind.name, new_name)
self.assertEqual(ind.names, [new_name])
#with assertRaisesRegexp(TypeError, "list-like"):
# # should still fail even if it would be the right length
# ind.set_names("a")
with assertRaisesRegexp(ValueError, "Level must be None"):
ind.set_names("a", level=0)
# rename in place just leaves tuples and other containers alone
name = ('A', 'B')
ind = self.intIndex
ind.rename(name, inplace=True)
self.assertEqual(ind.name, name)
self.assertEqual(ind.names, [name])
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.strIndex).__name__):
hash(self.strIndex)
def test_new_axis(self):
new_index = self.dateIndex[None, :]
self.assertEqual(new_index.ndim, 2)
tm.assert_isinstance(new_index, np.ndarray)
def test_copy_and_deepcopy(self):
from copy import copy, deepcopy
for func in (copy, deepcopy):
idx_copy = func(self.strIndex)
self.assertIsNot(idx_copy, self.strIndex)
self.assertTrue(idx_copy.equals(self.strIndex))
new_copy = self.strIndex.copy(deep=True, name="banana")
self.assertEqual(new_copy.name, "banana")
new_copy2 = self.intIndex.copy(dtype=int)
self.assertEqual(new_copy2.dtype.kind, 'i')
def test_duplicates(self):
idx = Index([0, 0, 0])
self.assertFalse(idx.is_unique)
def test_sort(self):
self.assertRaises(TypeError, self.strIndex.sort)
def test_mutability(self):
self.assertRaises(TypeError, self.strIndex.__setitem__, 0, 'foo')
def test_constructor(self):
# regular instance creation
tm.assert_contains_all(self.strIndex, self.strIndex)
tm.assert_contains_all(self.dateIndex, self.dateIndex)
# casting
arr = np.array(self.strIndex)
index = Index(arr)
tm.assert_contains_all(arr, index)
self.assert_numpy_array_equal(self.strIndex, index)
# copy
arr = np.array(self.strIndex)
index = Index(arr, copy=True, name='name')
tm.assert_isinstance(index, Index)
self.assertEqual(index.name, 'name')
assert_array_equal(arr, index)
arr[0] = "SOMEBIGLONGSTRING"
self.assertNotEqual(index[0], "SOMEBIGLONGSTRING")
# what to do here?
# arr = np.array(5.)
# self.assertRaises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
self.assertRaises(TypeError, Index, 0)
def test_constructor_from_series(self):
expected = DatetimeIndex([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
s = Series([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
result = Index(s)
self.assertTrue(result.equals(expected))
result = DatetimeIndex(s)
self.assertTrue(result.equals(expected))
# GH 6273
# create from a series, passing a freq
s = Series(pd.to_datetime(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']))
result = DatetimeIndex(s, freq='MS')
expected = DatetimeIndex(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990'],freq='MS')
self.assertTrue(result.equals(expected))
df = pd.DataFrame(np.random.rand(5,3))
df['date'] = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']
result = DatetimeIndex(df['date'], freq='MS')
# GH 6274
# infer freq of same
result = pd.infer_freq(df['date'])
self.assertEqual(result,'MS')
def test_constructor_ndarray_like(self):
# GH 5460#issuecomment-44474502
# it should be possible to convert any object that satisfies the numpy
# ndarray interface directly into an Index
class ArrayLike(object):
def __init__(self, array):
self.array = array
def __array__(self, dtype=None):
return self.array
for array in [np.arange(5),
np.array(['a', 'b', 'c']),
date_range('2000-01-01', periods=3).values]:
expected = pd.Index(array)
result = pd.Index(ArrayLike(array))
self.assertTrue(result.equals(expected))
def test_index_ctor_infer_periodindex(self):
xp = period_range('2012-1-1', freq='M', periods=3)
rs = Index(xp)
assert_array_equal(rs, xp)
tm.assert_isinstance(rs, PeriodIndex)
def test_constructor_simple_new(self):
idx = Index([1, 2, 3, 4, 5], name='int')
result = idx._simple_new(idx, 'int')
self.assertTrue(result.equals(idx))
idx = Index([1.1, np.nan, 2.2, 3.0], name='float')
result = idx._simple_new(idx, 'float')
self.assertTrue(result.equals(idx))
idx = Index(['A', 'B', 'C', np.nan], name='obj')
result = idx._simple_new(idx, 'obj')
self.assertTrue(result.equals(idx))
def test_copy(self):
i = Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_legacy_pickle_identity(self):
# GH 8431
pth = tm.get_data_path()
s1 = pd.read_pickle(os.path.join(pth,'s1-0.12.0.pickle'))
s2 = pd.read_pickle(os.path.join(pth,'s2-0.12.0.pickle'))
self.assertFalse(s1.index.identical(s2.index))
self.assertFalse(s1.index.equals(s2.index))
def test_astype(self):
casted = self.intIndex.astype('i8')
# it works!
casted.get_loc(5)
# pass on name
self.intIndex.name = 'foobar'
casted = self.intIndex.astype('i8')
self.assertEqual(casted.name, 'foobar')
def test_compat(self):
self.strIndex.tolist()
def test_equals(self):
# same
self.assertTrue(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c'])))
# different length
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b'])))
# same length, different values
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'd'])))
# Must also be an Index
self.assertFalse(Index(['a', 'b', 'c']).equals(['a', 'b', 'c']))
def test_insert(self):
# GH 7256
# validate neg/pos inserts
result = Index(['b', 'c', 'd'])
#test 0th element
self.assertTrue(Index(['a', 'b', 'c', 'd']).equals(
result.insert(0, 'a')))
#test Nth element that follows Python list behavior
self.assertTrue(Index(['b', 'c', 'e', 'd']).equals(
result.insert(-1, 'e')))
# test a loc other than 0 or -1: positive and negative positions should agree
self.assertTrue(result.insert(1, 'z').equals(
result.insert(-2, 'z')))
#test empty
null_index = Index([])
self.assertTrue(Index(['a']).equals(
null_index.insert(0, 'a')))
def test_delete(self):
idx = Index(['a', 'b', 'c', 'd'], name='idx')
expected = Index(['b', 'c', 'd'], name='idx')
result = idx.delete(0)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
expected = Index(['a', 'b', 'c'], name='idx')
result = idx.delete(-1)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
with tm.assertRaises((IndexError, ValueError)):
# either IndexError or ValueError, depending on numpy version
result = idx.delete(5)
def test_identical(self):
# index
i1 = Index(['a', 'b', 'c'])
i2 = Index(['a', 'b', 'c'])
self.assertTrue(i1.identical(i2))
i1 = i1.rename('foo')
self.assertTrue(i1.equals(i2))
self.assertFalse(i1.identical(i2))
i2 = i2.rename('foo')
self.assertTrue(i1.identical(i2))
i3 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')])
i4 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')], tupleize_cols=False)
self.assertFalse(i3.identical(i4))
def test_is_(self):
ind = Index(range(10))
self.assertTrue(ind.is_(ind))
self.assertTrue(ind.is_(ind.view().view().view().view()))
self.assertFalse(ind.is_(Index(range(10))))
self.assertFalse(ind.is_(ind.copy()))
self.assertFalse(ind.is_(ind.copy(deep=False)))
self.assertFalse(ind.is_(ind[:]))
self.assertFalse(ind.is_(ind.view(np.ndarray).view(Index)))
self.assertFalse(ind.is_(np.array(range(10))))
# quasi-implementation dependent
self.assertTrue(ind.is_(ind.view()))
ind2 = ind.view()
ind2.name = 'bob'
self.assertTrue(ind.is_(ind2))
self.assertTrue(ind2.is_(ind))
# doesn't matter if Indices are *actually* views of underlying data,
self.assertFalse(ind.is_(Index(ind.values)))
arr = np.array(range(1, 11))
ind1 = Index(arr, copy=False)
ind2 = Index(arr, copy=False)
self.assertFalse(ind1.is_(ind2))
def test_asof(self):
d = self.dateIndex[0]
self.assertIs(self.dateIndex.asof(d), d)
self.assertTrue(np.isnan(self.dateIndex.asof(d - timedelta(1))))
d = self.dateIndex[-1]
self.assertEqual(self.dateIndex.asof(d + timedelta(1)), d)
d = self.dateIndex[0].to_datetime()
tm.assert_isinstance(self.dateIndex.asof(d), Timestamp)
def test_asof_datetime_partial(self):
idx = pd.date_range('2010-01-01', periods=2, freq='m')
expected = Timestamp('2010-01-31')
result = idx.asof('2010-02')
self.assertEqual(result, expected)
def test_nanosecond_index_access(self):
s = Series([Timestamp('20130101')]).values.view('i8')[0]
r = DatetimeIndex([s + 50 + i for i in range(100)])
x = Series(np.random.randn(100), index=r)
first_value = x.asof(x.index[0])
# this does not yet work, as parsing strings is done via dateutil
#self.assertEqual(first_value, x['2013-01-01 00:00:00.000000050+0000'])
self.assertEqual(first_value, x[Timestamp(np.datetime64('2013-01-01 00:00:00.000000050+0000', 'ns'))])
def test_argsort(self):
result = self.strIndex.argsort()
expected = np.array(self.strIndex).argsort()
self.assert_numpy_array_equal(result, expected)
def test_comparators(self):
index = self.dateIndex
element = index[len(index) // 2]
element = _to_m8(element)
arr = np.array(index)
def _check(op):
arr_result = op(arr, element)
index_result = op(index, element)
self.assertIsInstance(index_result, np.ndarray)
self.assert_numpy_array_equal(arr_result, index_result)
_check(operator.eq)
_check(operator.ne)
_check(operator.gt)
_check(operator.lt)
_check(operator.ge)
_check(operator.le)
def test_booleanindex(self):
boolIdx = np.repeat(True, len(self.strIndex)).astype(bool)
boolIdx[5:30:2] = False
subIndex = self.strIndex[boolIdx]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
subIndex = self.strIndex[list(boolIdx)]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
def test_fancy(self):
sl = self.strIndex[[1, 2, 3]]
for i in sl:
self.assertEqual(i, sl[sl.get_loc(i)])
def test_empty_fancy(self):
empty_farr = np.array([], dtype=np.float_)
empty_iarr = np.array([], dtype=np.int_)
empty_barr = np.array([], dtype=np.bool_)
# pd.DatetimeIndex is excluded, because it overrides getitem and should
# be tested separately.
for idx in [self.strIndex, self.intIndex, self.floatIndex]:
empty_idx = idx.__class__([])
values = idx.values
self.assertTrue(idx[[]].identical(empty_idx))
self.assertTrue(idx[empty_iarr].identical(empty_idx))
self.assertTrue(idx[empty_barr].identical(empty_idx))
# np.ndarray only accepts ndarray of int & bool dtypes, so should
# Index.
self.assertRaises(IndexError, idx.__getitem__, empty_farr)
def test_getitem(self):
arr = np.array(self.dateIndex)
exp = self.dateIndex[5]
exp = _to_m8(exp)
self.assertEqual(exp, arr[5])
def test_shift(self):
shifted = self.dateIndex.shift(0, timedelta(1))
self.assertIs(shifted, self.dateIndex)
shifted = self.dateIndex.shift(5, timedelta(1))
self.assert_numpy_array_equal(shifted, self.dateIndex + timedelta(5))
shifted = self.dateIndex.shift(1, 'B')
self.assert_numpy_array_equal(shifted, self.dateIndex + offsets.BDay())
shifted.name = 'shifted'
self.assertEqual(shifted.name, shifted.shift(1, 'D').name)
def test_intersection(self):
first = self.strIndex[:20]
second = self.strIndex[:10]
intersect = first.intersection(second)
self.assertTrue(tm.equalContents(intersect, second))
# Corner cases
inter = first.intersection(first)
self.assertIs(inter, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.intersection, 0.5)
idx1 = Index([1, 2, 3, 4, 5], name='idx')
# if target has the same name, it is preserved
idx2 = Index([3, 4, 5, 6, 7], name='idx')
expected2 = Index([3, 4, 5], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(result2.equals(expected2))
self.assertEqual(result2.name, expected2.name)
# if target name is different, it will be reset
idx3 = Index([3, 4, 5, 6, 7], name='other')
expected3 = Index([3, 4, 5], name=None)
result3 = idx1.intersection(idx3)
self.assertTrue(result3.equals(expected3))
self.assertEqual(result3.name, expected3.name)
# non monotonic
idx1 = Index([5, 3, 2, 4, 1], name='idx')
idx2 = Index([4, 7, 6, 5, 3], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(tm.equalContents(result2, expected2))
self.assertEqual(result2.name, expected2.name)
idx3 = Index([4, 7, 6, 5, 3], name='other')
result3 = idx1.intersection(idx3)
self.assertTrue(tm.equalContents(result3, expected3))
self.assertEqual(result3.name, expected3.name)
# non-monotonic non-unique
idx1 = Index(['A','B','A','C'])
idx2 = Index(['B','D'])
expected = Index(['B'], dtype='object')
result = idx1.intersection(idx2)
self.assertTrue(result.equals(expected))
def test_union(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
everything = self.strIndex[:20]
union = first.union(second)
self.assertTrue(tm.equalContents(union, everything))
# Corner cases
union = first.union(first)
self.assertIs(union, first)
union = first.union([])
self.assertIs(union, first)
union = Index([]).union(first)
self.assertIs(union, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.union, 0.5)
# preserve names
first.name = 'A'
second.name = 'A'
union = first.union(second)
self.assertEqual(union.name, 'A')
second.name = 'B'
union = first.union(second)
self.assertIsNone(union.name)
def test_add(self):
# - API change GH 8226
with tm.assert_produces_warning():
self.strIndex + self.strIndex
firstCat = self.strIndex.union(self.dateIndex)
secondCat = self.strIndex.union(self.strIndex)
if self.dateIndex.dtype == np.object_:
appended = np.append(self.strIndex, self.dateIndex)
else:
appended = np.append(self.strIndex, self.dateIndex.astype('O'))
self.assertTrue(tm.equalContents(firstCat, appended))
self.assertTrue(tm.equalContents(secondCat, self.strIndex))
tm.assert_contains_all(self.strIndex, firstCat)
tm.assert_contains_all(self.strIndex, secondCat)
tm.assert_contains_all(self.dateIndex, firstCat)
def test_append_multiple(self):
index = Index(['a', 'b', 'c', 'd', 'e', 'f'])
foos = [index[:2], index[2:4], index[4:]]
result = foos[0].append(foos[1:])
self.assertTrue(result.equals(index))
# empty
result = index.append([])
self.assertTrue(result.equals(index))
def test_append_empty_preserve_name(self):
left = Index([], name='foo')
right = Index([1, 2, 3], name='foo')
result = left.append(right)
self.assertEqual(result.name, 'foo')
left = Index([], name='foo')
right = Index([1, 2, 3], name='bar')
result = left.append(right)
self.assertIsNone(result.name)
def test_add_string(self):
# from bug report
index = Index(['a', 'b', 'c'])
index2 = index + 'foo'
self.assertNotIn('a', index2)
self.assertIn('afoo', index2)
def test_iadd_string(self):
index = pd.Index(['a', 'b', 'c'])
# doesn't fail test unless there is a check before `+=`
self.assertIn('a', index)
index += '_x'
self.assertIn('a_x', index)
def test_difference(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
answer = self.strIndex[10:20]
first.name = 'name'
# different names
result = first.difference(second)
self.assertTrue(tm.equalContents(result, answer))
self.assertEqual(result.name, None)
# same names
second.name = 'name'
result = first.difference(second)
self.assertEqual(result.name, 'name')
# with empty
result = first.difference([])
self.assertTrue(tm.equalContents(result, first))
self.assertEqual(result.name, first.name)
# with everything
result = first.difference(first)
self.assertEqual(len(result), 0)
self.assertEqual(result.name, first.name)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.difference, 0.5)
def test_symmetric_diff(self):
# smoke
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = Index([2, 3, 4, 5])
result = idx1.sym_diff(idx2)
expected = Index([1, 5])
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# __xor__ syntax
expected = idx1 ^ idx2
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# multiIndex
idx1 = MultiIndex.from_tuples(self.tuples)
idx2 = MultiIndex.from_tuples([('foo', 1), ('bar', 3)])
result = idx1.sym_diff(idx2)
expected = MultiIndex.from_tuples([('bar', 2), ('baz', 3), ('bar', 3)])
self.assertTrue(tm.equalContents(result, expected))
# nans:
# GH #6444, sorting of nans. Make sure the number of nans is right
# and the correct non-nan values are there. punt on sorting.
idx1 = Index([1, 2, 3, np.nan])
idx2 = Index([0, 1, np.nan])
result = idx1.sym_diff(idx2)
# expected = Index([0.0, np.nan, 2.0, 3.0, np.nan])
nans = pd.isnull(result)
self.assertEqual(nans.sum(), 2)
self.assertEqual((~nans).sum(), 3)
[self.assertIn(x, result) for x in [0.0, 2.0, 3.0]]
# other not an Index:
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = np.array([2, 3, 4, 5])
expected = Index([1, 5])
result = idx1.sym_diff(idx2)
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'idx1')
result = idx1.sym_diff(idx2, result_name='new_name')
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'new_name')
# other isn't iterable
with tm.assertRaises(TypeError):
Index(idx1,dtype='object') - 1
def test_pickle(self):
self.verify_pickle(self.strIndex)
self.strIndex.name = 'foo'
self.verify_pickle(self.strIndex)
self.verify_pickle(self.dateIndex)
def test_is_numeric(self):
self.assertFalse(self.dateIndex.is_numeric())
self.assertFalse(self.strIndex.is_numeric())
self.assertTrue(self.intIndex.is_numeric())
self.assertTrue(self.floatIndex.is_numeric())
def test_is_object(self):
self.assertTrue(self.strIndex.is_object())
self.assertTrue(self.boolIndex.is_object())
self.assertFalse(self.intIndex.is_object())
self.assertFalse(self.dateIndex.is_object())
self.assertFalse(self.floatIndex.is_object())
def test_is_all_dates(self):
self.assertTrue(self.dateIndex.is_all_dates)
self.assertFalse(self.strIndex.is_all_dates)
self.assertFalse(self.intIndex.is_all_dates)
def test_summary(self):
self._check_method_works(Index.summary)
# GH3869
ind = Index(['{other}%s', "~:{range}:0"], name='A')
result = ind.summary()
# shouldn't be formatted accidentally.
self.assertIn('~:{range}:0', result)
self.assertIn('{other}%s', result)
def test_format(self):
self._check_method_works(Index.format)
index = Index([datetime.now()])
formatted = index.format()
expected = [str(index[0])]
self.assertEqual(formatted, expected)
# 2845
index = Index([1, 2.0+3.0j, np.nan])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
# is this really allowed?
index = Index([1, 2.0+3.0j, None])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
self.strIndex[:0].format()
def test_format_with_name_time_info(self):
# bug I fixed 12/20/2011
inc = timedelta(hours=4)
dates = Index([dt + inc for dt in self.dateIndex], name='something')
formatted = dates.format(name=True)
self.assertEqual(formatted[0], 'something')
def test_format_datetime_with_time(self):
t = Index([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)])
result = t.format()
expected = ['2012-02-07 00:00:00', '2012-02-07 23:00:00']
self.assertEqual(len(result), 2)
self.assertEqual(result, expected)
def test_format_none(self):
values = ['a', 'b', 'c', None]
idx = Index(values)
idx.format()
self.assertIsNone(idx[3])
def test_take(self):
indexer = [4, 3, 0, 2]
result = self.dateIndex.take(indexer)
expected = self.dateIndex[indexer]
self.assertTrue(result.equals(expected))
def _check_method_works(self, method):
method(self.empty)
method(self.dateIndex)
method(self.unicodeIndex)
method(self.strIndex)
method(self.intIndex)
method(self.tuples)
def test_get_indexer(self):
idx1 = Index([1, 2, 3, 4, 5])
idx2 = Index([2, 4, 6])
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, [1, 3, -1])
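# 'pad'/'ffill' and 'backfill'/'bfill' are aliases and must return identical indexers.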
r1 = idx2.get_indexer(idx1, method='pad')
assert_almost_equal(r1, [-1, 0, 0, 1, 1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
assert_almost_equal(r1, [0, 0, 1, 1, 2])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
def test_slice_locs(self):
for dtype in [int, float]:
idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype))
n = len(idx)
self.assertEqual(idx.slice_locs(start=2), (2, n))
self.assertEqual(idx.slice_locs(start=3), (3, n))
self.assertEqual(idx.slice_locs(3, 8), (3, 6))
self.assertEqual(idx.slice_locs(5, 10), (3, n))
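# float endpoints are accepted, and values absent from the index fall back to positional (searchsorted-style) bounds.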
self.assertEqual(idx.slice_locs(5.0, 10.0), (3, n))
self.assertEqual(idx.slice_locs(4.5, 10.5), (3, 8))
self.assertEqual(idx.slice_locs(end=8), (0, 6))
self.assertEqual(idx.slice_locs(end=9), (0, 7))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(8, 2), (2, 6))
self.assertEqual(idx2.slice_locs(8.5, 1.5), (2, 6))
self.assertEqual(idx2.slice_locs(7, 3), (2, 5))
self.assertEqual(idx2.slice_locs(10.5, -1), (0, n))
def test_slice_locs_dup(self):
idx = Index(['a', 'a', 'b', 'c', 'd', 'd'])
self.assertEqual(idx.slice_locs('a', 'd'), (0, 6))
self.assertEqual(idx.slice_locs(end='d'), (0, 6))
self.assertEqual(idx.slice_locs('a', 'c'), (0, 4))
self.assertEqual(idx.slice_locs('b', 'd'), (2, 6))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs('d', 'a'), (0, 6))
self.assertEqual(idx2.slice_locs(end='a'), (0, 6))
self.assertEqual(idx2.slice_locs('d', 'b'), (0, 4))
self.assertEqual(idx2.slice_locs('c', 'a'), (2, 6))
for dtype in [int, float]:
idx = Index(np.array([10, 12, 12, 14], dtype=dtype))
self.assertEqual(idx.slice_locs(12, 12), (1, 3))
self.assertEqual(idx.slice_locs(11, 13), (1, 3))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(12, 12), (1, 3))
self.assertEqual(idx2.slice_locs(13, 11), (1, 3))
def test_slice_locs_na(self):
idx = Index([np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, start=1.5)
self.assertRaises(KeyError, idx.slice_locs, end=1.5)
self.assertEqual(idx.slice_locs(1), (1, 3))
self.assertEqual(idx.slice_locs(np.nan), (0, 3))
idx = Index([np.nan, np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, np.nan)
def test_drop(self):
n = len(self.strIndex)
dropped = self.strIndex.drop(self.strIndex[lrange(5, 10)])
expected = self.strIndex[lrange(5) + lrange(10, n)]
self.assertTrue(dropped.equals(expected))
self.assertRaises(ValueError, self.strIndex.drop, ['foo', 'bar'])
dropped = self.strIndex.drop(self.strIndex[0])
expected = self.strIndex[1:]
self.assertTrue(dropped.equals(expected))
ser = Index([1, 2, 3])
dropped = ser.drop(1)
expected = Index([2, 3])
self.assertTrue(dropped.equals(expected))
def test_tuple_union_bug(self):
import pandas
import numpy as np
aidx1 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],
dtype=[('num', int), ('let', 'a1')])
aidx2 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B'), (1, 'C'), (2,
'C')], dtype=[('num', int), ('let', 'a1')])
idx1 = pandas.Index(aidx1)
idx2 = pandas.Index(aidx2)
# intersection broken?
int_idx = idx1.intersection(idx2)
# needs to be 1d like idx1 and idx2
expected = idx1[:4] # pandas.Index(sorted(set(idx1) & set(idx2)))
self.assertEqual(int_idx.ndim, 1)
self.assertTrue(int_idx.equals(expected))
# union broken
union_idx = idx1.union(idx2)
expected = idx2
self.assertEqual(union_idx.ndim, 1)
self.assertTrue(union_idx.equals(expected))
def test_is_monotonic_incomparable(self):
index = Index([5, datetime.now(), 7])
self.assertFalse(index.is_monotonic)
self.assertFalse(index.is_monotonic_decreasing)
def test_get_set_value(self):
values = np.random.randn(100)
date = self.dateIndex[67]
assert_almost_equal(self.dateIndex.get_value(values, date),
values[67])
self.dateIndex.set_value(values, date, 10)
self.assertEqual(values[67], 10)
def test_isin(self):
values = ['foo', 'bar', 'quux']
idx = Index(['qux', 'baz', 'foo', 'bar'])
result = idx.isin(values)
expected = np.array([False, False, True, True])
self.assert_numpy_array_equal(result, expected)
# empty, return dtype bool
idx = Index([])
result = idx.isin(values)
self.assertEqual(len(result), 0)
self.assertEqual(result.dtype, np.bool_)
def test_isin_nan(self):
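# object-dtype Index only matches the exact missing-value object passed in, while Float64Index treats any NaN-like value as NaN.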
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([np.nan]), [False, True])
self.assert_numpy_array_equal(
Index(['a', pd.NaT]).isin([pd.NaT]), [False, True])
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([float('nan')]), [False, False])
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([pd.NaT]), [False, False])
# Float64Index overrides isin, so must be checked separately
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([np.nan]), [False, True])
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([float('nan')]), [False, True])
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([pd.NaT]), [False, True])
def test_isin_level_kwarg(self):
def check_idx(idx):
values = idx.tolist()[-2:] + ['nonexisting']
expected = np.array([False, False, True, True])
self.assert_numpy_array_equal(expected, idx.isin(values, level=0))
self.assert_numpy_array_equal(expected, idx.isin(values, level=-1))
self.assertRaises(IndexError, idx.isin, values, level=1)
self.assertRaises(IndexError, idx.isin, values, level=10)
self.assertRaises(IndexError, idx.isin, values, level=-2)
self.assertRaises(KeyError, idx.isin, values, level=1.0)
self.assertRaises(KeyError, idx.isin, values, level='foobar')
idx.name = 'foobar'
self.assert_numpy_array_equal(expected,
idx.isin(values, level='foobar'))
self.assertRaises(KeyError, idx.isin, values, level='xyzzy')
self.assertRaises(KeyError, idx.isin, values, level=np.nan)
check_idx(Index(['qux', 'baz', 'foo', 'bar']))
# Float64Index overrides isin, so must be checked separately
check_idx(Float64Index([1.0, 2.0, 3.0, 4.0]))
def test_boolean_cmp(self):
values = [1, 2, 3, 4]
idx = Index(values)
res = (idx == values)
self.assert_numpy_array_equal(res,np.array([True,True,True,True],dtype=bool))
def test_get_level_values(self):
result = self.strIndex.get_level_values(0)
self.assertTrue(result.equals(self.strIndex))
def test_slice_keep_name(self):
idx = Index(['a', 'b'], name='asdf')
self.assertEqual(idx.name, idx[1:].name)
def test_join_self(self):
# instance attributes of the form self.<name>Index
indices = 'unicode', 'str', 'date', 'int', 'float'
kinds = 'outer', 'inner', 'left', 'right'
for index_kind in indices:
res = getattr(self, '{0}Index'.format(index_kind))
for kind in kinds:
joined = res.join(res, how=kind)
self.assertIs(res, joined)
def test_indexing_doesnt_change_class(self):
idx = Index([1, 2, 3, 'a', 'b', 'c'])
self.assertTrue(idx[1:3].identical(
pd.Index([2, 3], dtype=np.object_)))
self.assertTrue(idx[[0,1]].identical(
pd.Index([1, 2], dtype=np.object_)))
def test_outer_join_sort(self):
left_idx = Index(np.random.permutation(15))
right_idx = tm.makeDateIndex(10)
with tm.assert_produces_warning(RuntimeWarning):
joined = left_idx.join(right_idx, how='outer')
# right_idx in this case because DatetimeIndex has join precedence over
# Int64Index
expected = right_idx.astype(object).union(left_idx.astype(object))
tm.assert_index_equal(joined, expected)
def test_nan_first_take_datetime(self):
idx = Index([pd.NaT, Timestamp('20130101'), Timestamp('20130102')])
res = idx.take([-1, 0, 1])
exp = Index([idx[-1], idx[0], idx[1]])
tm.assert_index_equal(res, exp)
def test_reindex_preserves_name_if_target_is_list_or_ndarray(self):
# GH6552
idx = pd.Index([0, 1, 2])
dt_idx = pd.date_range('20130101', periods=3)
idx.name = None
self.assertEqual(idx.reindex([])[0].name, None)
self.assertEqual(idx.reindex(np.array([]))[0].name, None)
self.assertEqual(idx.reindex(idx.tolist())[0].name, None)
self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, None)
self.assertEqual(idx.reindex(idx.values)[0].name, None)
self.assertEqual(idx.reindex(idx.values[:-1])[0].name, None)
# Must preserve name even if dtype changes.
self.assertEqual(idx.reindex(dt_idx.values)[0].name, None)
self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, None)
idx.name = 'foobar'
self.assertEqual(idx.reindex([])[0].name, 'foobar')
self.assertEqual(idx.reindex(np.array([]))[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.tolist())[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.values)[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.values[:-1])[0].name, 'foobar')
# Must preserve name even if dtype changes.
self.assertEqual(idx.reindex(dt_idx.values)[0].name, 'foobar')
self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, 'foobar')
def test_reindex_preserves_type_if_target_is_empty_list_or_array(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
self.assertEqual(get_reindex_type([]), np.object_)
self.assertEqual(get_reindex_type(np.array([])), np.object_)
self.assertEqual(get_reindex_type(np.array([], dtype=np.int64)),
np.object_)
def test_reindex_doesnt_preserve_type_if_target_is_empty_index(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
self.assertEqual(get_reindex_type(pd.Int64Index([])), np.int64)
self.assertEqual(get_reindex_type(pd.Float64Index([])), np.float64)
self.assertEqual(get_reindex_type(pd.DatetimeIndex([])), np.datetime64)
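# an empty MultiIndex target should likewise preserve its per-level dtypes.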
reindexed = idx.reindex(pd.MultiIndex([pd.Int64Index([]),
pd.Float64Index([])],
[[], []]))[0]
self.assertEqual(reindexed.levels[0].dtype.type, np.int64)
self.assertEqual(reindexed.levels[1].dtype.type, np.float64)
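# Shared arithmetic/ufunc behaviour exercised by the concrete numeric index test cases below (TestFloat64Index, TestInt64Index).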
class Numeric(Base):
def test_numeric_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
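# didx holds the element-wise squares, i.e. the expected result of idx * idx below.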
didx = self._holder(np.arange(5,dtype='int64')**2)
result = idx * 1
tm.assert_index_equal(result, idx)
result = 1 * idx
tm.assert_index_equal(result, idx)
result = idx * idx
tm.assert_index_equal(result, didx)
result = idx / 1
tm.assert_index_equal(result, idx)
result = idx // 1
tm.assert_index_equal(result, idx)
result = idx * np.array(5,dtype='int64')
tm.assert_index_equal(result, self._holder(np.arange(5,dtype='int64')*5))
result = idx * np.arange(5,dtype='int64')
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='int64'))
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='float64')+0.1)
tm.assert_index_equal(result,
Float64Index(np.arange(5,dtype='float64')*(np.arange(5,dtype='float64')+0.1)))
# invalid
self.assertRaises(TypeError, lambda : idx * date_range('20130101',periods=5))
self.assertRaises(ValueError, lambda : idx * self._holder(np.arange(3)))
self.assertRaises(ValueError, lambda : idx * np.array([1,2]))
def test_explicit_conversions(self):
# GH 8608
# add/sub are overridden explicitly for Float/Int Index
idx = self._holder(np.arange(5,dtype='int64'))
# float conversions
arr = np.arange(5,dtype='int64')*3.2
expected = Float64Index(arr)
fidx = idx * 3.2
tm.assert_index_equal(fidx,expected)
fidx = 3.2 * idx
tm.assert_index_equal(fidx,expected)
# interops with numpy arrays
expected = Float64Index(arr)
a = np.zeros(5,dtype='float64')
result = fidx - a
tm.assert_index_equal(result,expected)
expected = Float64Index(-arr)
a = np.zeros(5,dtype='float64')
result = a - fidx
tm.assert_index_equal(result,expected)
def test_ufunc_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
result = np.sin(idx)
expected = Float64Index(np.sin(np.arange(5,dtype='int64')))
tm.assert_index_equal(result, expected)
class TestFloat64Index(Numeric, tm.TestCase):
_holder = Float64Index
_multiprocess_can_split_ = True
def setUp(self):
self.mixed = Float64Index([1.5, 2, 3, 4, 5])
self.float = Float64Index(np.arange(5) * 2.5)
def create_index(self):
return Float64Index(np.arange(5,dtype='float64'))
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.float).__name__):
hash(self.float)
def test_repr_roundtrip(self):
for ind in (self.mixed, self.float):
tm.assert_index_equal(eval(repr(ind)), ind)
def check_is_index(self, i):
self.assertIsInstance(i, Index)
self.assertNotIsInstance(i, Float64Index)
def check_coerce(self, a, b, is_float_index=True):
self.assertTrue(a.equals(b))
if is_float_index:
self.assertIsInstance(b, Float64Index)
else:
self.check_is_index(b)
def test_constructor(self):
# explicit construction
index = Float64Index([1,2,3,4,5])
self.assertIsInstance(index, Float64Index)
self.assertTrue((index.values == np.array([1,2,3,4,5],dtype='float64')).all())
index = Float64Index(np.array([1,2,3,4,5]))
self.assertIsInstance(index, Float64Index)
index = Float64Index([1.,2,3,4,5])
self.assertIsInstance(index, Float64Index)
index = Float64Index(np.array([1.,2,3,4,5]))
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, float)
index = Float64Index(np.array([1.,2,3,4,5]),dtype=np.float32)
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, np.float64)
index = Float64Index(np.array([1,2,3,4,5]),dtype=np.float32)
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, np.float64)
# nan handling
result = Float64Index([np.nan, np.nan])
self.assertTrue(pd.isnull(result.values).all())
result = Float64Index(np.array([np.nan]))
self.assertTrue(pd.isnull(result.values).all())
result = Index(np.array([np.nan]))
self.assertTrue(pd.isnull(result.values).all())
def test_constructor_invalid(self):
# invalid
self.assertRaises(TypeError, Float64Index, 0.)
self.assertRaises(TypeError, Float64Index, ['a','b',0.])
self.assertRaises(TypeError, Float64Index, [Timestamp('20130101')])
def test_constructor_coerce(self):
self.check_coerce(self.mixed,Index([1.5, 2, 3, 4, 5]))
self.check_coerce(self.float,Index(np.arange(5) * 2.5))
self.check_coerce(self.float,Index(np.array(np.arange(5) * 2.5, dtype=object)))
def test_constructor_explicit(self):
# these don't auto convert
self.check_coerce(self.float,Index((np.arange(5) * 2.5), dtype=object),
is_float_index=False)
self.check_coerce(self.mixed,Index([1.5, 2, 3, 4, 5],dtype=object),
is_float_index=False)
def test_astype(self):
result = self.float.astype(object)
self.assertTrue(result.equals(self.float))
self.assertTrue(self.float.equals(result))
self.check_is_index(result)
i = self.mixed.copy()
i.name = 'foo'
result = i.astype(object)
self.assertTrue(result.equals(i))
self.assertTrue(i.equals(result))
self.check_is_index(result)
def test_equals(self):
i = Float64Index([1.0,2.0])
self.assertTrue(i.equals(i))
self.assertTrue(i.identical(i))
i2 = Float64Index([1.0,2.0])
self.assertTrue(i.equals(i2))
i = Float64Index([1.0,np.nan])
self.assertTrue(i.equals(i))
self.assertTrue(i.identical(i))
i2 = Float64Index([1.0,np.nan])
self.assertTrue(i.equals(i2))
def test_get_loc_na(self):
idx = Float64Index([np.nan, 1, 2])
self.assertEqual(idx.get_loc(1), 1)
self.assertEqual(idx.get_loc(np.nan), 0)
idx = Float64Index([np.nan, 1, np.nan])
self.assertEqual(idx.get_loc(1), 1)
self.assertRaises(KeyError, idx.slice_locs, np.nan)
def test_contains_nans(self):
i = Float64Index([1.0, 2.0, np.nan])
self.assertTrue(np.nan in i)
def test_contains_not_nans(self):
i = Float64Index([1.0, 2.0, np.nan])
self.assertTrue(1.0 in i)
def test_doesnt_contain_all_the_things(self):
i = Float64Index([np.nan])
self.assertFalse(i.isin([0]).item())
self.assertFalse(i.isin([1]).item())
self.assertTrue(i.isin([np.nan]).item())
def test_nan_multiple_containment(self):
i = Float64Index([1.0, np.nan])
np.testing.assert_array_equal(i.isin([1.0]), np.array([True, False]))
np.testing.assert_array_equal(i.isin([2.0, np.pi]),
np.array([False, False]))
np.testing.assert_array_equal(i.isin([np.nan]),
np.array([False, True]))
np.testing.assert_array_equal(i.isin([1.0, np.nan]),
np.array([True, True]))
i = Float64Index([1.0, 2.0])
np.testing.assert_array_equal(i.isin([np.nan]),
np.array([False, False]))
def test_astype_from_object(self):
index = Index([1.0, np.nan, 0.2], dtype='object')
result = index.astype(float)
expected = Float64Index([1.0, np.nan, 0.2])
tm.assert_equal(result.dtype, expected.dtype)
tm.assert_index_equal(result, expected)
class TestInt64Index(Numeric, tm.TestCase):
_holder = Int64Index
_multiprocess_can_split_ = True
def setUp(self):
self.index = Int64Index(np.arange(0, 20, 2))
def create_index(self):
return Int64Index(np.arange(5,dtype='int64'))
def test_too_many_names(self):
def testit():
self.index.names = ["roger", "harold"]
assertRaisesRegexp(ValueError, "^Length", testit)
def test_constructor(self):
# pass list, coerce fine
index = Int64Index([-5, 0, 1, 2])
expected = np.array([-5, 0, 1, 2], dtype=np.int64)
self.assert_numpy_array_equal(index, expected)
# from iterable
index = Int64Index(iter([-5, 0, 1, 2]))
self.assert_numpy_array_equal(index, expected)
# scalar raise Exception
self.assertRaises(TypeError, Int64Index, 5)
# copy
arr = self.index.values
new_index = Int64Index(arr, copy=True)
self.assert_numpy_array_equal(new_index, self.index)
val = arr[0] + 3000
# this should not change index
arr[0] = val
self.assertNotEqual(new_index[0], val)
def test_constructor_corner(self):
arr = np.array([1, 2, 3, 4], dtype=object)
index = Int64Index(arr)
self.assertEqual(index.values.dtype, np.int64)
self.assertTrue(index.equals(arr))
# preventing casting
arr = np.array([1, '2', 3, '4'], dtype=object)
with tm.assertRaisesRegexp(TypeError, 'casting'):
Int64Index(arr)
arr_with_floats = [0, 2, 3, 4, 5, 1.25, 3, -1]
with tm.assertRaisesRegexp(TypeError, 'casting'):
Int64Index(arr_with_floats)
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.index).__name__):
hash(self.index)
def test_copy(self):
i = Int64Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Int64Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_coerce_list(self):
# coerce things
arr = Index([1, 2, 3, 4])
tm.assert_isinstance(arr, Int64Index)
# but not if explicit dtype passed
arr = Index([1, 2, 3, 4], dtype=object)
tm.assert_isinstance(arr, Index)
def test_dtype(self):
self.assertEqual(self.index.dtype, np.int64)
def test_is_monotonic(self):
self.assertTrue(self.index.is_monotonic)
self.assertTrue(self.index.is_monotonic_increasing)
self.assertFalse(self.index.is_monotonic_decreasing)
index = Int64Index([4, 3, 2, 1])
self.assertFalse(index.is_monotonic)
self.assertTrue(index.is_monotonic_decreasing)
index = Int64Index([1])
self.assertTrue(index.is_monotonic)
self.assertTrue(index.is_monotonic_increasing)
self.assertTrue(index.is_monotonic_decreasing)
def test_is_monotonic_na(self):
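# any missing value (NaN/NaT) should make both monotonic checks return False.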
examples = [Index([np.nan]),
Index([np.nan, 1]),
Index([1, 2, np.nan]),
Index(['a', 'b', np.nan]),
pd.to_datetime(['NaT']),
pd.to_datetime(['NaT', '2000-01-01']),
pd.to_datetime(['2000-01-01', 'NaT', '2000-01-02']),
pd.to_timedelta(['1 day', 'NaT']),
]
for index in examples:
self.assertFalse(index.is_monotonic_increasing)
self.assertFalse(index.is_monotonic_decreasing)
def test_equals(self):
same_values = Index(self.index, dtype=object)
self.assertTrue(self.index.equals(same_values))
self.assertTrue(same_values.equals(self.index))
def test_identical(self):
i = Index(self.index.copy())
self.assertTrue(i.identical(self.index))
same_values_different_type = Index(i, dtype=object)
self.assertFalse(i.identical(same_values_different_type))
i = self.index.copy(dtype=object)
i = i.rename('foo')
same_values = Index(i, dtype=object)
self.assertTrue(same_values.identical(self.index.copy(dtype=object)))
self.assertFalse(i.identical(self.index))
self.assertTrue(Index(same_values, name='foo', dtype=object
).identical(i))
self.assertFalse(
self.index.copy(dtype=object)
.identical(self.index.copy(dtype='int64')))
def test_get_indexer(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target)
expected = np.array([0, -1, 1, -1, 2, -1, 3, -1, 4, -1])
self.assert_numpy_array_equal(indexer, expected)
def test_get_indexer_pad(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target, method='pad')
expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4])
self.assert_numpy_array_equal(indexer, expected)
def test_get_indexer_backfill(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target, method='backfill')
expected = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 5])
self.assert_numpy_array_equal(indexer, expected)
def test_join_outer(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# input is not monotonic, but the outer join result is guaranteed sorted
res, lidx, ridx = self.index.join(other, how='outer',
return_indexers=True)
noidx_res = self.index.join(other, how='outer')
self.assertTrue(res.equals(noidx_res))
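# expected joined values plus positional indexers into each side; -1 marks entries with no match on that side.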
eres = Int64Index([0, 1, 2, 4, 5, 6, 7, 8, 10, 12, 14, 16, 18, 25])
elidx = np.array([0, -1, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, 9, -1],
dtype=np.int64)
eridx = np.array([-1, 3, 4, -1, 5, -1, 0, -1, -1, 1, -1, -1, -1, 2],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='outer',
return_indexers=True)
noidx_res = self.index.join(other_mono, how='outer')
self.assertTrue(res.equals(noidx_res))
eridx = np.array([-1, 0, 1, -1, 2, -1, 3, -1, -1, 4, -1, -1, -1, 5],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
def test_join_inner(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
res, lidx, ridx = self.index.join(other, how='inner',
return_indexers=True)
# no guarantee of sortedness, so sort for comparison purposes
ind = res.argsort()
res = res.take(ind)
lidx = lidx.take(ind)
ridx = ridx.take(ind)
eres = Int64Index([2, 12])
elidx = np.array([1, 6])
eridx = np.array([4, 1])
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='inner',
return_indexers=True)
res2 = self.index.intersection(other_mono)
self.assertTrue(res.equals(res2))
eridx = np.array([1, 4])
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
def test_join_left(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
res, lidx, ridx = self.index.join(other, how='left',
return_indexers=True)
eres = self.index
eridx = np.array([-1, 4, -1, -1, -1, -1, 1, -1, -1, -1],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assertIsNone(lidx)
self.assert_numpy_array_equal(ridx, eridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='left',
return_indexers=True)
eridx = np.array([-1, 1, -1, -1, -1, -1, 4, -1, -1, -1],
dtype=np.int64)
tm.assert_isinstance(res, Int64Index)
self.assertTrue(res.equals(eres))
self.assertIsNone(lidx)
self.assert_numpy_array_equal(ridx, eridx)
# non-unique
"""
idx = Index([1,1,2,5])
idx2 = Index([1,2,5,7,9])
res, lidx, ridx = idx2.join(idx, how='left', return_indexers=True)
eres = idx2
eridx = np.array([0, 2, 3, -1, -1])
elidx = np.array([0, 1, 2, 3, 4])
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
"""
def test_join_right(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
other_mono = Int64Index([1, 2, 5, 7, 12, 25])
# not monotonic
res, lidx, ridx = self.index.join(other, how='right',
return_indexers=True)
eres = other
elidx = np.array([-1, 6, -1, -1, 1, -1],
dtype=np.int64)
tm.assert_isinstance(other, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assertIsNone(ridx)
# monotonic
res, lidx, ridx = self.index.join(other_mono, how='right',
return_indexers=True)
eres = other_mono
elidx = np.array([-1, 1, -1, -1, 6, -1],
dtype=np.int64)
tm.assert_isinstance(other, Int64Index)
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assertIsNone(ridx)
# non-unique
"""
idx = Index([1,1,2,5])
idx2 = Index([1,2,5,7,9])
res, lidx, ridx = idx.join(idx2, how='right', return_indexers=True)
eres = idx2
elidx = np.array([0, 2, 3, -1, -1])
eridx = np.array([0, 1, 2, 3, 4])
self.assertTrue(res.equals(eres))
self.assert_numpy_array_equal(lidx, elidx)
self.assert_numpy_array_equal(ridx, eridx)
idx = Index([1,1,2,5])
idx2 = Index([1,2,5,9,7])
res = idx.join(idx2, how='right', return_indexers=False)
eres = idx2
self.assert(res.equals(eres))
"""
def test_join_non_int_index(self):
other = Index([3, 6, 7, 8, 10], dtype=object)
outer = self.index.join(other, how='outer')
outer2 = other.join(self.index, how='outer')
expected = Index([0, 2, 3, 4, 6, 7, 8, 10, 12, 14,
16, 18], dtype=object)
self.assertTrue(outer.equals(outer2))
self.assertTrue(outer.equals(expected))
inner = self.index.join(other, how='inner')
inner2 = other.join(self.index, how='inner')
expected = Index([6, 8, 10], dtype=object)
self.assertTrue(inner.equals(inner2))
self.assertTrue(inner.equals(expected))
left = self.index.join(other, how='left')
self.assertTrue(left.equals(self.index))
left2 = other.join(self.index, how='left')
self.assertTrue(left2.equals(other))
right = self.index.join(other, how='right')
self.assertTrue(right.equals(other))
right2 = other.join(self.index, how='right')
self.assertTrue(right2.equals(self.index))
def test_join_non_unique(self):
left = Index([4, 4, 3, 3])
joined, lidx, ridx = left.join(left, return_indexers=True)
exp_joined = Index([3, 3, 3, 3, 4, 4, 4, 4])
self.assertTrue(joined.equals(exp_joined))
exp_lidx = np.array([2, 2, 3, 3, 0, 0, 1, 1], dtype=np.int64)
self.assert_numpy_array_equal(lidx, exp_lidx)
exp_ridx = np.array([2, 3, 2, 3, 0, 1, 0, 1], dtype=np.int64)
self.assert_numpy_array_equal(ridx, exp_ridx)
def test_join_self(self):
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = self.index.join(self.index, how=kind)
self.assertIs(self.index, joined)
def test_intersection(self):
other = Index([1, 2, 3, 4, 5])
result = self.index.intersection(other)
expected = np.sort(np.intersect1d(self.index.values, other.values))
self.assert_numpy_array_equal(result, expected)
result = other.intersection(self.index)
expected = np.sort(np.asarray(np.intersect1d(self.index.values,
other.values)))
self.assert_numpy_array_equal(result, expected)
def test_intersect_str_dates(self):
dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
i1 = Index(dt_dates, dtype=object)
i2 = Index(['aa'], dtype=object)
res = i2.intersection(i1)
self.assertEqual(len(res), 0)
def test_union_noncomparable(self):
from datetime import datetime, timedelta
# corner case, non-Int64Index
now = datetime.now()
other = Index([now + timedelta(i) for i in range(4)], dtype=object)
result = self.index.union(other)
expected = np.concatenate((self.index, other))
self.assert_numpy_array_equal(result, expected)
result = other.union(self.index)
expected = np.concatenate((other, self.index))
self.assert_numpy_array_equal(result, expected)
def test_cant_or_shouldnt_cast(self):
# can't
data = ['foo', 'bar', 'baz']
self.assertRaises(TypeError, Int64Index, data)
# shouldn't
data = ['0', '1', '2']
self.assertRaises(TypeError, Int64Index, data)
def test_view_Index(self):
self.index.view(Index)
def test_prevent_casting(self):
result = self.index.astype('O')
self.assertEqual(result.dtype, np.object_)
def test_take_preserve_name(self):
index = Int64Index([1, 2, 3, 4], name='foo')
taken = index.take([3, 0, 1])
self.assertEqual(index.name, taken.name)
def test_int_name_format(self):
from pandas import Series, DataFrame
index = Index(['a', 'b', 'c'], name=0)
s = Series(lrange(3), index)
df = DataFrame(lrange(3), index=index)
repr(s)
repr(df)
def test_print_unicode_columns(self):
df = pd.DataFrame(
{u("\u05d0"): [1, 2, 3], "\u05d1": [4, 5, 6], "c": [7, 8, 9]})
repr(df.columns) # should not raise UnicodeDecodeError
def test_repr_summary(self):
with cf.option_context('display.max_seq_items', 10):
r = repr(pd.Index(np.arange(1000)))
self.assertTrue(len(r) < 100)
self.assertTrue("..." in r)
def test_repr_roundtrip(self):
tm.assert_index_equal(eval(repr(self.index)), self.index)
def test_unicode_string_with_unicode(self):
idx = Index(lrange(1000))
if compat.PY3:
str(idx)
else:
compat.text_type(idx)
def test_bytestring_with_unicode(self):
idx = Index(lrange(1000))
if compat.PY3:
bytes(idx)
else:
str(idx)
def test_slice_keep_name(self):
idx = Int64Index([1, 2], name='asdf')
self.assertEqual(idx.name, idx[1:].name)
class TestDatetimeIndex(Base, tm.TestCase):
_holder = DatetimeIndex
_multiprocess_can_split_ = True
def create_index(self):
return date_range('20130101',periods=5)
def test_pickle_compat_construction(self):
pass
def test_numeric_compat(self):
super(TestDatetimeIndex, self).test_numeric_compat()
if not compat.PY3_2:
for f in [lambda : np.timedelta64(1, 'D').astype('m8[ns]') * pd.date_range('2000-01-01', periods=3),
lambda : pd.date_range('2000-01-01', periods=3) * np.timedelta64(1, 'D').astype('m8[ns]') ]:
self.assertRaises(TypeError, f)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index=date_range('20130101',periods=3,tz='US/Eastern',name='foo')
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_reindex_preserves_tz_if_target_is_empty_list_or_array(self):
# GH7774
index = date_range('20130101', periods=3, tz='US/Eastern')
self.assertEqual(str(index.reindex([])[0].tz), 'US/Eastern')
self.assertEqual(str(index.reindex(np.array([]))[0].tz), 'US/Eastern')
class TestPeriodIndex(Base, tm.TestCase):
_holder = PeriodIndex
_multiprocess_can_split_ = True
def create_index(self):
return period_range('20130101',periods=5,freq='D')
def test_pickle_compat_construction(self):
pass
class TestTimedeltaIndex(Base, tm.TestCase):
_holder = TimedeltaIndex
_multiprocess_can_split_ = True
def create_index(self):
return pd.to_timedelta(range(5),unit='d') + pd.offsets.Hour(1)
def test_numeric_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
didx = self._holder(np.arange(5,dtype='int64')**2)
result = idx * 1
tm.assert_index_equal(result, idx)
result = 1 * idx
tm.assert_index_equal(result, idx)
result = idx / 1
tm.assert_index_equal(result, idx)
result = idx // 1
tm.assert_index_equal(result, idx)
result = idx * np.array(5,dtype='int64')
tm.assert_index_equal(result, self._holder(np.arange(5,dtype='int64')*5))
result = idx * np.arange(5,dtype='int64')
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='int64'))
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='float64')+0.1)
tm.assert_index_equal(result,
Float64Index(np.arange(5,dtype='float64')*(np.arange(5,dtype='float64')+0.1)))
# invalid
self.assertRaises(TypeError, lambda : idx * idx)
self.assertRaises(ValueError, lambda : idx * self._holder(np.arange(3)))
self.assertRaises(ValueError, lambda : idx * np.array([1,2]))
def test_pickle_compat_construction(self):
pass
class TestMultiIndex(Base, tm.TestCase):
_holder = MultiIndex
_multiprocess_can_split_ = True
_compat_props = ['shape', 'ndim', 'size', 'itemsize']
def setUp(self):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=self.index_names, verify_integrity=False)
def create_index(self):
return self.index
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_labels_dtypes(self):
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
self.assertTrue(i.labels[0].dtype == 'int8')
self.assertTrue(i.labels[1].dtype == 'int8')
i = MultiIndex.from_product([['a'],range(40)])
self.assertTrue(i.labels[1].dtype == 'int8')
i = MultiIndex.from_product([['a'],range(400)])
self.assertTrue(i.labels[1].dtype == 'int16')
i = MultiIndex.from_product([['a'],range(40000)])
self.assertTrue(i.labels[1].dtype == 'int32')
i = pd.MultiIndex.from_product([['a'],range(1000)])
self.assertTrue((i.labels[0]>=0).all())
self.assertTrue((i.labels[1]>=0).all())
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.index).__name__):
hash(self.index)
def test_set_names_and_rename(self):
# so long as these are synonyms, we don't need to test set_names
self.assertEqual(self.index.rename, self.index.set_names)
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
self.assertEqual(self.index.names, self.index_names)
self.assertEqual(ind.names, new_names)
with assertRaisesRegexp(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
self.assertIsNone(res)
self.assertEqual(ind.names, new_names2)
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
self.assertEqual(self.index.names, self.index_names)
self.assertEqual(ind.names, [new_names[0], self.index_names[1]])
res = ind.set_names(new_names2[0], level=0, inplace=True)
self.assertIsNone(res)
self.assertEqual(ind.names, [new_names2[0], self.index_names[1]])
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
self.assertEqual(self.index.names, self.index_names)
self.assertEqual(ind.names, new_names)
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
self.assertIsNone(res)
self.assertEqual(ind.names, new_names2)
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels, labels = self.index.levels, self.index.labels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
self.assertEqual(len(actual), len(expected))
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
assert_almost_equal(act, exp)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1], inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels, labels = self.index.levels, self.index.labels
major_labels, minor_labels = labels
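# rotate the existing label codes; the modulo keeps every code a valid position into its level.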
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
self.assertEqual(len(actual), len(expected))
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
assert_almost_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1], inplace=True)
self.assertIsNone(inplace_return)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assertRaisesRegexp(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assertRaisesRegexp(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assertRaisesRegexp(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# scalar input shouldn't raise a generic error; it should demand list-like input
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# scalar input shouldn't raise a generic error; it should demand list-like input
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# scalar input shouldn't raise a generic error; it should demand list-like input
with tm.assertRaisesRegexp(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assertRaisesRegexp(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assertRaisesRegexp(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assertRaisesRegexp(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assertRaisesRegexp(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assertRaisesRegexp(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with assertRaisesRegexp(TypeError, mutable_regex):
levels[0] = levels[0]
with assertRaisesRegexp(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with assertRaisesRegexp(TypeError, mutable_regex):
labels[0] = labels[0]
with assertRaisesRegexp(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with assertRaisesRegexp(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
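# _tuples caches the materialised tuple values; mutating levels/labels in place must invalidate that cache.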
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
self.assertIsNotNone(mi1._tuples)
# make sure level setting works
new_vals = mi1.set_levels(levels2).values
assert_almost_equal(vals2, new_vals)
# non-inplace doesn't kill _tuples [implementation detail]
assert_almost_equal(mi1._tuples, vals)
# and values is still same too
assert_almost_equal(mi1.values, vals)
# inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
assert_almost_equal(mi1.values, vals2)
# make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
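# with every label code set to 0, each tuple is the first element of each level: (1, 'a').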
exp_values = np.array([(long(1), 'a')] * 6, dtype=object)
new_values = mi2.set_labels(labels2).values
# not inplace shouldn't change
assert_almost_equal(mi2._tuples, vals2)
# should have correct values
assert_almost_equal(exp_values, new_values)
# and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
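# copy=True must decouple the MultiIndex from the input arrays, so mutating them afterwards leaves it unchanged.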
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
self.assertEqual(mi.labels[0][0], val)
labels[0] = 15
self.assertEqual(mi.labels[0][0], val)
val = levels[0]
levels[0] = "PANDA"
self.assertEqual(mi.levels[0][0], val)
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays(
[lev1, lev2],
names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sortlevel()
self.assertIsNone(df.is_copy)
self.assertEqual(df.index.names, ('Name', 'Number'))
df = df.set_value(('grethe', '4'), 'one', 99.34)
self.assertIsNone(df.is_copy)
self.assertEqual(df.index.names, ('Name', 'Number'))
def test_names(self):
# names are assigned in __init__
names = self.index_names
level_names = [level.name for level in self.index.levels]
self.assertEqual(names, level_names)
# setting bad names on existing
index = self.index
assertRaisesRegexp(ValueError, "^Length of names", setattr, index,
"names", list(index.names) + ["third"])
assertRaisesRegexp(ValueError, "^Length of names", setattr, index,
"names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
assertRaisesRegexp(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
assertRaisesRegexp(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
self.assertEqual(ind_names, level_names)
def test_reference_duplicate_name(self):
idx = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')], names=['x', 'x'])
self.assertTrue(idx._reference_duplicate_name('x'))
idx = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')], names=['x', 'y'])
self.assertFalse(idx._reference_duplicate_name('x'))
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with assertRaisesRegexp(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
def test_constructor_single_level(self):
single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]],
names=['first'])
tm.assert_isinstance(single_level, Index)
self.assertNotIsInstance(single_level, MultiIndex)
self.assertEqual(single_level.name, 'first')
single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]])
self.assertIsNone(single_level.name)
def test_constructor_no_levels(self):
assertRaisesRegexp(ValueError, "non-zero number of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assertRaisesRegexp(TypeError, both_re):
MultiIndex(levels=[])
with tm.assertRaisesRegexp(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
assertRaisesRegexp(ValueError, "Length of levels and labels must be"
" the same", MultiIndex, levels=levels,
labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
# important to check that it's looking at the right thing.
with tm.assertRaisesRegexp(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assertRaisesRegexp(ValueError, label_error):
| MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]]) | pandas.core.index.MultiIndex |
#!/usr/bin/env python
'''
<NAME> October 2018
Scripts for looking at and evaluating input data files for dvmdostem.
Generally data has been prepared by M. Lindgren of SNAP for the IEM project and
consists of directories of well labeled .tif images, with one image for each
timestep.
This script has (or will have) a variety of routines for summarizing the data
and displaying plots that will let us look for problems, missing data, or
anomalies.
'''
import os
import sys
import subprocess
import glob
import pickle
import multiprocessing
import time
import datetime as dt
from osgeo import gdal
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.ticker as ticker
TMP_DATA = 'climatology-intermediate-data'
def timeseries_summary_stats_and_plots(base_path, secondary_path_list):
'''
'''
# Decades for projected, truncated first
fx_periods = [
(2006,2010),(2010,2020),(2020,2030),(2030,2040),(2040,2050),
(2050,2060),(2060,2070),(2070,2080),(2080,2090),(2090,2100)
]
# Decades for historic, truncated at end
hist_periods = [
(1901,1911),(1911,1921),(1921,1931),(1931,1941),(1941,1951),
(1951,1961),(1961,1971),(1971,1981),(1981,1991),(1991,2001),
(2001,2011),(2011,2015)
]
procs = []
for i in secondary_path_list:
if 'pr_total' in i.lower():
units = 'mm month-1'
elif 'tas_mean' in i.lower():
units = 'degrees C'
elif 'vap_mean' in i.lower():
units = 'hPa'
elif 'rsds_mean' in i.lower():
units = 'MJ-m2-d1'
elif 'hurs_mean' in i.lower():
units = 'percent'
else:
print("ERROR! hmmm can't find variable in {}".format(i))
if '_cru' in i.lower():
periods = hist_periods
elif '_mri' in i.lower():
periods = fx_periods
elif '_ncar' in i.lower():
periods = fx_periods
secondary_path = i
print("MAIN PROCESS! [{}] Starting worker...".format(os.getpid()))
p = multiprocessing.Process(target=worker_func, args=(base_path, secondary_path, units, periods))
procs.append(p)
p.start()
print("Done starting processes. Looping to set join on each process...")
for p in procs:
p.join()
print("DONE! Plots should be saved...")
def worker_func(base_path, secondary_path, units, periods):
'''
'''
print("worker function! pid:{}".format(os.getpid()))
print(" [{}] {}".format(os.getpid(), base_path))
print(" [{}] {}".format(os.getpid(), secondary_path))
print(" [{}] {}".format(os.getpid(), units))
monthlies_figure = get_monthlies_figure(
base_path, secondary_path,
title='\n'.join((base_path, secondary_path)),
units=units,
src='fresh',
save_intermediates=False,
madata=None
)
overview_figure, period_averages = get_overview_figure(
periods,
base_path, secondary_path,
title='\n'.join((base_path, secondary_path)),
units=units,
src='fresh', # can be: fresh, pickle, or passed
save_intermediates=False,
padata=None
)
individual_figs, _ = get_period_avg_figures(
periods,
base_path, secondary_path,
title=os.path.dirname(secondary_path),
units=units,
src='passed',
padata=period_averages
)
# Create multi-page pdf document
import matplotlib.backends.backend_pdf
ofname = "climatology_{}.pdf".format(secondary_path.split("/")[0])
print("Building PDF with many images: {}".format(ofname))
pdf = matplotlib.backends.backend_pdf.PdfPages(ofname)
pdf.savefig(monthlies_figure)
pdf.savefig(overview_figure)
for f in individual_figs:
pdf.savefig(f)
pdf.close()
print("Done saving pdf: {}".format(ofname))
def create_vrt(filelist, ofname):
'''
Creates a GDAL vrt (virtual file format) for a series of input files.
Expects the each of the files in the filelist to be a single band GeoTiff.
The files will be combined into a single .vrt file with one Band for each
of the input files. The single VRT file may then be further manipulated with
GDAL (i.e take the average over all the bands).
Parameters
----------
filelist : list of strings (paths) to files that will be combined
ofname : string for a filename that will be written
Returns
-------
None
Use Cases, Examples
-------------------
- Create a monthly or decadal summary file for a set of images representing
a timeseries (e.g. tifs that will be pre-processed and turned to netcdf files
for dvmdostem runs).
'''
basename = os.path.basename(ofname)
basename_noext, ext = os.path.splitext(basename)
temporary_filelist_file = os.path.join("/tmp/", "filelist-pid-{}-{}.txt".format(os.getpid(), basename_noext))
with open(temporary_filelist_file, 'w') as f:
f.write("\n".join(filelist))
result = subprocess.check_call([
'gdalbuildvrt',
'-overwrite',
'-separate',
ofname,
'-input_file_list', temporary_filelist_file
])
os.remove(temporary_filelist_file)
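# Illustrative usage sketch (the file pattern is hypothetical): stack one year of
# monthly GeoTiffs into a single 12-band VRT for further processing with GDAL, e.g.
#
#   monthly_files = sorted(glob.glob('/path/to/tas_mean_C_iem_cru_TS40_*_1901.tif'))
#   create_vrt(monthly_files, '/tmp/tas_1901.vrt')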
def average_over_bands(ifname, bands='all'):
'''
Given an input file (`ifname`), this function computes the average over all
the bands and returns the result. Assumes the bands are named Band1, Band2,
etc.
Parameters
----------
ifname : str
A multi-band file that can be opened and read with GDAL. Expects that all
bands have data and share the same spatial extents. Values of -9999 or below
are treated as nodata and masked out.
bands : str
One of 'all', 'first10', or 'first3'. Selects a subset of bands for faster
processing for testing and development.
Returns
-------
avg : numpy masked array
Returned array is the same shape as an individual band in the input file,
and with each pixel being the average of the pixel values in all of the
input file's bands.
'''
ds = gdal.Open(ifname)
print(" [ DESCRIPTION ]: ", ds.GetDescription())
print(" [ RASTER BAND COUNT ]: ", ds.RasterCount)
print(" [ RASTER Y SIZE ]: ", ds.RasterYSize)
print(" [ RASTER X SIZE ]: ", ds.RasterXSize)
if bands == 'all':
band_range = list(range(1, ds.RasterCount+1))
elif bands == 'first10':
band_range = list(range(1, 10+1))
elif bands == 'first3':
band_range = list(range(1, 3+1))
print(" [ AVERAGE OVER BANDS ]: {}".format(len(band_range)))
print(" [ START BAND ]: {}".format(band_range[0]))
print(" [ END BAND ]: {}".format(band_range[-1]))
# allocate a storage location
running_sum = np.ma.masked_less_equal(np.zeros((ds.RasterYSize, ds.RasterXSize)), -9999)
for band in band_range:
dsb = ds.GetRasterBand(band)
if dsb is None:
print("huh??")
# continue (? as per example here: https://pcjericks.github.io/py-gdalogr-cookbook/raster_layers.html)
masked_data = np.ma.masked_less_equal(dsb.ReadAsArray(), -9999)
running_sum += masked_data
print("adding band: {} band min/max: {}/{} running_sum min/max: {}/{}".format(
band,
masked_data.min(), masked_data.max(),
running_sum.min(), running_sum.max()
))
# Compute average
avg = running_sum / float(len(band_range))
# Close gdal file
ds = None
return avg
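# Illustrative usage sketch (the .vrt path is hypothetical): average all bands of a
# VRT built with create_vrt(); pixels at or below -9999 are masked and excluded.
#
#   annual_mean = average_over_bands('/tmp/tas_1901.vrt', bands='all')
#   print(annual_mean.min(), annual_mean.max())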
def read_period_averages(periods):
'''
Reads pickled period average data from the TMP_DATA directory. Expects files
to be in a further sub-directory, period-averages, and have names
like: "pa-{start}-{end}.pickle".
Parameters
----------
periods : list of tuples
Each tuple should have values (start, end) that are used to define the
period.
Returns
-------
period_averages : list
A list of (masked) numpy arrays that have been un-pickled from the TMP_DATA
directory. The pickles are expected to be the period averages built using
other routines in this script.
'''
print("Reading period average pickles into list...")
period_averages = []
for i, (start, end) in enumerate(periods):
path = os.path.join(TMP_DATA, 'period-averages-pid{}'.format(os.getpid()), 'pa-{}-{}.pickle'.format(start, end))
pa = pickle.load(open(path, 'rb'))
period_averages.append(pa)
print("Done reading period average pickles into list.")
return period_averages
def read_monthly_pickles(months=list(range(1,13))):
print("reading monthly pickle files for months {}...".format(months))
mavgs = []
for m in months:
path = os.path.join(
TMP_DATA,
'month-averages-pid{}'.format(os.getpid()),
'month-{:02d}.pickle'.format(m)
)
ma = pickle.load(open(path, 'rb'))
mavgs.append(ma)
print("Returning monthly averages list..")
return mavgs
def calculate_period_averages(periods, base_path, secondary_path, save_intermediates=False):
'''Given a stack of tif files, one file for each month, this routine will
calculate the averages for the supplied periods. Periods are expected to be
selections of years, i.e. 1901 to 1911.
Parameters
----------
periods : list of tuples
each tuple has a start and end year for the period
base_path : str
path on the file system where files are located
secondary_path : str
remainder of path on file system where files will be found. The secondary
path string is expected to be something like this:
"ar5_MRI-CGCM3_rcp85_{month:}_{year:}.tif"
with the one set of braces for the month one set of braces for the year.
This function will fill the braces to match any month and the years
specified in the periods tuples
save_intermediates : bool
when true, period average array will be pickled for each period. Will be
saved like so 'climatology/period-averages/pa-{}-{}.pickle'
Returns
-------
list of 2D masked numpy arrays
'''
# Ensure there is a place to put the vrt files
path = os.path.join(TMP_DATA, 'period-averages-pid{}'.format(os.getpid()))
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
# Make the VRTs for the periods
for i, (start, end) in enumerate(periods):
print("[ period {} ] Making vrt for period {} to {} (range {})".format(i, start, end, list(range(start, end))))
filelist = []
for year in range(start, end):
final_secondary_path = secondary_path.format(month="*", year="{:04d}")
#print os.path.join(base_path, final_secondary_path.format(year))
single_year_filelist = sorted(glob.glob(os.path.join(base_path, final_secondary_path.format(year))))
#print "Length of single year filelist {}".format(len(single_year_filelist))
filelist += single_year_filelist
print("Length of full filelist: {} ".format(len(filelist)))
vrtp = os.path.join(TMP_DATA, 'period-averages-pid{}'.format(os.getpid()), "period-{}-{}.vrt".format(start, end))
create_vrt(filelist, vrtp)
# Calculate the period averages from the VRT files
period_averages = []
for i, (start, end) in enumerate(periods):
# Find the average over the selected range
vrtp = os.path.join(TMP_DATA, 'period-averages-pid{}'.format(os.getpid()), "period-{}-{}.vrt".format(start, end))
pa = average_over_bands(vrtp, bands='all')
period_averages.append(pa)
if save_intermediates:
# Make sure there is a place to put our pickles
path = os.path.join(TMP_DATA, 'period-averages-pid{}'.format(os.getpid()))
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
print("Dumping pickle for period {} to {}".format(start, end))
pickle.dump(pa, open(os.path.join(path, "pa-{}-{}.pickle".format(start, end)), 'wb'))
# Clean up any intermediate files.
if not save_intermediates:
papath = os.path.join(TMP_DATA, 'period-averages-pid{}'.format(os.getpid()))
for f in os.listdir(papath):
os.remove(os.path.join(papath, f))
os.rmdir(papath)
print("Returning period averages list...")
return period_averages
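# Illustrative usage sketch (the base path is hypothetical; the template follows the
# {month}/{year} convention described in the docstring above):
#
#   periods = [(1901, 1911), (1911, 1921)]
#   pa = calculate_period_averages(
#       periods, '/path/to/input',
#       'ar5_MRI-CGCM3_rcp85/ar5_MRI-CGCM3_rcp85_{month:}_{year:}.tif',
#       save_intermediates=False)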
def calculate_monthly_averages(months, base_path, secondary_path, save_intermediates=False):
'''
'''
# Make sure there is a place to put our pickles and vrt files
intermediates_path = os.path.join(TMP_DATA, 'month-averages-pid{}'.format(os.getpid()))
try:
os.makedirs(intermediates_path)
except OSError:
if not os.path.isdir(intermediates_path):
raise
# Build the vrt files
print("Creating monthly VRT files...")
for im, MONTH in enumerate(months[:]):
final_secondary_path = secondary_path.format(month="{:02d}", year="*").format(im+1)
filelist = sorted(glob.glob(os.path.join(base_path, final_secondary_path)))
if len(filelist) < 1:
print("ERROR! No files found in {}".format( os.path.join(base_path, final_secondary_path) ))
vrt_path = os.path.join(intermediates_path,"month-{:02d}.vrt".format(im+1))
create_vrt(filelist, vrt_path)
print("Computing monthly averages from monthly VRT files...")
# make list of expected input vrt paths
ivp_list = [os.path.join(intermediates_path,"month-{:02d}.vrt".format(im)) for im in range(1, len(months)+1)]
monthly_averages = [average_over_bands(ivp, bands='all') for ivp in ivp_list]
if save_intermediates:
print("Saving pickles...")
for im, ma in enumerate(monthly_averages):
pp = os.path.join(intermediates_path, "month-{:02d}.pickle".format(im+1))
pickle.dump(ma, open(pp, 'wb'))
print("Done saving pickles...")
# Clean up any intermediate files.
if not save_intermediates:
mapath = os.path.join(TMP_DATA, 'month-averages-pid{}'.format(os.getpid()))
for f in os.listdir(mapath):
os.remove(os.path.join(mapath, f))
os.rmdir(mapath)
print("Returning monthly_averages list...")
return monthly_averages
def get_monthlies_figure(base_path, secondary_path, title, units,
src='fresh', save_intermediates=True, madata=None ):
'''
Creates a single figure with 12 subplots, each showing the average for that
month across the timeseries.
'''
months = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
if src == 'fresh':
monthly_averages = calculate_monthly_averages(months, base_path, secondary_path, save_intermediates=save_intermediates)
elif src == 'pickle':
monthly_averages = read_monthly_pickles(months=list(range(1,13)))
elif src == 'passed':
monthly_averages = madata
else:
print("Invalid argument for src! '{}'".format(src))
vmax = np.max([avg.max() for avg in monthly_averages])
vmin = np.min([avg.min() for avg in monthly_averages])
print("vmax: {} vmin: {}".format(vmax, vmin))
print("Creating monthlies figure...")
fig, axes = plt.subplots(figsize=(11,8.5), nrows=3, ncols=4, sharex=True, sharey=True)
imgs = []
for ax, avg, month in zip(axes.flat, monthly_averages, months):
im = ax.imshow(avg, vmin=vmin, vmax=vmax, cmap='gist_ncar')
imgs.append(im)
ax.set_title(month)
cbar = fig.colorbar(imgs[0], ax=axes.ravel().tolist())
cbar.set_label(units)
fig.suptitle(title)
print("Done creating monthlies figure.")
return fig
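# Illustrative usage sketch (arguments are hypothetical):
#
#   fig = get_monthlies_figure(
#       '/path/to/input', 'ar5_MRI-CGCM3_rcp85/ar5_MRI-CGCM3_rcp85_{month:}_{year:}.tif',
#       title='MRI-CGCM3 tas', units='degrees C', src='fresh',
#       save_intermediates=False, madata=None)
#   fig.savefig('monthlies.png')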
def get_overview_figure(periods, base_path, secondary_path, title='',
units='', src='fresh', save_intermediates=True, padata=None):
'''
Creates and returns a matplotlib figure that has ??
Parameters
----------
Returns
-------
fig : matplotlib figure instance
'''
if src == 'fresh':
period_averages = calculate_period_averages(periods, base_path, secondary_path, save_intermediates=save_intermediates)
elif src == 'pickle':
period_averages = read_period_averages(periods)
elif src == 'passed':
period_averages = padata
else:
print("Invalid argument for src! '{}'".format(src))
print("Converting to stacked masked array...")
pa2 = np.ma.stack(period_averages)
vmax = pa2.max()
vmin = pa2.min()
print("vmax: {} vmin: {}".format(vmax, vmin))
NCOLS = 4 # fixed number of cols, may add more rows
NROWS = len(period_averages) // NCOLS
if (len(period_averages) % NCOLS) > 0:
NROWS += 1
if len(period_averages) < NCOLS:
NCOLS = len(period_averages)
NROWS = 1
overview_fig, axes = plt.subplots(nrows=NROWS, ncols=NCOLS, sharex=True, sharey=True)
overview_fig.set_size_inches((11, 8.5), forward=True)
imgs = [] # in case we need to manipulate the images all at once
for ax, avg, period in zip(axes.flat, period_averages, periods):
print("plotting image for period:", period)
# Setting vmax and vmin normalized the colorbars across all images
im = ax.imshow(avg, vmin=vmin, vmax=vmax, cmap='gist_ncar')
ax.set_title('{} to {}'.format(period[0], period[1]))
imgs.append(im)
# set a colorbar on the first axes
cbar = overview_fig.colorbar(imgs[0], ax=axes.ravel().tolist())
cbar.set_label(units)
overview_fig.suptitle(title)
return overview_fig, period_averages
def get_period_avg_figures(periods, base_path, secondary_path,
title='', units='', src='fresh', save_intermediates=True, padata=None):
'''
Parameters
----------
Returns
-------
'''
if src == 'fresh':
period_averages = calculate_period_averages(periods, base_path, secondary_path, save_intermediates=save_intermediates)
elif src == 'pickle':
period_averages = read_period_averages(periods)
elif src == 'passed':
period_averages = padata
else:
print("Invalid argument for src! '{}'".format(src))
print("Converting to stacked masked array...")
pa2 = np.ma.stack(period_averages)
vmax = pa2.max()
vmin = pa2.min()
print("vmax: {} vmin: {}".format(vmax, vmin))
ind_figures = []
for i, ((start,end), periodavg) in enumerate(zip(periods, pa2)):
fig = plt.figure()
fig.suptitle(title) #fontsize=8
im = plt.imshow(periodavg, vmin=vmin, vmax=vmax, cmap='gist_ncar')
ax = fig.axes[0]
ax.set_title('Average, {} to {}'.format(start, end))
cbar = plt.colorbar()
cbar.set_label(units)
ind_figures.append(fig)
return ind_figures, padata
def worker_func2(f):
if f == 'file3':
time.sleep(1)
if f == 'file7':
time.sleep(5)
print("will open, read, average {}".format(f))
return f
def worker_func3(in_file_path):
'''
'''
# Deduce month and year from file name
bname = os.path.basename(in_file_path)
n, ext = os.path.splitext(bname)
parts = n.split('_')
month, year = [int(p) for p in parts[-2:]]
date = dt.date(year=year, month=month, day=1)
# Open the file, get some stats
ds = gdal.Open(in_file_path)
ds_array = ds.ReadAsArray()
ds_m = np.ma.masked_less_equal(ds_array, -9999)
data_dict = dict(
fname=bname,
date=date,
statewide_mean=ds_m.mean(),
statewide_min=ds_m.min(),
statewide_max=ds_m.max(),
statewide_std=ds_m.std()
)
return data_dict
def generate_spatial_summary_stats(base_path, secondary_path_list):
'''
'''
# This produces a bunch of csv files with statewide averages
for sec_path in secondary_path_list[0:]:
files = sorted(glob.glob(os.path.join(base_path, sec_path.format(month='*', year='*'))))
p = multiprocessing.Pool()
results = p.map(worker_func3, files[0:])
p.close()
p.join()
s_results = sorted(results, key=lambda k: k['date'])
stats_path = "SPATIAL_SUMMARY_STATS_{}.csv".format(sec_path.split('/')[0])
import pandas as pd
df = pd.DataFrame(s_results)
df.to_csv(stats_path)
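# Illustrative usage sketch (templates are hypothetical): each template produces one
# SPATIAL_SUMMARY_STATS_*.csv holding the statewide mean/min/max/std per monthly file.
#
#   generate_spatial_summary_stats(
#       '/path/to/input',
#       ['ar5_MRI-CGCM3_rcp85/ar5_MRI-CGCM3_rcp85_{month:}_{year:}.tif'])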
def plot_timeseries_of_spatial_summary_stats():
'''
'''
# Build this automatically:
# - look for SPATIAL_SUMMARY_STATS_*
ss_file_list = [
'SPATIAL_SUMMARY_STATS_hurs_mean_pct_ar5_MRI-CGCM3_rcp85_2006_2100_fix.csv',
'SPATIAL_SUMMARY_STATS_hurs_mean_pct_ar5_NCAR-CCSM4_rcp85_2006_2100_fix.csv',
'SPATIAL_SUMMARY_STATS_hurs_mean_pct_iem_CRU-TS40_historical_1901_2015_fix.csv',
'SPATIAL_SUMMARY_STATS_pr_total_mm_ar5_MRI-CGCM3_rcp85_2006_2100.csv',
'SPATIAL_SUMMARY_STATS_pr_total_mm_ar5_NCAR-CCSM4_rcp85_2006_2100.csv',
'SPATIAL_SUMMARY_STATS_pr_total_mm_iem_cru_TS40_1901_2015.csv',
'SPATIAL_SUMMARY_STATS_rsds_mean_MJ-m2-d1_ar5_MRI-CGCM3_rcp85_2006_2100_fix.csv',
'SPATIAL_SUMMARY_STATS_rsds_mean_MJ-m2-d1_ar5_NCAR-CCSM4_rcp85_2006_2100_fix.csv',
'SPATIAL_SUMMARY_STATS_rsds_mean_MJ-m2-d1_iem_CRU-TS40_historical_1901_2015_fix.csv',
'SPATIAL_SUMMARY_STATS_tas_mean_C_ar5_MRI-CGCM3_rcp85_2006_2100.csv',
'SPATIAL_SUMMARY_STATS_tas_mean_C_ar5_NCAR-CCSM4_rcp85_2006_2100.csv',
'SPATIAL_SUMMARY_STATS_tas_mean_C_iem_cru_TS40_1901_2015.csv',
'SPATIAL_SUMMARY_STATS_vap_mean_hPa_ar5_MRI-CGCM3_rcp85_2006_2100_fix.csv',
'SPATIAL_SUMMARY_STATS_vap_mean_hPa_ar5_NCAR-CCSM4_rcp85_2006_2100_fix.csv',
'SPATIAL_SUMMARY_STATS_vap_mean_hPa_iem_CRU-TS40_historical_1901_2015_fix.csv',
]
# Create multi-page pdf document
import matplotlib.backends.backend_pdf
ofname = "climatology_statewide_averages.pdf".format()
print("Saving PDF: {}".format(ofname))
pdf = matplotlib.backends.backend_pdf.PdfPages(ofname)
var_list = ['tas_mean','pr_total','rsds_mean','vap_mean','hurs_mean']
unit_list = ['celsius', 'mm month-1', 'MJ-m2-d1','hPa', 'percent']
for var, units in zip(var_list, unit_list):
# Figure out the right files to work on
var_files = [x for x in ss_file_list if var in x.lower()]
print(var_files)
print()
h_file = [x for x in var_files if 'cru' in x.lower()]
pmri_file = [x for x in var_files if 'mri' in x.lower()]
pncar_file = [x for x in var_files if 'ncar' in x.lower()]
# Filtering above should result in single item lists, unpack for convenience.
h_file = h_file[0]
pmri_file = pmri_file[0]
pncar_file = pncar_file[0]
print("var: ", var)
print("hfile: ", h_file)
print("pmri_file: ", pmri_file)
print("pncar_file: ", pncar_file)
print()
# Read data into DataFrames
hdf = pd.read_csv( h_file )
hdf.set_index( pd.to_datetime(hdf['date']), inplace=True )
pmri_df = pd.read_csv( pmri_file )
pmri_df.set_index( pd.to_datetime(pmri_df['date']), inplace=True )
pncar_df = pd.read_csv( pncar_file )
pncar_df.set_index( pd.to_datetime(pncar_df['date']), inplace=True )
# build an index for the whole range, historic & projected
full_index = | pd.DatetimeIndex(start=hdf.index[0], end=pncar_df.index[-1], freq="MS") | pandas.DatetimeIndex |
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 13 17:45:11 2018
@author: <NAME>
@e-mail: <EMAIL>
Program for the analysis and creation of fragmentation diagrams in mass spectrometry from .csv files
"""
import os
import time
from tkinter import filedialog
import pandas as pd
import numpy as np
from numpy import trapz
from scipy.signal import savgol_filter
from sklearn.svm import SVR
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import axes3d
import pickle as pl
xMin = 15
xMax = 30
stepWidth = 1
def prepare_data(ms_info):
global filepath
data = pd.io.parsers.read_csv(filepath)
data.drop(data[data.m > (ms_info + 2)].index, inplace=True)
data.drop(data[data.m < (ms_info - 1)].index, inplace=True)
#data.intensity = savgol_filter(data.intensity, 23, 6, mode='wrap')
#data.intensity = savgol_filter(data.intensity, 21, 7, mode='nearest')
global highest_value_overall
global ms_info_overall
highest_value = 0
scan = 0
index = 0
d = {'scan': [scan],
'intensity': [highest_value]}
data_new = pd.DataFrame(d)
data_new_scaled = pd.DataFrame(d)
for index, row in data.iterrows():
scan_new = row['scan']
if scan_new == scan:
highest_value_new = row['intensity']
if highest_value_new > highest_value:
highest_value = highest_value_new
else:
d = {'scan': [scan],
'intensity': [highest_value]}
data_new = data_new.append(pd.DataFrame(d))
scan = scan_new
highest_value = 0
data_new = data_new.iloc[2:]
data_new.intensity = savgol_filter(data_new.intensity, 11, 6, mode='nearest')
if ms_info < ms_info_overall:
data_new['intensity'].iloc[0] = 0
for index, row in data_new.iterrows():
highest_value = row['intensity']
if highest_value >= highest_value_overall:
highest_value_overall = highest_value
for i, row in data_new.iterrows():
scan = row['scan']
highest_value = row['intensity']
d = {'scan': [scan],
'intensity': [(highest_value/highest_value_overall)*100]}
data_new_scaled = data_new_scaled.append(pd.DataFrame(d))
data_new_scaled = data_new_scaled.iloc[2:]
if ms_info < ms_info_overall:
data_new_scaled['intensity'].iloc[0] = 0
return data_new, data_new_scaled
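# Note on the smoothing above: savgol_filter fits a local polynomial inside a sliding
# window, i.e. roughly
#
#   smoothed = savgol_filter(raw_intensity, window_length=11, polyorder=6, mode='nearest')
#
# window_length must be odd and strictly greater than polyorder.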
def plot_diag(catab, plant, category, version, catabolite, fragmentation_mode):
global time
fig_1 = plt.figure(1)
ax = plt.axes()
ax.yaxis.grid()
overall_length = 0
dataframe = pd.DataFrame()
dataframe_scaled = pd.DataFrame()
for i in catab:
data_to_draw, data_to_draw_scaled = prepare_data(int(i))
length = data_to_draw.scan.size
if length > overall_length:
overall_length = length
for i in catab:
data_to_draw, data_to_draw_scaled = prepare_data(int(i))
length = data_to_draw.scan.size
#x = np.arange(0, (length/(overall_length+1)*100), ((length/(overall_length+1)*100)/length))
#x = np.arange(0, ((length/overall_length)*100), (((length/overall_length)*100)/length))
#x = np.arange(20,45,1)
#x = np.arange(0,100,4)
#x = np.arange(15,30,1)
x = np.arange(xMin, xMax, stepWidth)
plt.plot(x, data_to_draw.intensity, label = i + ' Da')
plt.suptitle(plant+' - '+category+'-'+catabolite+fragmentation_mode)
plt.title(version)
plt.xlabel('normalised collision energy (in %)')
plt.ylabel('intensity (arbitrary unit)')
plt.legend()
ax.set_ylim(ymin=0)
#ax.set_xlim([0,96])
#ax.set_xlim([20,44])
#ax.set_xlim([15,29])
ax.set_xlim([xMin,xMax-stepWidth])
directory = 'diagrams_output/'+plant+'/'+category+'/'+catabolite+'/'+time+'/'+fragmentation_mode+'/'+version+'/'
diag_name = directory+catabolite+fragmentation_mode+'-'+version
if not os.path.exists(directory):
os.makedirs(directory)
lines = len(catab)
index = 0
while index <= lines - 1:
data = fig_1.axes[0].lines[index].get_data()
if index == 0:
dataframe_two = pd.DataFrame(data[0])
dataframe = pd.concat([dataframe, dataframe_two], axis=1)
dataframe_two = pd.DataFrame(data[1])
dataframe = pd.concat([dataframe, dataframe_two], axis=1)
else:
dataframe_two = pd.DataFrame(data[1])
dataframe = pd.concat([dataframe, dataframe_two], axis=1)
index = index + 1
dataframe.to_csv(diag_name+'.csv')
plt.savefig(diag_name+'.png')
pl.dump(fig_1, open(diag_name+'.pickle','wb'))
plt.show()
choice = '200'
while choice != '0':
print("---------------------")
print("create diagram <1>")
print("view diagram <2>")
print("calc. derivatives <3>")
print("total ion current <4>")
print("Exit <0>")
choice = input("Enter: ")
if choice == '1':
print(" ")
time = input("time of measurement (DDMMYYYY): ")
filename = input("filename (.csv file - without last number): ")
plant = input("plant: ")
category = input("catabolite type: ")
catabolite = input("fragmented mass: ")
fragmentation_mode = input("fragmentation mode: ")
catabolites_string = input("[M]-fragments of above catabolite: ")
catabolites = catabolites_string.split(",")
highest_value_overall = 0
ms_info_overall = int(catabolites[0])
xMin = int(input("minimum collision energy: "))
xMax = int(input("maximum collision energy: "))
stepWidth = int(input("step width: "))
versions = input("number of versions: ")
print("<<info - close window for next to appear>>")
i = 1
while i <= int(versions):
filepath = 'RawFiles/'+time+'/'+plant+'/'+filename+str(i)+'.csv'
version = 'Version'+str(i)
print('...'+version)
plot_diag(catabolites, plant, category, version, catabolite, fragmentation_mode)
i = i+1
if choice == '2':
pathname = filedialog.askopenfilename(title = "Select file",filetypes = (("pickle files","*.pickle"),("all files","*.*")))
fig = pl.load(open(pathname, 'rb'))
fig.show()
if choice == '3':
pathname = filedialog.askopenfilename(title = "Select file",filetypes = (("pickle files","*.pickle"),("all files","*.*")))
fig_1 = pl.load(open(pathname, 'rb'))
dataframe = pd.DataFrame()
lines = 3
index = 0
while index <= lines - 1:
data = fig_1.axes[0].lines[index].get_data()
if index == 0:
dataframe_two = pd.DataFrame(data[0])
dataframe = pd.concat([dataframe, dataframe_two], axis=1)
dataframe_two = | pd.DataFrame(data[1]) | pandas.DataFrame |
# coding: utf-8
from collections import OrderedDict
import pandas as pd
from czsc.objects import Signal, Factor, Event, Freq, Operate, PositionLong, PositionShort
def test_signal():
s = Signal(k1="1分钟", k3="倒1形态", v1="类一买", v2="七笔", v3="基础型", score=3)
assert str(s) == "Signal('1分钟_任意_倒1形态_类一买_七笔_基础型_3')"
assert s.key == "1分钟_倒1形态"
s1 = Signal(signal='1分钟_任意_倒1形态_类一买_七笔_基础型_3')
assert s == s1
assert s.is_match({"1分钟_倒1形态": "类一买_七笔_基础型_3"})
assert not s.is_match({"1分钟_倒1形态": "类一买_七笔_特例一_3"})
assert not s.is_match({"1分钟_倒1形态": "类一买_九笔_基础型_3"})
s = Signal(k1="1分钟", k2="倒1形态", k3="类一买", score=3)
assert str(s) == "Signal('1分钟_倒1形态_类一买_任意_任意_任意_3')"
assert s.key == "1分钟_倒1形态_类一买"
try:
s = Signal(k1="1分钟", k2="倒1形态", k3="类一买", score=101)
except ValueError as e:
assert str(e) == 'score 必须在0~100之间'
def test_factor():
freq = Freq.F15
s = OrderedDict()
default_signals = [
Signal(k1=str(freq.value), k2="倒0笔", k3="方向", v1="向上", v2='其他', v3='其他'),
Signal(k1=str(freq.value), k2="倒0笔", k3="长度", v1="大于5", v2='其他', v3='其他'),
Signal(k1=str(freq.value), k2="倒0笔", k3="三K形态", v1="顶分型", v2='其他', v3='其他'),
Signal(k1=str(freq.value), k2="倒1笔", k3="表里关系", v1="其他", v2='其他', v3='其他'),
Signal(k1=str(freq.value), k2="倒1笔", k3="RSQ状态", v1="小于0.2", v2='其他', v3='其他'),
]
for signal in default_signals:
s[signal.key] = signal.value
factor = Factor(
name="单测",
signals_all=[
Signal(k1=str(freq.value), k2="倒0笔", k3="方向", v1="向上", v2='其他', v3='其他'),
Signal(k1=str(freq.value), k2="倒0笔", k3="长度", v1="大于5", v2='其他', v3='其他')
]
)
assert factor.is_match(s)
factor = Factor(
name="单测",
signals_all=[
Signal(k1=str(freq.value), k2="倒0笔", k3="方向", v1="向上", v2='其他', v3='其他'),
Signal(k1=str(freq.value), k2="倒0笔", k3="长度", v1="大于5", v2='其他', v3='其他')
],
signals_any=[
Signal(k1=str(freq.value), k2="倒1笔", k3="RSQ状态", v1="小于0.2", v2='其他', v3='其他')
]
)
assert factor.is_match(s)
factor = Factor(
name="单测",
signals_all=[
Signal(k1=str(freq.value), k2="倒0笔", k3="方向", v1="向上", v2='其他', v3='其他'),
Signal(k1=str(freq.value), k2="倒0笔", k3="长度", v1="大于5", v2='其他', v3='其他')
],
signals_any=[
Signal(k1=str(freq.value), k2="倒1笔", k3="RSQ状态", v1="小于0.8", v2='其他', v3='其他')
]
)
assert not factor.is_match(s)
factor = Factor(
name="单测",
signals_all=[
Signal(k1=str(freq.value), k2="倒0笔", k3="方向", v1="向上", v2='其他', v3='其他'),
Signal(k1=str(freq.value), k2="倒0笔", k3="长度", v1="大于5", v2='其他', v3='其他')
],
signals_any=[
Signal(k1=str(freq.value), k2="倒1笔", k3="RSQ状态", v1="小于0.2", v2='其他', v3='其他')
],
signals_not=[
Signal(k1=str(freq.value), k2="倒0笔", k3="三K形态", v1="顶分型", v2='其他', v3='其他'),
]
)
assert not factor.is_match(s)
def test_event():
freq = Freq.F15
s = OrderedDict()
default_signals = [
Signal(k1=str(freq.value), k2="倒0笔", k3="方向", v1="向上", v2='其他', v3='其他'),
Signal(k1=str(freq.value), k2="倒0笔", k3="长度", v1="大于5", v2='其他', v3='其他'),
Signal(k1=str(freq.value), k2="倒0笔", k3="三K形态", v1="顶分型", v2='其他', v3='其他'),
Signal(k1=str(freq.value), k2="倒1笔", k3="表里关系", v1="其他", v2='其他', v3='其他'),
Signal(k1=str(freq.value), k2="倒1笔", k3="RSQ状态", v1="小于0.2", v2='其他', v3='其他'),
]
for signal in default_signals:
s[signal.key] = signal.value
event = Event(name="单测", operate=Operate.LO, factors=[
Factor(
name="测试",
signals_all=[
Signal(k1=str(freq.value), k2="倒0笔", k3="方向", v1="向上", v2='其他', v3='其他'),
Signal(k1=str(freq.value), k2="倒0笔", k3="长度", v1="大于5", v2='其他', v3='其他')]
)
])
m, f = event.is_match(s)
assert m and f
event = Event(name="单测", operate=Operate.LO, factors=[
Factor(
name="测试",
signals_all=[
Signal('15分钟_倒0笔_方向_向上_其他_其他_0'), Signal('15分钟_倒0笔_长度_任意_其他_其他_0')
]
)
])
m, f = event.is_match(s)
assert m and f
event = Event(name="单测", operate=Operate.LO, factors=[
Factor(
name="测试",
signals_all=[
Signal('15分钟_倒0笔_方向_向上_其他_其他_20'), Signal('15分钟_倒0笔_长度_任意_其他_其他_0')
]
)
])
m, f = event.is_match(s)
assert not m and not f
event = Event(name="单测", operate=Operate.LO, factors=[
Factor(
name="测试",
signals_all=[
Signal('15分钟_倒0笔_方向_向下_其他_其他_0'), Signal('15分钟_倒0笔_长度_任意_其他_其他_0')
]
)
])
m, f = event.is_match(s)
assert not m and not f
def test_position_long():
pos_long = PositionLong(symbol="000001.XSHG")
pos_long.update(dt=pd.to_datetime('2021-01-01'), op=Operate.HO, price=100, bid=0)
assert not pos_long.pos_changed and pos_long.pos == 0
pos_long.update(dt=pd.to_datetime('2021-01-02'), op=Operate.LO, price=100, bid=1, op_desc="首次开仓测试")
assert pos_long.pos_changed and pos_long.pos == 0.5
pos_long.update(dt=pd.to_datetime('2021-01-03'), op=Operate.LO, price=100, bid=2, op_desc="首次开仓测试")
assert not pos_long.pos_changed and pos_long.pos == 0.5
pos_long.update(dt=pd.to_datetime('2021-01-04'), op=Operate.LA1, price=100, bid=3)
assert pos_long.pos_changed and pos_long.pos == 0.8
pos_long.update(dt=pd.to_datetime('2021-01-05'), op=Operate.LA1, price=100, bid=4)
assert not pos_long.pos_changed and pos_long.pos == 0.8
pos_long.update(dt=pd.to_datetime('2021-01-06'), op=Operate.LA2, price=100, bid=5)
assert pos_long.pos_changed and pos_long.pos == 1
pos_long.update(dt=pd.to_datetime('2021-01-07'), op=Operate.LR1, price=100, bid=6)
assert pos_long.pos_changed and pos_long.pos == 0.8
pos_long.update(dt=pd.to_datetime('2021-01-08'), op=Operate.LR2, price=100, bid=7)
assert pos_long.pos_changed and pos_long.pos == 0.5
pos_long.update(dt=pd.to_datetime('2021-01-08'), op=Operate.LR2, price=100, bid=7)
assert not pos_long.pos_changed and pos_long.pos == 0.5
pos_long.update(dt=pd.to_datetime('2021-01-09'), op=Operate.LA2, price=100, bid=8)
assert not pos_long.pos_changed and pos_long.pos == 0.5
pos_long.update(dt=pd.to_datetime('2021-01-10'), op=Operate.LA1, price=100, bid=9)
assert pos_long.pos_changed and pos_long.pos == 0.8
pos_long.update(dt=pd.to_datetime('2021-01-11'), op=Operate.LE, price=100, bid=10)
assert pos_long.pos_changed and pos_long.pos == 0
assert len(pos_long.pairs) == 1
assert pos_long.pairs[0]['持仓天数'] == 9
pos_long.evaluate_operates()
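# Operate -> position mapping exercised by the assertions above: LO opens 0.5,
# LA1 raises to 0.8, LA2 to 1.0, LR1 reduces to 0.8, LR2 to 0.5 and LE exits to 0;
# repeating the same operate at an unchanged level leaves the position untouched.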
def test_position_long_t0():
"""测试T0逻辑"""
pos_long = PositionLong(symbol="000001.XSHG", T0=False)
pos_long.update(dt=pd.to_datetime('2021-01-01'), op=Operate.HO, price=100, bid=0)
assert not pos_long.pos_changed and pos_long.pos == 0
pos_long.update(dt=pd.to_datetime('2021-01-02'), op=Operate.LO, price=100, bid=1, op_desc="首次开仓测试")
assert pos_long.pos_changed and pos_long.pos == 0.5
pos_long.update(dt=pd.to_datetime('2021-01-02'), op=Operate.LA1, price=100, bid=3)
assert pos_long.pos_changed and pos_long.pos == 0.8
pos_long.update(dt=pd.to_datetime('2021-01-02'), op=Operate.LA2, price=100, bid=5)
assert pos_long.pos_changed and pos_long.pos == 1
# with T0 disabled, the same-day exit signal does not take effect
pos_long.update(dt=pd.to_datetime('2021-01-02'), op=Operate.LE, price=100, bid=8)
assert not pos_long.pos_changed and pos_long.pos == 1
pos_long.update(dt=pd.to_datetime('2021-01-03'), op=Operate.LE, price=100, bid=10)
assert pos_long.pos_changed and pos_long.pos == 0
try:
pos_long.update(dt=pd.to_datetime('2021-01-03'), op=Operate.SO, price=100, bid=11)
except AssertionError as e:
print(e)
assert len(pos_long.pairs) == 1
pos_long.evaluate_operates()
def test_position_long_min_interval():
"""测试T0逻辑"""
pos_long = PositionLong(symbol="000001.XSHG", T0=False, long_min_interval=3600*72)
pos_long.update(dt=pd.to_datetime('2021-01-01'), op=Operate.HO, price=100, bid=0)
assert not pos_long.pos_changed and pos_long.pos == 0
pos_long.update(dt=pd.to_datetime('2021-01-02'), op=Operate.LO, price=100, bid=1, op_desc="首次开仓测试")
assert pos_long.pos_changed and pos_long.pos == 0.5
pos_long.update(dt=pd.to_datetime('2021-01-02'), op=Operate.LA1, price=100, bid=3)
assert pos_long.pos_changed and pos_long.pos == 0.8
pos_long.update(dt=pd.to_datetime('2021-01-02'), op=Operate.LA2, price=100, bid=5)
assert pos_long.pos_changed and pos_long.pos == 1
# with T0 disabled, the same-day exit signal does not take effect
pos_long.update(dt=pd.to_datetime('2021-01-02'), op=Operate.LE, price=100, bid=8)
assert not pos_long.pos_changed and pos_long.pos == 1
pos_long.update(dt= | pd.to_datetime('2021-01-03') | pandas.to_datetime |
from visions.core.model import VisionsBaseType, VisionsTypeset
from visions.core.implementations.types import visions_generic
from visions.core.model.relations import IdentityRelation
import pandas.api.types as pdt
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
class visions_statistical_set(VisionsTypeset):
"""Typeset that exclusively supports time related types
Includes support for the following types:
- visions_binary
- visions_nominal
- visions_ordinal
- visions_interval
- visions_ratio
"""
def __init__(self):
types = {
visions_binary,
visions_nominal,
visions_ordinal,
visions_interval,
visions_ratio,
}
super().__init__(types)
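# Quick illustration of the nominal/ordinal distinction used by the types below
# (plain pandas, no additional API assumed):
#
#   nominal = pd.Series(list("abca"), dtype="category")   # unordered -> visions_nominal
#   ordinal = nominal.cat.as_ordered()                    # ordered   -> visions_ordinal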
class visions_binary(VisionsBaseType):
@classmethod
def get_relations(cls):
return [IdentityRelation(visions_binary, visions_nominal)]
@classmethod
def contains_op(cls, series: pd.Series) -> bool:
return pdt.is_bool_dtype(series)
class visions_nominal(VisionsBaseType):
@classmethod
def get_relations(cls):
return [IdentityRelation(visions_nominal, visions_generic)]
@classmethod
def contains_op(cls, series: pd.Series) -> bool:
return (pdt.is_categorical_dtype(series) and not series.cat.ordered) or pdt.is_bool_dtype(series)
class visions_ordinal(VisionsBaseType):
@classmethod
def get_relations(cls):
return [IdentityRelation(visions_ordinal, visions_generic)]
@classmethod
def contains_op(cls, series: pd.Series) -> bool:
return pdt.is_categorical_dtype(series) and series.cat.ordered
class visions_interval(VisionsBaseType):
"""
Aliases
"""
@classmethod
def get_relations(cls):
return [IdentityRelation(visions_interval, visions_generic)]
@classmethod
def contains_op(cls, series: pd.Series) -> bool:
return | pdt.is_numeric_dtype(series) | pandas.api.types.is_numeric_dtype |
# --------------
#Importing header files
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
#Code starts here
data = pd.read_csv(path)
data['Rating'].hist()
data = data[data['Rating']<=5]
data['Rating'].hist()
#Code ends here
# --------------
# code starts here
total_null = data.isnull().sum()
percent_null = total_null/data.isnull().count()
missing_data = pd.concat([total_null,percent_null],axis=1,keys=['Total','Percent'])
print(missing_data)
data.dropna(inplace=True)
total_null_1 = data.isnull().sum()
percent_null_1 = total_null_1/data.isnull().count()
missing_data_1 = pd.concat([total_null_1,percent_null_1],axis=1,keys=['Total','Percent'])
print(missing_data_1)
# code ends here
# --------------
#Code starts here
sns.catplot(x='Category',y='Rating',data=data,kind='box')
plt.xticks(rotation=90)
plt.show()
#Code ends here
# --------------
#Importing header files
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
#Code starts here
print(data.Installs.value_counts())
data['Installs'] = (data['Installs'].str.replace(',','')).str.replace('+','')
data['Installs'] = data['Installs'].astype(int)
le = LabelEncoder()
data['Installs'] = le.fit(data['Installs']).transform(data['Installs'])
sns.regplot(x='Installs',y='Rating',data=data)
plt.title('Rating vs Installs [RegPlot]')
plt.show()
#Code ends here
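# e.g. the replacements and cast above map '1,000,000+' -> '1000000' -> 1000000
# before the label encoding.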
# --------------
#Code starts here
print(data['Price'].value_counts())
data['Price'] = (data['Price'].str.replace('$','')).astype(float)
sns.regplot(x='Price',y='Rating',data=data)
plt.title('Rating vs Price [RegPlot]')
plt.show()
#Code ends here
# --------------
#Code starts here
print(data['Genres'].unique())
data['Genres'] = (data['Genres'].str.split(";", n = 1, expand = True))[0]
gr_mean = data[['Genres','Rating']].groupby('Genres',as_index=False).mean()
print(gr_mean.describe())
gr_mean = gr_mean.sort_values(by='Rating')
print(gr_mean.head(1))
print(gr_mean.tail(1))
#Code ends here
# --------------
#Code starts here
data['Last Updated'] = | pd.to_datetime(data['Last Updated']) | pandas.to_datetime |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from scipy import stats
from sklearn.linear_model import Ridge, RidgeCV
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.metrics import mean_squared_error, make_scorer
# In[2]:
def calculate_pearson(df):
correlations = {}
numerical_features = df.select_dtypes(exclude = ["object"]).columns
numerical_features = numerical_features.drop("cod_municipio")
for i in numerical_features:
corr = stats.pearsonr(df[i], df['ideb'])[0]
correlations[i] = corr
df_corr = pd.DataFrame(list(correlations.items()), columns=['feature', 'correlation_with_ideb'])
df_corr = df_corr.dropna()
return df_corr
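# Illustrative usage sketch (assumes the frame carries 'ideb' and 'cod_municipio'
# columns, as in the IDEB CSVs loaded below):
#
#   corr_table = calculate_pearson(alexandre_inicio_2015)
#   print(corr_table.sort_values('correlation_with_ideb', ascending=False).head())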
# In[3]:
def calculate_categorical_correlation(df):
categorical_features = df.select_dtypes(include = ["object"]).columns
return categorical_features
# # Pull data from each group member's CSV files
# ### Alexandre's data
# In[4]:
path = '../../data/'
# In[5]:
#Initial years data
alexandre_inicio_2015 = pd.read_csv(path + 'bases_ale/anos_iniciais/ideb_municipios_2015_ai.csv')
alexandre_inicio_2017 = pd.read_csv(path + 'bases_ale/anos_iniciais/ideb_municipios_2017_ai.csv')
# Final years data
alexandre_final_2015 = pd.read_csv(path + 'bases_ale/anos_finais/ideb_municipios_2015_af.csv')
alexandre_final_2017 = pd.read_csv(path + 'bases_ale/anos_finais/ideb_municipios_2017_af.csv')
# ### Lidia's data
# In[6]:
#Initial years data
lidia_inicio_2007 = | pd.read_csv(path + 'bases_lidia/anos_iniciais/ideb_escola_2007_ai.csv') | pandas.read_csv |
import os
import sys
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, compat
from pandas.util import testing as tm
class TestToCSV:
@pytest.mark.xfail((3, 6, 5) > sys.version_info >= (3, 5),
reason=("Python csv library bug "
"(see https://bugs.python.org/issue32255)"))
def test_to_csv_with_single_column(self):
# see gh-18676, https://bugs.python.org/issue32255
#
# Python's CSV library adds an extraneous '""'
# before the newline when the NaN-value is in
# the first row. Otherwise, only the newline
# character is added. This behavior is inconsistent
# and was patched in https://bugs.python.org/pull_request4672.
df1 = DataFrame([None, 1])
expected1 = """\
""
1.0
"""
with tm.ensure_clean('test.csv') as path:
df1.to_csv(path, header=None, index=None)
with open(path, 'r') as f:
assert f.read() == expected1
df2 = DataFrame([1, None])
expected2 = """\
1.0
""
"""
with tm.ensure_clean('test.csv') as path:
df2.to_csv(path, header=None, index=None)
with open(path, 'r') as f:
assert f.read() == expected2
def test_to_csv_default_encoding(self):
# GH17097
df = DataFrame({'col': ["AAAAA", "ÄÄÄÄÄ", "ßßßßß", "聞聞聞聞聞"]})
with tm.ensure_clean('test.csv') as path:
# the default to_csv encoding is utf-8.
df.to_csv(path)
tm.assert_frame_equal(pd.read_csv(path, index_col=0), df)
def test_to_csv_quotechar(self):
df = DataFrame({'col': [1, 2]})
expected = """\
"","col"
"0","1"
"1","2"
"""
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=1) # 1=QUOTE_ALL
with open(path, 'r') as f:
assert f.read() == expected
expected = """\
$$,$col$
$0$,$1$
$1$,$2$
"""
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=1, quotechar="$")
with open(path, 'r') as f:
assert f.read() == expected
with tm.ensure_clean('test.csv') as path:
with pytest.raises(TypeError, match='quotechar'):
df.to_csv(path, quoting=1, quotechar=None)
def test_to_csv_doublequote(self):
df = DataFrame({'col': ['a"a', '"bb"']})
expected = '''\
"","col"
"0","a""a"
"1","""bb"""
'''
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=1, doublequote=True) # QUOTE_ALL
with open(path, 'r') as f:
assert f.read() == expected
from _csv import Error
with tm.ensure_clean('test.csv') as path:
with pytest.raises(Error, match='escapechar'):
df.to_csv(path, doublequote=False) # no escapechar set
def test_to_csv_escapechar(self):
df = DataFrame({'col': ['a"a', '"bb"']})
expected = '''\
"","col"
"0","a\\"a"
"1","\\"bb\\""
'''
with tm.ensure_clean('test.csv') as path: # QUOTE_ALL
df.to_csv(path, quoting=1, doublequote=False, escapechar='\\')
with open(path, 'r') as f:
assert f.read() == expected
df = DataFrame({'col': ['a,a', ',bb,']})
expected = """\
,col
0,a\\,a
1,\\,bb\\,
"""
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=3, escapechar='\\') # QUOTE_NONE
with open(path, 'r') as f:
assert f.read() == expected
def test_csv_to_string(self):
df = DataFrame({'col': [1, 2]})
expected_rows = [',col',
'0,1',
'1,2']
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert df.to_csv() == expected
def test_to_csv_decimal(self):
# see gh-781
df = DataFrame({'col1': [1], 'col2': ['a'], 'col3': [10.1]})
expected_rows = [',col1,col2,col3',
'0,1,a,10.1']
expected_default = tm.convert_rows_list_to_csv_str(expected_rows)
assert df.to_csv() == expected_default
expected_rows = [';col1;col2;col3',
'0;1;a;10,1']
expected_european_excel = tm.convert_rows_list_to_csv_str(
expected_rows)
assert df.to_csv(decimal=',', sep=';') == expected_european_excel
expected_rows = [',col1,col2,col3',
'0,1,a,10.10']
expected_float_format_default = tm.convert_rows_list_to_csv_str(
expected_rows)
assert df.to_csv(float_format='%.2f') == expected_float_format_default
expected_rows = [';col1;col2;col3',
'0;1;a;10,10']
expected_float_format = tm.convert_rows_list_to_csv_str(expected_rows)
assert df.to_csv(decimal=',', sep=';',
float_format='%.2f') == expected_float_format
# see gh-11553: testing if decimal is taken into account for '0.0'
df = pd.DataFrame({'a': [0, 1.1], 'b': [2.2, 3.3], 'c': 1})
expected_rows = ['a,b,c',
'0^0,2^2,1',
'1^1,3^3,1']
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert df.to_csv(index=False, decimal='^') == expected
# same but for an index
assert df.set_index('a').to_csv(decimal='^') == expected
# same for a multi-index
assert df.set_index(['a', 'b']).to_csv(decimal="^") == expected
def test_to_csv_float_format(self):
# testing if float_format is taken into account for the index
# GH 11553
df = pd.DataFrame({'a': [0, 1], 'b': [2.2, 3.3], 'c': 1})
expected_rows = ['a,b,c',
'0,2.20,1',
'1,3.30,1']
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert df.set_index('a').to_csv(float_format='%.2f') == expected
# same for a multi-index
assert df.set_index(['a', 'b']).to_csv(
float_format='%.2f') == expected
def test_to_csv_na_rep(self):
# see gh-11553
#
# Testing if NaN values are correctly represented in the index.
df = DataFrame({'a': [0, np.NaN], 'b': [0, 1], 'c': [2, 3]})
expected_rows = ['a,b,c',
'0.0,0,2',
'_,1,3']
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert df.set_index('a').to_csv(na_rep='_') == expected
assert df.set_index(['a', 'b']).to_csv(na_rep='_') == expected
# now with an index containing only NaNs
df = DataFrame({'a': np.NaN, 'b': [0, 1], 'c': [2, 3]})
expected_rows = ['a,b,c',
'_,0,2',
'_,1,3']
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert df.set_index('a').to_csv(na_rep='_') == expected
assert df.set_index(['a', 'b']).to_csv(na_rep='_') == expected
# check if na_rep parameter does not break anything when no NaN
df = DataFrame({'a': 0, 'b': [0, 1], 'c': [2, 3]})
expected_rows = ['a,b,c',
'0,0,2',
'0,1,3']
expected = tm.convert_rows_list_to_csv_str(expected_rows)
assert df.set_index('a').to_csv(na_rep='_') == expected
assert df.set_index(['a', 'b']).to_csv(na_rep='_') == expected
def test_to_csv_date_format(self):
# GH 10209
df_sec = DataFrame({'A': pd.date_range('20130101', periods=5, freq='s')
})
df_day = DataFrame({'A': pd.date_range('20130101', periods=5, freq='d')
})
expected_rows = [',A',
'0,2013-01-01 00:00:00',
'1,2013-01-01 00:00:01',
'2,2013-01-01 00:00:02',
'3,2013-01-01 00:00:03',
'4,2013-01-01 00:00:04']
expected_default_sec = tm.convert_rows_list_to_csv_str(expected_rows)
assert df_sec.to_csv() == expected_default_sec
expected_rows = [',A',
'0,2013-01-01 00:00:00',
'1,2013-01-02 00:00:00',
'2,2013-01-03 00:00:00',
'3,2013-01-04 00:00:00',
'4,2013-01-05 00:00:00']
expected_ymdhms_day = tm.convert_rows_list_to_csv_str(expected_rows)
assert (df_day.to_csv(date_format='%Y-%m-%d %H:%M:%S') ==
expected_ymdhms_day)
expected_rows = [',A',
'0,2013-01-01',
'1,2013-01-01',
'2,2013-01-01',
'3,2013-01-01',
'4,2013-01-01']
expected_ymd_sec = tm.convert_rows_list_to_csv_str(expected_rows)
assert df_sec.to_csv(date_format='%Y-%m-%d') == expected_ymd_sec
expected_rows = [',A',
'0,2013-01-01',
'1,2013-01-02',
'2,2013-01-03',
'3,2013-01-04',
'4,2013-01-05']
expected_default_day = tm.convert_rows_list_to_csv_str(expected_rows)
assert df_day.to_csv() == expected_default_day
assert df_day.to_csv(date_format='%Y-%m-%d') == expected_default_day
# see gh-7791
#
# Testing if date_format parameter is taken into account
# for multi-indexed DataFrames.
df_sec['B'] = 0
df_sec['C'] = 1
expected_rows = ['A,B,C',
'2013-01-01,0,1']
expected_ymd_sec = tm.convert_rows_list_to_csv_str(expected_rows)
df_sec_grouped = df_sec.groupby([pd.Grouper(key='A', freq='1h'), 'B'])
assert (df_sec_grouped.mean().to_csv(date_format='%Y-%m-%d') ==
expected_ymd_sec)
def test_to_csv_multi_index(self):
# see gh-6618
df = DataFrame([1], columns=pd.MultiIndex.from_arrays([[1], [2]]))
exp_rows = [',1',
',2',
'0,1']
exp = tm.convert_rows_list_to_csv_str(exp_rows)
assert df.to_csv() == exp
exp_rows = ['1', '2', '1']
exp = tm.convert_rows_list_to_csv_str(exp_rows)
assert df.to_csv(index=False) == exp
df = DataFrame([1], columns=pd.MultiIndex.from_arrays([[1], [2]]),
index=pd.MultiIndex.from_arrays([[1], [2]]))
exp_rows = [',,1', ',,2', '1,2,1']
exp = tm.convert_rows_list_to_csv_str(exp_rows)
assert df.to_csv() == exp
exp_rows = ['1', '2', '1']
exp = tm.convert_rows_list_to_csv_str(exp_rows)
assert df.to_csv(index=False) == exp
df = DataFrame(
[1], columns= | pd.MultiIndex.from_arrays([['foo'], ['bar']]) | pandas.MultiIndex.from_arrays |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# This file contains functions used to analyse data sets for a single
# test case.
import sys
import argparse
import glob
import os
import pandas as pd # Data manipulation and analysis
import datetime as dt
from StatisticsFunctions import StatisticsFunctions as sf
import plotly
import numpy
from TSVContainer import TSVContainer
from PrintFuncs import *
RESULTS_SUBDIRNAME = "result_data"
EVAL_PERIODS = "EvaluationPeriods.tsv"
class CaseResults:
def __init__(self):
self.score = 0
self.norms = dict()
self.simQbadge = 0 # means failed
self.ToolID = ""
self.TestCase = ""
self.Variable = ""
self.ErrorCode = -10 # no data/dataset broken
self.Editor = ""
self.Version = ""
self.DisplayName = ""
self.DisplayColor = "#ffc120"
self.Unit = ""
self.Reference = False
self.Data = pd.DataFrame
self.RefData = pd.DataFrame
def appendErrorResults(tsvData, testCaseName, toolID, errorCode, variables):
cr = CaseResults()
cr.TestCase = testCaseName
cr.ToolID = toolID
cr.ErrorCode = errorCode
for v in variables:
cr.Variable = v
tsvData.append(cr)
def listsEqual(list1, list2):
if len(list1) != len(list2):
return False
for i in range(len(list1)):
if list1[i] != list2[i]:
return False
return True
def evaluateVariableResults(variable, timeColumnRef, timeColumnData, refData, testData, starts, ends, weightFactors,
timeIndicator):
"""
Performance difference calculation between variable data sets.
We use different statistical metrics to perform in-depth comparisons
of the different datasets.
"""
printNotification(" {}".format(variable))
cr = CaseResults()
pdTime = pd.DataFrame()
pdRef = pd.DataFrame()
pdData = pd.DataFrame()
cr.RefData = pd.DataFrame()
# initialize all statistical methods in cr.norms
for key in weightFactors:
cr.norms[key] = -99
for i in range(len(starts)):
start = starts[i]
end = ends[i]
try:
split = 1
if timeIndicator == "min":
split = 60
tempTimeColumnData = [x / split for x in timeColumnData]
cr.Data = pd.DataFrame(data=testData, index=tempTimeColumnData, columns=["Data"])
except Exception as e:
printError(str(e))
raise Exception("Could not convert data to case result object.")
try:
# Check if the time columns are equal. Some tools cannot produce output at sub-hourly resolution,
# so we are lenient and try to convert our reference results to match.
if not listsEqual(timeColumnData, timeColumnRef):
printWarning(f" Mismatching time columns in data set file and reference data set. "
f"Could be due to Numerical inaccuracy.")
printWarning(f" Trying to compare with numerical tolerance.")
if not numpy.allclose(timeColumnData, timeColumnRef):
printWarning(f" Yep, that was the case. We can continue ;)")
except Exception as e:
printWarning(f" Nope, still quite a mass. :(")
printWarning(f" Trying to convert reference results for our tool with less time steps.")
if len(timeColumnData) < len(timeColumnRef):
newRefData = []
for i in range(len(timeColumnData)):
if timeColumnData[i] not in timeColumnRef:
printWarning(f" Could not be converted unfortunately. Sorry. :(")
return cr
index = timeColumnRef.index(timeColumnData[i])
newRefData.append(refData[index])
# time column data from tool data set is now set for reference data set
timeColumnRef = timeColumnData
refData = newRefData
elif len(timeColumnData) > len(timeColumnRef):
newTestData = []
for i in range(len(timeColumnRef)):
if timeColumnRef[i] not in timeColumnData:
printWarning(f" Could not be converted unfortunately. Sorry. :(")
return cr
index = timeColumnData.index(timeColumnRef[i])
newTestData.append(testData[index])
# time column data from tool data set is now set for reference data set
timeColumnData = timeColumnRef
testData = newTestData
# We first convert our data to pandas
try:
split = 1
if timeIndicator == "min":
split = 60
# Convert all the data to hourly indexes
start = float(start) / split
end = float(end) / split
tempTimeColumnData = [x / split for x in timeColumnData]
tempTimeColumnRef = [x / split for x in timeColumnRef]
startDate = dt.datetime(2021, 1, 1) + dt.timedelta(hours=tempTimeColumnData[0])
pdT = pd.DataFrame(data=pd.date_range(start=startDate, periods=len(tempTimeColumnRef), freq=timeIndicator),
index=tempTimeColumnRef, columns=["Date and Time"])
pdD = pd.DataFrame(data=testData, index=tempTimeColumnData, columns=["Data"])
pdR = pd.DataFrame(data=refData, index=tempTimeColumnRef, columns=["Data"])
# cr.Data = pd.DataFrame(data=testData, index=tempTimeColumnData, columns=["Data"])
cr.RefData = pd.concat([cr.RefData,
pd.DataFrame(data=refData, index=tempTimeColumnData,
columns=["Data"]).loc[start:end]])
except ValueError as e:
printWarning(str(e))
printWarning(f" Could not convert given data of file to pandas dataframe.")
cr.ErrorCode = -15
return cr
# We only use data between out start and end point
pdTime = pd.concat([pdTime, pdT.loc[start:end]])
pdData = pd.concat([pdData, pdD.loc[start:end]])
pdRef = pd.concat([pdRef, pdR.loc[start:end]])
####### MAXIMUM #######
try:
# we evaluate the results
cr.norms['Maximum'] = sf.function_Maximum(pdRef["Data"], pdData["Data"], pdTime["Date and Time"])
except (RuntimeError, RuntimeWarning) as e:
printNotification(f" {str(e)}")
printNotification(f" Cannot calculate Maximum for variable '{variable}'")
        ####### MINIMUM #######
try:
cr.norms['Minimum'] = sf.function_Minimum(pdRef["Data"], pdData["Data"], pdTime["Date and Time"])
except (RuntimeError, RuntimeWarning) as e:
printNotification(f" {str(e)}")
printNotification(f" Cannot calculate Minimum for variable '{variable}'")
####### Average #######
try:
cr.norms['Average'] = sf.function_Average(pdRef["Data"], pdData["Data"], pdTime["Date and Time"])
except (RuntimeError, RuntimeWarning) as e:
printNotification(f" {str(e)}")
printNotification(f" Cannot calculate Average for variable '{variable}'")
####### CVRMSE #######
try:
cr.norms['CVRMSE'] = sf.function_CVRMSE(pdRef["Data"], pdData["Data"], pdTime["Date and Time"])
except (RuntimeError, RuntimeWarning) as e:
printNotification(f" {str(e)}")
printNotification(f" Cannot calculate CVRMSE for variable '{variable}'")
####### Daily Amplitude CVRMSE #######
try:
cr.norms['Daily Amplitude CVRMSE'] = sf.function_Daily_Amplitude_CVRMSE(pdRef["Data"], pdData["Data"],
pdTime["Date and Time"])
except (RuntimeError, RuntimeWarning) as e:
printNotification(f" {str(e)}")
printNotification(f" Cannot calculate Daily Amplitude CVRMSE for variable '{variable}'")
####### MBE #######
try:
cr.norms['MBE'] = sf.function_MBE(pdRef["Data"], pdData["Data"], pdTime["Date and Time"])
except (RuntimeError, RuntimeWarning) as e:
printNotification(f" {str(e)}")
printNotification(f" Cannot calculate MBE for variable '{variable}'")
try:
cr.norms['RMSEIQR'] = sf.function_RMSEIQR(pdRef["Data"], pdData["Data"], pdTime["Date and Time"])
except (RuntimeError, RuntimeWarning) as e:
printNotification(f" {str(e)}")
printNotification(f" Cannot calculate RMSIQR for variable '{variable}'")
try:
cr.norms['MSE'] = sf.function_MSE(pdRef["Data"], pdData["Data"], pdTime["Date and Time"])
except (RuntimeError, RuntimeWarning) as e:
printNotification(f" {str(e)}")
printNotification(f" Cannot calculate MSE for variable '{variable}'")
try:
cr.norms['NMBE'] = sf.function_NMBE(pdRef["Data"], pdData["Data"], pdTime["Date and Time"])
except (RuntimeError, RuntimeWarning) as e:
printNotification(f" {str(e)}")
printNotification(f" Cannot calculate NMBE for variable '{variable}'")
try:
cr.norms['NRMSE'] = sf.function_NRMSE(pdRef["Data"], pdData["Data"], pdTime["Date and Time"])
except (RuntimeError, RuntimeWarning) as e:
printNotification(f" {str(e)}")
printNotification(f" Cannot calculate NRMSE for variable '{variable}'")
try:
cr.norms['RMSE'] = sf.function_RMSE(pdRef["Data"], pdData["Data"], pdTime["Date and Time"])
except (RuntimeError, RuntimeWarning) as e:
printNotification(f" {str(e)}")
printNotification(f" Cannot calculate RMSE for variable '{variable}'")
try:
cr.norms['RMSLE'] = sf.function_RMSLE(pdRef["Data"], pdData["Data"], pdTime["Date and Time"])
except (RuntimeError, RuntimeWarning) as e:
printNotification(f" {str(e)}")
printNotification(f" Cannot calculate RMSLE for variable '{variable}'")
try:
cr.norms['R squared'] = sf.function_R_squared_coeff_determination(pdRef["Data"],pdData["Data"],pdTime["Date and Time"])
except (RuntimeError, RuntimeWarning) as e:
printNotification(f" {str(e)}")
printNotification(f" Cannot calculate R squared for variable '{variable}'")
try:
cr.norms['std dev'] = sf.function_std_dev(pdRef["Data"], pdData["Data"], pdTime["Date and Time"])
except (RuntimeError, RuntimeWarning) as e:
printNotification(f" {str(e)}")
printNotification(f" Cannot calculate std dev for variable '{variable}'")
try:
cr.norms['Max Difference'] = sf.function_max_difference(pdRef["Data"], pdData["Data"], pdTime["Date and Time"])
except (RuntimeError, RuntimeWarning) as e:
printNotification(f" {str(e)}")
printNotification(f" Cannot calculate Max Difference for variable '{variable}'")
        # TODO : weighting
if (abs(cr.norms['Average']) < 1e-4):
cr.score = 0 # prevent division by zero error
else:
sum = 999999
if weightFactors['Sum'] > 0:
sum = weightFactors['Sum']
if 'Max Difference' in weightFactors.keys():
if sum == 999999:
sum = 1
else:
sum = sum + 1
maxDiff = 0
if "Max Difference" in weightFactors.keys():
maxDiff = 80.0 + \
20.0 * (weightFactors.get('Max Difference', 0) - abs(cr.norms['Max Difference'])) / weightFactors.get('Max Difference', 0)
cr.score = cr.score + \
(weightFactors.get('CVRMSE', 0) * (100.0 - abs(cr.norms['CVRMSE'])) + # in %
weightFactors.get('Daily Amplitude CVRMSE', 0) * (
100.0 - abs(cr.norms['Daily Amplitude CVRMSE'])) + # in %
weightFactors.get('MBE', 0) * (100.0 - 100.0 * abs(cr.norms['MBE']) / cr.norms['Average']) +
weightFactors.get('RMSEIQR', 0) * (100.0 - abs(cr.norms['RMSEIQR'])) + # in %
weightFactors.get('MSE', 0) * (100.0 - 100 * abs(cr.norms['MSE']) / cr.norms['Average']) +
weightFactors.get('NMBE', 0) * (100.0 - abs(cr.norms['NMBE'])) + # in %
weightFactors.get('NRMSE', 0) * (100.0 - abs(cr.norms['NRMSE'])) + # in %
weightFactors.get('RMSE', 0) * (100.0 - 100.0 * abs(cr.norms['RMSE']) / cr.norms['Average']) +
weightFactors.get('RMSLE', 0) * (100.0 - abs(cr.norms['RMSLE']) / cr.norms['Average']) +
weightFactors.get('R squared', 0) * (cr.norms['R squared']) + # in %
weightFactors.get('std dev', 0) * (100.0 - abs(cr.norms['std dev']) / cr.norms['Average']) +
maxDiff) / sum
cr.score = cr.score / len(starts) # normation
    # scoring calculation --> >95% : Gold | >90% : Silver | >80% : Bronze
badge = 0
if (cr.score >= 90):
badge = 1
elif (cr.score >= 80):
badge = 2
# now set the final SimQuality Badge
cr.simQbadge = badge
cr.score = max(round(cr.score, 2), 0)
return cr
# all the data is stored in a dictionary with tool-specific data
def processDirectory(path):
"""
Processes a test case directory, i.e. path = "data/TF03-Waermeleitung".
It then reads data from the subdirectory 'Auswertung/Ergebnisse' and
calculates the validation score.
Returns a CaseResults object with data for all test variables.
'None' indicates entirely invalid/missing test data or reference data.
"""
# test case name
testCaseName = os.path.split(path)[1]
testCaseName = testCaseName[2:]
# result dir exists?
tsvPath = os.path.join(path, RESULTS_SUBDIRNAME)
if not os.path.exists(tsvPath):
printError(" Missing test result directory '{}'.".format(tsvPath))
return None # None indicates entirely invalid/missing test data.
tsvFiles = [o for o in os.listdir(tsvPath) if o.endswith("tsv")]
evalFiles = [o for o in os.listdir(path) if o.endswith("tsv")]
if not "Reference.tsv" in evalFiles:
printError(" Missing 'Reference.tsv' file.")
return None
if not "EvaluationPeriods.tsv" in evalFiles:
printError(" Missing 'EvaluationPeriods.tsv' file.")
return None
tsvFiles = sorted(tsvFiles)
# read evaluation periods
evalData = TSVContainer()
evalData.readAsStrings(os.path.join(path, "EvaluationPeriods.tsv"))
if True in evalData.emptyColumn:
printError(" 'EvaluationPeriods.tsv' contains empty columns.")
return None
# read reference file
refData = TSVContainer()
refData.readAsStrings(os.path.join(path, "Reference.tsv"))
if True in refData.emptyColumn:
printError(" 'Reference.tsv' contains empty columns.")
return None
if not refData.convert2Double():
printError(" 'Reference.tsv' contains invalid numbers.")
return None
# read reference specification
try:
with open(os.path.join(path,'References.txt')) as f:
lines = f.readlines()
references = lines[0].split(",")
except RuntimeError as e:
printError(e)
printError(f"References.txt needs to be specified. Separated by ','")
# read Weight factors
try:
weightFactorsTSV = TSVContainer()
weightFactorsTSV.readAsStrings(os.path.join(path, "WeightFactors.tsv"))
except RuntimeError as e:
printError(e)
printError(f"At least one weight factor has to be specified in 'WeightFactors.tsv'.")
exit(1)
weightFactors = dict()
diffFactor = 0
for i in range(len(weightFactorsTSV.data[0])):
if weightFactorsTSV.data[0][i] == "Max Difference":
diffFactor = - float(weightFactorsTSV.data[1][i])
weightFactors[weightFactorsTSV.data[0][i]] = float(weightFactorsTSV.data[1][i])
    weightFactors['Sum'] = diffFactor + sum(map(float, weightFactorsTSV.data[1]))  # convert to float and sum up all weight factors
    # read tool specifications
ToolData = []
try:
toolData = pd.read_csv(os.path.join(path, "ToolSpecifications.tsv"), encoding='utf-8', sep="\t",
engine="pyarrow")
# toolData = toolData.set_index(['Tool'])
# toolData = toolData.to_dict('records')
except RuntimeError as e:
print(e)
print(f"Tool Data '{str(ToolData)}' is not specified in directory {path}.")
exit(1)
# extract variable names
variables = []
rawVariables = []
for v in refData.headers[1:]:
rawVariables.append(v)
# remove unit and optional '(mean)' identifier
p = v.find("(mean)")
if p == -1:
p = v.find("[")
if p == -1:
printError(" Missing unit in header label '{}' of 'Reference.tsv'".format(v))
return None
v = v[0:p].strip()
variables.append(v)
printNotification(" {}".format(v))
# extract variable names
evaluationVariables = []
for e in evalData.data[0]:
evaluationVariables.append(e)
printNotification(" {}".format(e))
###############################################################
referenceDf = | pd.DataFrame() | pandas.DataFrame |
import pyaniasetools as aat
import pyanitools as ant
import hdnntools as hdt
import pandas as pd
import sys
import numpy as np
import re
import os
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.colors import LogNorm
import matplotlib.cm as cm
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
from matplotlib.backends.backend_pdf import PdfPages
#import seaborn as sns
pd.options.display.float_format = '{:.2f}'.format
# ----------------------------------
# Plot force histogram
# ----------------------------------
def plot_corr_dist_axes(ax, Xp, Xa, cmap, labelx, labely, plabel, vmin=0, vmax=0, inset=True):
Fmx = Xa.max()
Fmn = Xa.min()
# Plot ground truth line
ax.plot([Fmn, Fmx], [Fmn, Fmx], '--', c='red', linewidth=3)
# Set labels
ax.set_xlabel(labelx, fontsize=26)
ax.set_ylabel(labely, fontsize=26)
# Plot 2d Histogram
if vmin == 0 and vmax ==0:
bins = ax.hist2d(Xp, Xa, bins=200, norm=LogNorm(), range=[[Fmn, Fmx], [Fmn, Fmx]], cmap=cmap)
else:
bins = ax.hist2d(Xp, Xa, bins=200, norm=LogNorm(), range=[[Fmn, Fmx], [Fmn, Fmx]], cmap=cmap, vmin=vmin, vmax=vmax)
# Build color bar
#cbaxes = fig.add_axes([0.91, 0.1, 0.03, 0.8])
# Annotate with label
ax.text(0.25*((Fmx-Fmn))+Fmn, 0.06*((Fmx-Fmn))+Fmn, plabel, fontsize=26)
# Annotate with errors
PMAE = hdt.calculatemeanabserror(Xa, Xp)
PRMS = hdt.calculaterootmeansqrerror(Xa, Xp)
ax.text(0.6*((Fmx-Fmn))+Fmn, 0.2*((Fmx-Fmn))+Fmn, 'MAE='+"{:.3f}".format(PMAE)+'\nRMSE='+"{:.3f}".format(PRMS), fontsize=30,
bbox={'facecolor': 'white', 'alpha': 0.5, 'pad': 5})
if inset:
axins = zoomed_inset_axes(ax, 2., loc=2) # zoom = 6
sz = 0.1*(Fmx-Fmn)
axins.hist2d(Xp, Xa, bins=50, range=[[Xa.mean() - sz, Xa.mean() + sz], [Xp.mean() - sz, Xp.mean() + sz]], norm=LogNorm(), cmap=cmap)
axins.plot([Xp.mean() - sz, Xp.mean() + sz], [Xp.mean() - sz, Xp.mean() + sz], '--', c='r', linewidth=3)
# sub region of the original image
x1, x2, y1, y2 = Xa.mean() - sz, Xa.mean() + sz, Xp.mean() - sz, Xp.mean() + sz
axins.set_xlim(x1, x2)
axins.set_ylim(y1, y2)
axins.yaxis.tick_right()
        mark_inset(ax, axins, loc1=1, loc2=3, fc="none", ec="0.5")
plt.xticks(visible=True)
plt.yticks(visible=True)
return bins
def add_inset_histogram(Xa, Xp, pos, ylim, xlim):
Ferr = Xa - Xp
std = np.std(Ferr)
men = np.mean(Ferr)
axh = plt.axes(pos)
    axh.hist(Ferr, bins=75, range=[men - 4 * std, men + 4 * std], density=False)
axh.set_ylim(ylim)
axh.set_xlim(xlim)
#axh.set_title('Difference distribution')
# ----------------------------------
# Plot force histogram
# ----------------------------------
def plot_corr_dist(Xa, Xp, inset=True, xlabel='$F_{dft}$' + r' $(kcal \times mol^{-1} \times \AA^{-1})$', ylabel='$F_{dft}$' + r' $(kcal \times mol^{-1} \times \AA^{-1})$', figsize=[13,10], cmap=mpl.cm.viridis):
Fmx = Xa.max()
Fmn = Xa.min()
label_size = 14
mpl.rcParams['xtick.labelsize'] = label_size
mpl.rcParams['ytick.labelsize'] = label_size
fig, ax = plt.subplots(figsize=figsize)
# Plot ground truth line
ax.plot([Fmn, Fmx], [Fmn, Fmx], '--', c='r', linewidth=3)
# Set labels
ax.set_xlabel(xlabel, fontsize=22)
ax.set_ylabel(ylabel, fontsize=22)
#cmap = mpl.cm.viridis
#cmap = mpl.cm.brg
# Plot 2d Histogram
bins = ax.hist2d(Xa, Xp, bins=200, norm=LogNorm(), range= [[Xa.min(), Xa.max()], [Xp.min(), Xp.max()]], cmap=cmap)
# Build color bar
#cbaxes = fig.add_axes([0.91, 0.1, 0.03, 0.8])
cb1 = fig.colorbar(bins[-1], cmap=cmap)
cb1.set_label('Count', fontsize=16)
# Annotate with errors
PMAE = hdt.calculatemeanabserror(Xa, Xp)
PRMS = hdt.calculaterootmeansqrerror(Xa, Xp)
ax.text(0.75*((Fmx-Fmn))+Fmn, 0.43*((Fmx-Fmn))+Fmn, 'MAE='+"{:.3f}".format(PMAE)+'\nRMSE='+"{:.3f}".format(PRMS), fontsize=20,
bbox={'facecolor': 'white', 'alpha': 0.5, 'pad': 5})
if inset:
axins = zoomed_inset_axes(ax, 2.2, loc=2) # zoom = 6
sz = 6
axins.hist2d(Xa, Xp,bins=50, range=[[Fmn/sz, Fmx/sz], [Fmn/sz, Fmx/sz]], norm=LogNorm(), cmap=cmap)
axins.plot([Xa.min(), Xa.max()], [Xa.min(), Xa.max()], '--', c='r', linewidth=3)
# sub region of the original image
x1, x2, y1, y2 = Fmn/sz, Fmx/sz, Fmn/sz, Fmx/sz
axins.set_xlim(x1, x2)
axins.set_ylim(y1, y2)
axins.yaxis.tick_right()
plt.xticks(visible=True)
plt.yticks(visible=True)
mark_inset(ax, axins, loc1=1, loc2=3, fc="none", ec="0.5")
Ferr = Xa - Xp
std = np.std(Ferr)
men = np.mean(Ferr)
axh = plt.axes([.49, .16, .235, .235])
        axh.hist(Ferr, bins=75, range=[men-4*std, men+4*std], density=True)
axh.set_title('Difference distribution')
#plt.draw()
plt.show()
class generate_ensemble_data(aat.anicrossvalidationconformer):
'''Constructor'''
def __init__(self, networks, tsfiles, gpu=0):
super().__init__(networks['cns'], networks['sae'], networks['nnf'], networks['nts'], gpu )
self.tsfiles = tsfiles
self.Nn = networks['nts']
'''Stat generator'''
def generate_stats(self, maxe=sys.float_info.max, forces=True, grad=False):
self.tdata = dict()
for key in self.tsfiles.keys():
print(' -Working on',key,'...')
cdata = dict({'Eani': [],
'Edft': [],
'Erel': [],
'Fani': [],
'Fdft': [],
'dEani': [],
'dEdft': [],
'Na': [],
'Na2': [],})
for file in self.tsfiles[key]:
print(key,file)
adl = ant.anidataloader(file)
for i, data in enumerate(adl):
#if i > 5:
# break
if data['coordinates'].shape[0] != 0:
Eani, Fani, sig = self.compute_energyandforce_conformations(np.array(data['coordinates'],dtype=np.float64), data['species'], ensemble=False)
midx = np.where( data['energies'] - data['energies'].min() < maxe/hdt.hatokcal )[0]
Eani = Eani[:,midx]
Edft = data['energies'][midx]
Erel = (data['energies'] - data['energies'].min())[midx]
Fani = Fani[:,midx,:,:]
if forces:
if grad:
Fdft = -data['forces'][midx]
else:
Fdft = data['forces'][midx]
else:
Fdft = 0.0*data['coordinates'][midx]
#Eestd = np.std(Eani, axis=0)/np.sqrt(len(data['species']))
Eeani = np.mean(Eani, axis=0).reshape(1,-1)
Feani = np.mean(Fani, axis=0).flatten().reshape(1,-1)
Fani = Fani.reshape(Fani.shape[0],-1)
Eani = np.vstack([Eani, Eeani])
Fani = np.vstack([Fani, Feani])
Edft = hdt.hatokcal * Edft
Fdft = hdt.hatokcal * Fdft.flatten()
cdata['Na'].append(np.full(Edft.size, len(data['species']), dtype=np.int32))
cdata['Eani'].append(Eani)
cdata['Edft'].append(Edft)
cdata['Erel'].append(Erel)
cdata['Fani'].append(Fani)
cdata['Fdft'].append(Fdft)
#cdata['Frmse'].append(np.sqrt(np.mean((Fani-Fdft).reshape(Fdft.shape[0], -1)**2, axis=1)))
#cdata['Frmae'].append(np.sqrt(np.mean(np.abs((Fani - Fdft).reshape(Fdft.shape[0], -1)), axis=1)))
cdata['dEani'].append(hdt.calculateKdmat(self.Nn+1, Eani))
cdata['dEdft'].append(hdt.calculatedmat(Edft))
cdata['Na2'].append(np.full(cdata['dEdft'][-1].size, len(data['species']), dtype=np.int32))
#cdata['Erani'].append(Eani-Eani.min())
#cdata['Erdft'].append(Edft-Edft.min())
for k in ['Na', 'Na2', 'Edft', 'Fdft', 'dEdft', 'Erel']:
cdata[k] = np.concatenate(cdata[k])
for k in ['Eani', 'Fani', 'dEani']:
cdata[k] = np.hstack(cdata[k])
self.tdata.update({key: cdata})
    ''' Store the generated statistics to a data file '''
def store_data(self, filename):
if os.path.exists(filename):
os.remove(filename)
dpack = ant.datapacker(filename)
for k in self.tdata.keys():
dpack.store_data(k,**(self.tdata[k]))
dpack.cleanup()
names = ['E$_\mathrm{MAE}$$\mu$',
'E$_\mathrm{MAE}$$\sigma$',
'E$_\mathrm{RMS}$$\mu$',
'E$_\mathrm{RMS}$$\sigma$',
'$\Delta$E$_\mathrm{MAE}$$\mu$',
'$\Delta$E$_\mathrm{MAE}$$\sigma$',
'$\Delta$E$_\mathrm{RMS}$$\mu$',
'$\Delta$E$_\mathrm{RMS}$$\sigma$',
'F$_\mathrm{MAE}$$\mu$',
'F$_\mathrm{MAE}$$\sigma$',
'F$_\mathrm{RMS}$$\mu$',
'F$_\mathrm{RMS}$$\sigma$',
]
class evaluate_ensemble_data(aat.anicrossvalidationconformer):
'''Constructor'''
def __init__(self, datafile):
self.fdata = dict()
for df in datafile:
adl = ant.anidataloader(df)
tdata = dict()
for data in adl:
tdata.update({data['path'].split('/')[-1] : data})
adl.cleanup()
self.fdata[df.split('tsdata_')[-1].split('.h5')[0]] = tdata
''' Generate total errors '''
def generate_fullset_errors(self, ntkey, tslist):
#idx = np.nonzero(self.fdata[ntkey][tskey]['Erdft'])
#tskeys = self.fdata[ntkey].keys()
if not tslist:
tskeys = self.fdata[ntkey].keys()
else:
tskeys = tslist
Nn = self.fdata[ntkey][list(tskeys)[0]]['Eani'].shape[0]-1
#print(self.fdata[ntkey][tskey]['Fdft'].shape)
return {names[0]: hdt.calculatemeanabserror(
np.concatenate([self.fdata[ntkey][tskey]['Eani'][Nn,:] for tskey in tskeys]),
np.concatenate([self.fdata[ntkey][tskey]['Edft'] for tskey in tskeys])),
names[1]: np.std(hdt.calculatemeanabserror(
np.hstack([self.fdata[ntkey][tskey]['Eani'][0:Nn,:] for tskey in tskeys]),
np.hstack([self.fdata[ntkey][tskey]['Edft'] for tskey in tskeys]), axis=1)),
names[2]: hdt.calculaterootmeansqrerror(
np.concatenate([self.fdata[ntkey][tskey]['Eani'][Nn, :] for tskey in tskeys]),
np.concatenate([self.fdata[ntkey][tskey]['Edft'] for tskey in tskeys])),
names[3]: np.std(hdt.calculaterootmeansqrerror(
np.hstack([self.fdata[ntkey][tskey]['Eani'][0:Nn, :] for tskey in tskeys]),
np.hstack([self.fdata[ntkey][tskey]['Edft'] for tskey in tskeys]), axis=1)),
names[4]: hdt.calculatemeanabserror(
np.concatenate([self.fdata[ntkey][tskey]['dEani'][Nn, :] for tskey in tskeys]),
np.concatenate([self.fdata[ntkey][tskey]['dEdft'] for tskey in tskeys])),
names[5]: np.std(hdt.calculatemeanabserror(
np.hstack([self.fdata[ntkey][tskey]['dEani'][0:Nn, :] for tskey in tskeys]),
np.hstack([self.fdata[ntkey][tskey]['dEdft'] for tskey in tskeys]), axis=1)),
names[6]: hdt.calculaterootmeansqrerror(
np.concatenate([self.fdata[ntkey][tskey]['dEani'][Nn, :] for tskey in tskeys]),
np.concatenate([self.fdata[ntkey][tskey]['dEdft'] for tskey in tskeys])),
names[7]: np.std(hdt.calculaterootmeansqrerror(
np.hstack([self.fdata[ntkey][tskey]['dEani'][0:Nn, :] for tskey in tskeys]),
np.hstack([self.fdata[ntkey][tskey]['dEdft'] for tskey in tskeys]), axis=1)),
names[8]: hdt.calculatemeanabserror(
np.concatenate([self.fdata[ntkey][tskey]['Fani'][Nn, :] for tskey in tskeys]),
np.concatenate([self.fdata[ntkey][tskey]['Fdft'] for tskey in tskeys])),
names[9]: np.std(hdt.calculatemeanabserror(
np.hstack([self.fdata[ntkey][tskey]['Fani'][0:Nn, :] for tskey in tskeys]),
np.hstack([self.fdata[ntkey][tskey]['Fdft'] for tskey in tskeys]), axis=1)),
names[10]: hdt.calculaterootmeansqrerror(
np.concatenate([self.fdata[ntkey][tskey]['Fani'][Nn, :] for tskey in tskeys]),
np.concatenate([self.fdata[ntkey][tskey]['Fdft'] for tskey in tskeys])),
names[11]: np.std(hdt.calculaterootmeansqrerror(
np.hstack([self.fdata[ntkey][tskey]['Fani'][0:Nn, :] for tskey in tskeys]),
np.hstack([self.fdata[ntkey][tskey]['Fdft'] for tskey in tskeys]), axis=1)),
#'FMAEm': hdt.calculatemeanabserror(self.fdata[ntkey][tskey]['Fani'][Nn,:], self.fdata[ntkey][tskey]['Fdft']),
#'FMAEs': np.std(hdt.calculatemeanabserror(self.fdata[ntkey][tskey]['Fani'][0:Nn,:], self.fdata[ntkey][tskey]['Fdft'], axis=1)),
#'FRMSm': hdt.calculaterootmeansqrerror(self.fdata[ntkey][tskey]['Fani'][Nn,:], self.fdata[ntkey][tskey]['Fdft']),
#'FRMSs': np.std(hdt.calculaterootmeansqrerror(self.fdata[ntkey][tskey]['Fani'][0:Nn, :],self.fdata[ntkey][tskey]['Fdft'], axis=1)),
#'dEMAE': hdt.calculatemeanabserror(self.fdata[ntkey][tskey]['dEani'], self.fdata[ntkey][tskey]['dEdft']),
#'dERMS': hdt.calculaterootmeansqrerror(self.fdata[ntkey][tskey]['dEani'], self.fdata[ntkey][tskey]['dEdft']),
#'ERMAE': hdt.calculatemeanabserror(self.fdata[ntkey][tskey]['Erani'][idx], self.fdata[ntkey][tskey]['Erdft'][idx]),
#'ERRMS': hdt.calculaterootmeansqrerror(self.fdata[ntkey][tskey]['Erani'][idx], self.fdata[ntkey][tskey]['rdft'][idx]),
}
    ''' Collect raw values of a given data key across the selected test sets '''
def get_range_stats(self, tslist, dkey):
#idx = np.nonzero(self.fdata[ntkey][tskey]['Erdft'])
ntkey = list(self.fdata.keys())[0]
if not tslist:
tskeys = self.fdata[ntkey].keys()
else:
tskeys = tslist
Nn = self.fdata[ntkey][list(tskeys)[0]][dkey].shape[0]-1
return np.concatenate([self.fdata[ntkey][tskey][dkey] for tskey in tskeys])
''' Generate total errors '''
def generate_fullset_peratom_errors(self, ntkey, tslist):
#idx = np.nonzero(self.fdata[ntkey][tskey]['Erdft'])
if not tslist:
tskeys = self.fdata[ntkey].keys()
else:
tskeys = tslist
Nn = self.fdata[ntkey][list(tskeys)[0]]['Eani'].shape[0]-1
#print(self.fdata[ntkey]['GDB07to09']['Eani'][Nn,:])
#print(self.fdata[ntkey]['GDB07to09']['Na'])
#print(self.fdata[ntkey]['GDB07to09']['Eani'][Nn,:]/self.fdata[ntkey]['GDB07to09']['Na'])
return {names[0]: 1000*hdt.calculatemeanabserror(
np.concatenate([self.fdata[ntkey][tskey]['Eani'][Nn,:]/self.fdata[ntkey][tskey]['Na'] for tskey in tskeys]),
np.concatenate([self.fdata[ntkey][tskey]['Edft']/self.fdata[ntkey][tskey]['Na'] for tskey in tskeys])),
names[2]: 1000*hdt.calculaterootmeansqrerror(
np.concatenate([self.fdata[ntkey][tskey]['Eani'][Nn, :]/self.fdata[ntkey][tskey]['Na'] for tskey in tskeys]),
np.concatenate([self.fdata[ntkey][tskey]['Edft']/self.fdata[ntkey][tskey]['Na'] for tskey in tskeys])),
names[4]: 1000*hdt.calculatemeanabserror(
np.concatenate([self.fdata[ntkey][tskey]['dEani'][Nn, :] / self.fdata[ntkey][tskey]['Na2'] for tskey in tskeys]),
np.concatenate([self.fdata[ntkey][tskey]['dEdft'] / self.fdata[ntkey][tskey]['Na2'] for tskey in tskeys])),
names[6]: 1000*hdt.calculaterootmeansqrerror(
np.concatenate([self.fdata[ntkey][tskey]['dEani'][Nn, :] / self.fdata[ntkey][tskey]['Na2'] for tskey in tskeys]),
np.concatenate([self.fdata[ntkey][tskey]['dEdft'] / self.fdata[ntkey][tskey]['Na2'] for tskey in tskeys])),
}
''' Generate total errors '''
def generate_fullset_mean_errors(self, ntkey):
#idx = np.nonzero(self.fdata[ntkey][tskey]['Erdft'])
tskeys = self.fdata[ntkey].keys()
Nn = self.fdata[ntkey][list(tskeys)[0]]['Eani'].shape[0]-1
return {names[2]+'E': hdt.calculaterootmeansqrerror(
np.concatenate([self.fdata[ntkey][tskey]['Eani'][Nn,:] for tskey in tskeys]),
np.concatenate([self.fdata[ntkey][tskey]['Edft'] for tskey in tskeys])),
names[2]+'M': np.mean(hdt.calculaterootmeansqrerror(
np.hstack([self.fdata[ntkey][tskey]['Eani'][0:Nn, :] for tskey in tskeys]),
np.hstack([self.fdata[ntkey][tskey]['Edft'] for tskey in tskeys]),axis=1)),
names[6]+'E': hdt.calculaterootmeansqrerror(
np.concatenate([self.fdata[ntkey][tskey]['dEani'][Nn, :] for tskey in tskeys]),
np.concatenate([self.fdata[ntkey][tskey]['dEdft'] for tskey in tskeys])),
names[6]+'M': np.mean(hdt.calculaterootmeansqrerror(
np.hstack([self.fdata[ntkey][tskey]['dEani'][0:Nn, :] for tskey in tskeys]),
np.hstack([self.fdata[ntkey][tskey]['dEdft'] for tskey in tskeys]),axis=1)),
names[10]+'E': hdt.calculaterootmeansqrerror(
np.concatenate([self.fdata[ntkey][tskey]['Fani'][Nn, :] for tskey in tskeys]),
np.concatenate([self.fdata[ntkey][tskey]['Fdft'] for tskey in tskeys])),
names[10]+'M': np.mean(hdt.calculaterootmeansqrerror(
np.hstack([self.fdata[ntkey][tskey]['Fani'][0:Nn, :] for tskey in tskeys]),
np.hstack([self.fdata[ntkey][tskey]['Fdft'] for tskey in tskeys]),axis=1)),
}
''' Generate total errors '''
def generate_total_errors(self, ntkey, tskey):
#idx = np.nonzero(self.fdata[ntkey][tskey]['Erdft'])
Nn = self.fdata[ntkey][tskey]['Eani'].shape[0]-1
return {names[0]: hdt.calculatemeanabserror(self.fdata[ntkey][tskey]['Eani'][Nn,:], self.fdata[ntkey][tskey]['Edft']),
names[1]: np.std(hdt.calculatemeanabserror(self.fdata[ntkey][tskey]['Eani'][0:Nn,:], self.fdata[ntkey][tskey]['Edft'], axis=1)),
names[2]: hdt.calculaterootmeansqrerror(self.fdata[ntkey][tskey]['Eani'][Nn,:], self.fdata[ntkey][tskey]['Edft']),
names[3]: np.std(hdt.calculaterootmeansqrerror(self.fdata[ntkey][tskey]['Eani'][0:Nn,:], self.fdata[ntkey][tskey]['Edft'], axis=1)),
names[4]: hdt.calculatemeanabserror(self.fdata[ntkey][tskey]['dEani'][Nn,:], self.fdata[ntkey][tskey]['dEdft']),
names[5]: np.std(hdt.calculatemeanabserror(self.fdata[ntkey][tskey]['dEani'][0:Nn,:], self.fdata[ntkey][tskey]['dEdft'], axis=1)),
names[6]: hdt.calculaterootmeansqrerror(self.fdata[ntkey][tskey]['dEani'][Nn,:], self.fdata[ntkey][tskey]['dEdft']),
names[7]: np.std(hdt.calculaterootmeansqrerror(self.fdata[ntkey][tskey]['dEani'][0:Nn,:], self.fdata[ntkey][tskey]['dEdft'], axis=1)),
names[8]: hdt.calculatemeanabserror(self.fdata[ntkey][tskey]['Fani'][Nn,:], self.fdata[ntkey][tskey]['Fdft']),
names[9]: np.std(hdt.calculatemeanabserror(self.fdata[ntkey][tskey]['Fani'][0:Nn,:], self.fdata[ntkey][tskey]['Fdft'], axis=1)),
names[10]: hdt.calculaterootmeansqrerror(self.fdata[ntkey][tskey]['Fani'][Nn,:], self.fdata[ntkey][tskey]['Fdft']),
names[11]: np.std(hdt.calculaterootmeansqrerror(self.fdata[ntkey][tskey]['Fani'][0:Nn, :],self.fdata[ntkey][tskey]['Fdft'], axis=1)),
#'dEMAE': hdt.calculatemeanabserror(self.fdata[ntkey][tskey]['dEani'], self.fdata[ntkey][tskey]['dEdft']),
#'dERMS': hdt.calculaterootmeansqrerror(self.fdata[ntkey][tskey]['dEani'], self.fdata[ntkey][tskey]['dEdft']),
#'ERMAE': hdt.calculatemeanabserror(self.fdata[ntkey][tskey]['Erani'][idx], self.fdata[ntkey][tskey]['Erdft'][idx]),
#'ERRMS': hdt.calculaterootmeansqrerror(self.fdata[ntkey][tskey]['Erani'][idx], self.fdata[ntkey][tskey]['rdft'][idx]),
}
def determine_min_error_by_sigma(self, ntkey, minerror, percent, tskeys = ['GDB07to09'], figsize=(15.0, 12.0), labelx='', labely='', xyrange=(0.0,10.0,0.0,10.0), storepath='', cmap=mpl.cm.viridis):
#tskeys = self.fdata[ntkey].keys()
mpl.rcParams['xtick.labelsize'] = 18
mpl.rcParams['ytick.labelsize'] = 18
Nn = self.fdata[ntkey][list(tskeys)[0]]['Eani'].shape[0]-1
Eani = np.hstack([self.fdata[ntkey][tskey]['Eani'][0:Nn, :] for tskey in tskeys])
Eanimu = np.hstack([self.fdata[ntkey][tskey]['Eani'][Nn, :] for tskey in tskeys])
#Eani = np.hstack([self.fdata[ntkey][tskey]['Eani'][Nn, :] for tskey in tskeys])
Edft = np.concatenate([self.fdata[ntkey][tskey]['Edft'] for tskey in tskeys])
#print(Eani.shape, Edft.shape, )
#print(np.max(Eerr.shape, axis=0))
Sani = np.concatenate([np.std(self.fdata[ntkey][tskey]['Eani'][0:Nn, :], axis=0) for tskey in tskeys])
Na = np.concatenate([self.fdata[ntkey][tskey]['Na'] for tskey in tskeys])
#print(Sani.shape, Na.shape)
Sani = Sani / np.sqrt(Na)
Eerr = np.max(np.abs(Eani - Edft),axis=0) / np.sqrt(Na)
#Eerr = np.abs(np.mean(Eani,axis=0) - Edft) / np.sqrt(Na)
#Eerr = np.abs(Eani - Edft) / np.sqrt(Na)
#print(Eerr)
#print(Sani)
Nmax = np.where(Eerr > minerror)[0].size
perc = 0
dS = Sani.max()
step = 0
while perc < percent:
S = dS - step*0.001
Sidx = np.where(Sani > S)
step += 1
perc = 100.0*np.where(Eerr[Sidx] > minerror)[0].size/(Nmax+1.0E-7)
#print(step,perc,S,Sidx)
#print('Step:',step, 'S:',S,' -Perc over:',perc,'Total',100.0*Sidx[0].size/Edft.size)
#dE = np.max(Eerr, axis=0) / np.sqrt(Na)
#print(Eerr.shape,Eerr)
So = np.where(Sani > S)
Su = np.where(Sani <= S)
print('RMSE Over: ', hdt.calculaterootmeansqrerror(Eanimu[So],Edft[So]))
print('RMSE Under: ', hdt.calculaterootmeansqrerror(Eanimu[Su],Edft[Su]))
fig, ax = plt.subplots(figsize=figsize)
poa = np.where(Eerr[So] > minerror)[0].size / So[0].size
pob = np.where(Eerr > minerror)[0].size / Eerr.size
ax.text(0.57*(xyrange[1]), 0.04*(xyrange[3]), 'Total Captured: ' + str(int(100.0 * Sidx[0].size / Edft.size)) + '%' +
'\n' + r'($\mathrm{\mathcal{E}>}$'+ "{:.1f}".format(minerror) + r'$\mathrm{) \forall \rho}$: ' + str(int(100*pob)) + '%' +
'\n' + r'($\mathrm{\mathcal{E}>}$'+ "{:.1f}".format(minerror) + r'$\mathrm{) \forall \rho >}$' + "{:.2f}".format(S) + ': ' + str(int(100*poa)) + '%' +
'\n' + r'$\mathrm{E}$ RMSE ($\mathrm{\rho>}$'+ "{:.2f}".format(S) + r'$\mathrm{)}$: ' + "{:.1f}".format(hdt.calculaterootmeansqrerror(Eanimu[So],Edft[So])) +
'\n' + r'$\mathrm{E}$ RMSE ($\mathrm{\rho\leq}$' + "{:.2f}".format(S) + r'$\mathrm{)}$: ' + "{:.1f}".format(hdt.calculaterootmeansqrerror(Eanimu[Su], Edft[Su])),
bbox={'facecolor':'grey', 'alpha':0.5, 'pad':10}, fontsize=18)
plt.axvline(x=S,linestyle='--',color='r',linewidth=5, label=r"$\mathrm{\rho=}$"+"{:.2f}".format(S) + ' is the value that captures\n'+ str(int(percent)) + '% of errors over ' + r"$\mathrm{\mathcal{E}=}$" + "{:.1f}".format(minerror))
#)
# Set labels
ax.set_xlabel(labelx, fontsize=24)
ax.set_ylabel(labely, fontsize=24)
# Plot 2d Histogram
bins = ax.hist2d(Sani, Eerr, bins=400, norm=LogNorm(), range=[[xyrange[0], xyrange[1]], [xyrange[2], xyrange[3]]], cmap=cmap)
# Build color bar
# cbaxes = fig.add_axes([0.91, 0.1, 0.03, 0.8])
cb1 = fig.colorbar(bins[-1], cmap=cmap)
cb1.set_label('Count', fontsize=20)
cb1.ax.tick_params(labelsize=18)
plt.legend(loc='upper center',fontsize=18)
if storepath:
pp = PdfPages(storepath)
pp.savefig(fig)
pp.close()
else:
plt.show()
def get_net_keys(self):
return self.fdata.keys()
def get_totalerror_table(self, tslist = []):
errors = dict()
for k in self.fdata.keys():
errors[k] = pd.Series(self.generate_fullset_errors(k, tslist))
pd.set_option('expand_frame_repr', False)
edat = pd.DataFrame(errors).transpose()
return edat
def get_totalerrorperatom_table(self, tslist = []):
errors = dict()
for k in self.fdata.keys():
errors[k] = pd.Series(self.generate_fullset_peratom_errors(k, tslist))
pd.set_option('expand_frame_repr', False)
edat = pd.DataFrame(errors).transpose()
return edat
def get_totalmeanerror_table(self):
errors = dict()
for k in self.fdata.keys():
errors[k] = pd.Series(self.generate_fullset_mean_errors(k))
pd.set_option('expand_frame_repr', False)
edat = pd.DataFrame(errors).transpose()
return edat
def get_error_table(self, tskey):
errors = dict()
for k in self.fdata.keys():
errors[k] = pd.Series(self.generate_total_errors(k,tskey))
pd.set_option('expand_frame_repr', False)
edat = pd.DataFrame(errors).transpose()
return edat
def get_ntwrk_error_table(self, ntkey):
errors = dict()
for k in self.fdata[ntkey].keys():
errors[k] = pd.Series(self.generate_total_errors(ntkey, k))
| pd.set_option('expand_frame_repr', False) | pandas.set_option |
import numpy as np
import pandas as pd
from datetime import datetime
from functools import partial
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Layer, Input, Dense, Dropout, BatchNormalization
from tensorflow.keras import metrics
from sklearn import preprocessing
from sklearn.metrics import confusion_matrix, roc_curve, auc
import os
import gc
from .. import utils
from ..config import cfg
from .base_model import BaseModel
class FFNModel(BaseModel):
def __init__(self, X_train, y_train, X_test, params_file=None, folds_lookup=None,
prefix=None, weights=None, tf_path=None, logger=None):
BaseModel.__init__(self, X_train, y_train, X_test, params_file, folds_lookup,
prefix, logger)
self.model = None
self.sess = None
self.history = None
self.weights = weights
self.n_inputs = None
self.initializer = None
self.regularizer = None
self.activation = None
self.tf_path = tf_path
self.logdir = None
self.output_suffix = '_keras_pred'
def init_hparams(self):
'''interpret params.yaml file to set tf.Graph params
'''
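        # Illustrative (assumed) excerpt of the params file consumed below; the real
        # schema lives in the external params.yaml and may differ:
        #
        #   init_mode: fan_in
        #   init_distribution: truncated_normal
        #   regularizer: l2
        #   l2_reg_weight: 1.0e-04
        #   optimizer: adam
        #   eta: 0.001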
self.n_inputs = self.X_train.shape[1]
self.initializer = tf.keras.initializers.VarianceScaling(
scale=1.0,
mode=self.params['init_mode'],
distribution=self.params['init_distribution'],
seed=None)
l1_reg=float(self.params.get('l1_reg_weight', 0.0))
l2_reg=float(self.params.get('l2_reg_weight', 0.0))
reg={'None': None,
'l1': tf.keras.regularizers.l1(l1_reg),
'l2': tf.keras.regularizers.l2(l2_reg),
'l1-l2': tf.keras.regularizers.l1_l2(l1=l1_reg, l2=l2_reg)}
self.regularizer = reg.get(self.params['regularizer'], None)
eta = float(self.params.get('eta', 0.001))
momentum=float(self.params.get('momentum', 0.0))
beta_1=float(self.params.get('beta1', 0.9))
beta_2=float(self.params.get('beta2', 0.999))
epsilon=float(self.params.get('epsilon', 1e-08))
decay=float(self.params.get('decay', 0.0))
amsgrad=self.params.get('amsgrad', False)
optimizers = {
'sgd': tf.keras.optimizers.SGD(lr=eta,
momentum=momentum,
decay=decay),
'adam': tf.keras.optimizers.Adam(lr=eta,
beta_1=beta_1,
beta_2=beta_2,
epsilon=epsilon,
decay=decay,
amsgrad=amsgrad),
'adagrad': tf.keras.optimizers.Adagrad(lr=eta,
epsilon=epsilon,
decay=decay),
'adadelta': tf.keras.optimizers.Adadelta(lr=eta,
epsilon=epsilon,
decay=decay)}
self.optimizer = optimizers[self.params.get('optimizer', 'sgd')]
def get_sample_weights(self, fold):
idx, _ = self._get_fold_indices(fold)
return self.weights[idx.values.astype(int)]
def init_tensorboard(self, fold='', param_set=''):
'''Set directory and filename for tensorboard logs and checkpoint file
'''
now = datetime.now().strftime("%m%d-%H%M")
comment = ''
self.logdir = f'{self.tf_path}/tensorboard_logs/{now}-{fold}-{param_set}{comment}/'
self.ckpt_file = f'{self.tf_path}/sessions/mlp.ckpt'
def feedforward_layers(self, final_activation=None):
'''Iterate layers of dropout-dense-batch norm'''
X = Input(shape=(self.X_train.shape[1], ))
layer = Layer(name='identity')(X)
n_layers = len(self.params['layers'])
for i, units in enumerate(self.params['layers']):
drop_rate = self.params.get('drop_rates', [0.0] * n_layers)[i]
if drop_rate > 0.0:
layer = Dropout(drop_rate,
noise_shape=None,
seed=None,
name='drop_' + str(i+1))(layer)
layer = Dense(units,
activation=self.params.get('activation', None),
kernel_initializer=self.initializer,
bias_initializer='zeros',
kernel_regularizer=self.regularizer,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
name='dense_' + str(i+1))(layer)
if self.params.get('use_batch_norm', [False] * n_layers)[i]:
layer = BatchNormalization(axis=-1,
momentum=self.params.get('batch_norm_momentum', 0.99),
epsilon=0.001,
center=True,
scale=True,
beta_initializer='zeros',
gamma_initializer='ones',
moving_mean_initializer='zeros',
moving_variance_initializer='ones',
beta_regularizer=None,
gamma_regularizer=None,
beta_constraint=None,
gamma_constraint=None,
name='bn_'+str(i+1))(layer)
outputs = Dense(1,
activation=final_activation,
kernel_initializer=self.initializer,
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
name='outputs')(layer)
return X, outputs
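        # Example of the stack this builds for an assumed parameter set (values below
        # are illustrative, not defaults): layers=[64, 32], drop_rates=[0.0, 0.2],
        # use_batch_norm=[True, False] yields
        #   Input -> dense_1(64) -> bn_1 -> drop_2(0.2) -> dense_2(32) -> outputs(1).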
def find_lr(self, epoch, lr):
'''Learning rate (eta) finder'''
factor = float(self.params.get('find_eta_factor', 1.3))
start_eta = float(self.params.get('find_eta_start', 1e-08))
return factor ** (epoch) * start_eta
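        # Using the defaults above (find_eta_factor=1.3, find_eta_start=1e-08) this
        # sweeps the learning rate exponentially: ~1e-08 at epoch 0, ~1.4e-07 at
        # epoch 10 and ~5e-03 at epoch 50, so plotting loss against it exposes a
        # usable learning-rate range.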
def clr(self, epoch, lr):
'''Cyclical learning rate'''
eta_min = float(self.params.get('clr_min', 1e-05))
eta_max = float(self.params.get('clr_max', 1e-02))
clr_period = self.params.get('clr_period', 15)
assert clr_period >= 4
assert eta_min < eta_max
if int(clr_period) % 2 > 0:
clr_period = int(clr_period + 1)
eta_range = eta_max - eta_min
step = epoch % clr_period
factor = abs( (clr_period // 2) - step) / (clr_period // 2)
return eta_max - eta_range * factor
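        # Worked example: with the default bounds clr_min=1e-05 and clr_max=1e-02 and
        # an assumed clr_period of 4, epochs 0..3 give factors 1.0, 0.5, 0.0, 0.5, so
        # the rate climbs from 1e-05 to 1e-02 over half a cycle, falls back, and the
        # triangle repeats every clr_period epochs.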
def train_eval(self, X_train, y_train, X_val, y_val=None, weights=None,
return_preds=False, save_ckpt=False, fold=None, plot_n_samples=None):
'''Core training and evals routine.
Args:
X_train, y_train, X_val, y_val: train and validation datasets
weights: per-instance weighting factor for each instance in X_train
return_preds: bool, False for train/eval, True to generate predictions (without evals)
save_ckpt: bool, not implemented
fold: integer fold number
plot_n_samples: integer, number of samples for plot_regression_preds
Returns: if return_preds, returns model predictions for X_val
'''
self.build_model(enable_metrics=not return_preds)
callbacks = []
if self.params.get('use_clr', False):
callbacks.append(tf.keras.callbacks.LearningRateScheduler(
self.clr,
verbose=0))
if (self.params.get('reduce_eta', False)
and not self.params.get('find_eta', False)
and not self.params.get('use_clr', False)):
callbacks.append(tf.keras.callbacks.ReduceLROnPlateau(
monitor='loss',
factor=self.params['reduce_eta_factor'],
patience=self.params['reduce_eta_steps'],
verbose=1,
mode='auto',
min_delta=0.0001,
cooldown=0,
min_lr=1e-08))
if not return_preds:
# metrics callbacks, disabled for predictions-only mode
epoch_begin = lambda epoch, logs: self.epoch_begin()
epoch_end = lambda epoch, logs: self.epoch_end(
epoch, logs, X_train, y_train, X_val, y_val)
train_end = lambda logs: self.train_end(
X_val, y_val, plot_n_samples, fold)
if self.params.get('find_eta', False):
callbacks.append(tf.keras.callbacks.LearningRateScheduler(
self.find_lr,
verbose=0))
if self.params.get('use_tensorboard', False):
callbacks.append(tf.keras.callbacks.TensorBoard(
log_dir=self.logdir,
histogram_freq=0,
batch_size=self.params['val_batch_size'],
write_graph=True,
write_grads=False,
write_images=False,
update_freq='epoch'))
if self.params.get('early_stop_rounds', False):
callbacks.append(tf.keras.callbacks.EarlyStopping(
monitor='val_loss',
min_delta=0,
patience=self.params['early_stop_rounds'],
verbose=1,
mode='auto',
baseline=None,
restore_best_weights=False))
callbacks.append(tf.keras.callbacks.LambdaCallback(
                on_epoch_begin=epoch_begin,
on_epoch_end=epoch_end,
on_batch_begin=None,
on_batch_end=None,
on_train_begin=self.train_begin,
on_train_end=train_end))
validation_data = (X_val, y_val)
elif return_preds:
validation_data=None
train_batch_size = self.params.get('train_batch_size', 32)
mode = self.params.get('train_mode', 'minibatch_gd')
if mode == 'minibatch_gd':
steps_per_epoch = None
validation_steps = None
n_batches = X_train.shape[0] // train_batch_size
self.logger.info(f'training {n_batches} iterations per epoch')
elif mode == 'sgd':
steps_per_epoch = self.params.get('epoch_train_batches', 10)
validation_steps = self.params.get('epoch_val_batches', None)
# ignore class weights for regression
if self.params['loss_fn'] in ['mean_squared_error', 'mean_absolute_error']:
class_weight = None
else:
class_weight = {0: 1, 1: self.params.get('pos_weight', 1.0)}
with self.sess.as_default():
self.model.fit(X_train, y_train,
batch_size=train_batch_size,
epochs=self.params['n_epochs'],
verbose=self.params['verbose'],
callbacks=callbacks,
validation_data=validation_data,
shuffle=True,
class_weight=class_weight,
sample_weight=weights,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps)
if return_preds:
chunk_size = int(self.params['predict_chunk_size'])
fold_preds = self.model.predict(X_val,
batch_size=chunk_size,
verbose=0).ravel()
return fold_preds
def _grid_cv_fold(self, fold, plot_n_samples):
'''Single-fold CV on permutations of cv_grid params'''
params_grid, keys = self._get_cv_params_grid()
columns_list = ['fold_no', *keys]
for met in self.metrics:
columns_list.extend(['best_' + met, 'rnd_' + met])
fold_results_list = []
X_train, y_train, X_val, y_val = self._get_fold_data(fold)
if self.weights is not None:
weights = self.get_sample_weights(fold)
else: weights = None
for i, param_set in enumerate(params_grid):
params_str = ''
for j in range(len(param_set)):
self.params[keys[j]] = param_set[j]
params_str += f'{keys[j]}={self.params[keys[j]]} '
self.logger.info(params_str)
self.init_hparams()
self.init_tensorboard(fold, i+1)
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
self.train_eval(X_train, y_train, X_val, y_val, weights,
fold=fold, plot_n_samples=plot_n_samples)
tf.reset_default_graph()
best_evals = self.best_eval_multi()
for eval in best_evals:
self.logger.info(f' best val {eval[0]}: {eval[1]:.4f}, ' +
f'round {eval[2]}')
self.logger.info('')
results_row = [fold, *(str(k) for k in param_set)]
for eval in best_evals:
results_row.extend([eval[1], eval[2]])
round_results = pd.DataFrame([results_row], columns=columns_list, index=[i])
fold_results_list.append(round_results)
return pd.concat(fold_results_list, axis=0)
def grid_cv(self, val_rounds, plot_n_samples=None):
        '''Grid cross-validation. Permutes params/values in self.cv_grid (dict).
Args: val_rounds, integer: number of CV rounds
            (minimum: 1, maximum: number of folds)
Returns: no return; updates self.cv_results with grid CV results
'''
self.load_hparams()
keys = [*self.cv_grid.keys()]
columns = []
for met in self.metrics:
columns.extend(['best_' + met, 'rnd_' + met])
results_list = []
self.log_hparams()
for fold in range(1, val_rounds + 1):
self.logger.info(f'------------------------ FOLD {fold} OF {val_rounds} ------------------------')
fold_results = self._grid_cv_fold(fold, plot_n_samples)
results_list.append(fold_results)
self.cv_results = pd.concat(results_list, axis=0)
self.logger.info('grid CV complete.')
if plot_n_samples is not None:
self.plot_regression_preds()
# display/log grid CV summary
groupby = [self.cv_results[key] for key in keys]
summ_df = self.cv_results[columns].groupby(groupby).mean()
self.logger.info(self.parse_summ_df(summ_df))
# reset/reload all params from params file
self.load_hparams()
def cv_predictions(self):
'''Generate fold-by-fold predictions. For each fold k, train on all other
folds and make predictions for k.
Returns: pandas DataFrame with predictions for each fold in the training set.
'''
self.load_hparams()
self.logger.info(f'starting predictions for CV outputs...')
train_preds = []
for fold in range(1, self.n_folds + 1):
_, val_idx = self._get_fold_indices(fold)
X_train, y_train, X_val, y_val = self._get_fold_data(fold)
if self.weights is not None:
weights = self.get_sample_weights(fold)
else: weights = None
self.init_hparams()
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
fold_preds = self.train_eval(X_train, y_train, X_val, y_val,
return_preds=True, fold=fold)
tf.reset_default_graph()
fold_preds = pd.Series(fold_preds, index=val_idx)
train_preds.append(fold_preds)
self.logger.info(f'fold {fold} CV outputs complete.')
train_preds = pd.concat(train_preds)
return train_preds.rename(self.prefix + self.output_suffix, inplace=True)
def test_predictions(self):
'''Train on full X_train/y_train and return predictions for X_test
'''
if self.weights is not None:
weights = self.weights
else: weights = None
self.load_hparams()
self.init_hparams()
self.logger.info(f'starting predictions for test outputs...')
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
test_preds = self.train_eval(self.X_train, self.y_train,
self.X_test, return_preds=True, weights=weights)
tf.reset_default_graph()
test_preds = | pd.Series(test_preds, index=self.X_test.index) | pandas.Series |
import unittest
import pandas as pd
import numpy as np
class TestNumpyJSONEncoder(unittest.TestCase):
def setUp(self):
from bokeh.protocol import NumpyJSONEncoder
self.encoder = NumpyJSONEncoder()
def test_fail(self):
self.assertRaises(TypeError, self.encoder.default, {'testing': 1})
def test_panda_series(self):
s = | pd.Series([1, 3, 5, 6, 8]) | pandas.Series |
import pandas as pd
import re
default_units = {'speed': 'km/h',
'distance': 'km',
'weight': 'kg',
'height': 'cm'}
units_conversions = {}
# Team 2
def convert_time(time: str, time_format: str = None, mode: str = 'flag'):
"""
Converts string with time into pd.Timedelta object
Parameters
----------
time: string
time_format: string
if mode = flag : valid flags for pd.to_datetime function
if mode = regex : raw string with regex. Each unit of time to be included in the result
should be named group of regex. One of the following values should be used as a key:
{'days', 'seconds', 'microseconds', 'milliseconds', 'minutes', 'hours', 'weeks'}
Example of valid regex:
r"(?P<hours>[\d]{0,2})\:?(?P<minutes>[\d]{2})\:(?P<seconds>[\d]{2}).[\d]{3}"
default = None : Default call of to_timedelta function without flags
mode: string with value 'flag' or 'regex'
flag for using flag mode (matching time using flags recognised by pd.to_datetime),
regex for using regex patterns matching time
Returns
-------
pd.Timedelta object if operation was successful, else returns time parameter as a string.
"""
possible_keys = {'days', 'seconds', 'microseconds', 'milliseconds', 'minutes', 'hours', 'weeks'}
if mode == 'flag':
try:
dt_time = pd.to_datetime(time, 'ignore', format=time_format)
try:
return pd.to_timedelta(dt_time)
except ValueError:
return pd.to_timedelta(str(dt_time.time()))
except (TypeError, AttributeError):
return str(time)
elif mode == 'regex':
if re_compiler(time_format) is True:
try:
time_dict = re.search(time_format, time).groupdict()
time_dict = dict_comprehension(possible_keys, time_dict)
return pd.Timedelta(**time_dict)
except (AttributeError, KeyError):
return str(time)
else:
return str(time)
# Team 2
def convert_date(date: str, date_format: str = None, mode: str = 'flag'):
"""
Parameters
----------
date : date in string format
date_format: string
if mode = flag : valid flags for pd.to_datetime function
if mode = regex : raw string with regex. Each unit of date to be included in the result
should be named group of regex. One of the following values should be used as a key:
{'year', 'month', 'day', 'hour', 'minute', 'second', 'time_zone'}
Example of valid regex:
r"(?P<day>[\d]{2}) (?P<month>[\w]{3}) (?P<year>[\d]{2})"
default = None : Default call of to_datetime function without flags
mode : string
flag for using flag mode (matching date using flags recognised by pd.to_datetime),
regex for using regex patterns matching date
Returns
-------
pd.Timestamp object if operation was successful, else returns date parameter as a string.
"""
possible_keys = {'year', 'month', 'day', 'hour', 'minute', 'second', 'time_zone'}
if mode == 'flag':
try:
return | pd.to_datetime(date, 'ignore', format=date_format) | pandas.to_datetime |
from pathlib import Path
import pandas as pd
import numpy as np
from matplotlib.font_manager import FontProperties
import os, sys, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
grandpadir = os.path.dirname(os.path.dirname(currentdir))
sys.path.insert(0, grandpadir)
from collections import OrderedDict
from utils.helper_functions import read_nav_files, sort_files_by_dim
from analysis.comparison.comparison_utils import get_dataset_name, read_proteus_files, read_baseline_files, reform_pseudo_samples_dict
from utils.pseudo_samples import PseudoSamplesMger
from utils.shared_names import FileKeys, FileNames
import matplotlib.pyplot as plt
import statsmodels.api as sm
pipeline_grouping = 'results_predictive_grouping'
pipeline_no_grouping = 'results_predictive'
expl_size = 10
noise_level = None
keep_only_prot_fs = False
datasets = {
'wbc',
'ionosphere',
'arrhythmia'
}
# test_confs = [
# {'path': Path('..', pipeline, 'loda'), 'detector': 'loda', 'type': 'test'},
# # {'path': Path('..', pipeline, 'iforest'), 'detector': 'iforest', 'type': 'test'}
# ]
synth_confs =[
{'path': Path('..', pipeline_grouping, 'iforest'), 'detector': 'iforest', 'type': 'synthetic'},
{'path': Path('..', pipeline_grouping, 'lof'), 'detector': 'lof', 'type': 'synthetic'},
{'path': Path('..', pipeline_grouping, 'loda'), 'detector': 'loda', 'type': 'synthetic'}
]
real_confs = [
{'path': Path('..', pipeline_grouping, 'iforest'), 'detector': 'iforest', 'type': 'real'},
{'path': Path('..', pipeline_grouping, 'lof'), 'detector': 'lof', 'type': 'real'},
{'path': Path('..', pipeline_grouping, 'loda'), 'detector': 'loda', 'type': 'real'}
]
synth_confs_no_grouping = [
{'path': Path('..', pipeline_no_grouping, 'iforest'), 'detector': 'iforest', 'type': 'synthetic'},
{'path': Path('..', pipeline_no_grouping, 'lof'), 'detector': 'lof', 'type': 'synthetic'},
{'path': Path('..', pipeline_no_grouping, 'loda'), 'detector': 'loda', 'type': 'synthetic'}
]
confs_to_analyze = synth_confs
def plot_panels():
synth_no_grouping = unstructured_perfs(synth_confs_no_grouping)
synth_grouping = structured_perfs(synth_confs)
real_grouping = unstructured_perfs(real_confs)
bias_plot(synth_grouping, real_grouping, synth_no_grouping)
# test_auc_plot(pred_perfs_dict, 0)
# test_auc_plot(pred_perfs_dict, 1)
def best_models(conf):
best_models_perf_in_sample = pd.DataFrame()
cv_estimates = pd.DataFrame()
ci_in_sample = pd.DataFrame()
error_in_sample = pd.DataFrame()
best_models_perf_out_of_sample = pd.DataFrame()
dataset_names = []
nav_files_json = sort_files_by_dim(read_nav_files(conf['path'], conf['type']))
for dim, nav_file in nav_files_json.items():
real_dims = dim - 1 - (conf['type'] == 'synthetic')
dname = get_dataset_name(nav_file[FileKeys.navigator_original_dataset_path], conf['type'] != 'real')
print(dname + ' ' + str(real_dims) + 'd')
rel_fratio = '(' + str(int(round((dim-5)/dim, 2) * 100)) + '%)' if conf['type'] != 'real' else ''
dataset_names.append(dname + ' ' + str(real_dims) + 'd ' + rel_fratio)
# time_df = pd.concat([time_df, get_time_per_method(nav_file)], axis=1)
best_models_perf_in_sample_curr, ci_in_sample_curr, err_in_sample_curr, cv_estimates_curr = \
get_best_models_perf_per_method(nav_file, True)
best_models_perf_in_sample = | pd.concat([best_models_perf_in_sample, best_models_perf_in_sample_curr], axis=1) | pandas.concat |
import pandas
import glob
daily_report_files = glob.glob('data/daily_reports/*.csv')
all_data = pandas.DataFrame({'Kommune': [],
'Last Update Day': [],
'Last Update Time': [],
'Confirmed': [],
'Deaths': [],
'Recovered': [],
'Quarantine': [],
'Source (Link)': []
})
for f in daily_report_files:
all_data = pandas.concat([pandas.read_csv(f), all_data], sort=False)
all_data['Last Update Day'] = pandas.to_datetime(all_data['Last Update Day'])
all_data['Last Update Time'] = pandas.to_datetime(all_data['Last Update Time'])
all_data['Confirmed'] = all_data['Confirmed'].astype(int)
all_data['Recovered'] = all_data['Recovered'].astype(int)
all_data['Deaths'] = all_data['Deaths'].astype(int)
# ~ all_data['Deaths'] = all_data['Quarantine'].astype(int)
all_data = all_data.sort_values(by='Last Update Day')
confirmed_df = pandas.DataFrame({'Kommune': all_data['Kommune'].unique()})
recovered_df = pandas.DataFrame({'Kommune': all_data['Kommune'].unique()})
deaths_df = pandas.DataFrame({'Kommune': all_data['Kommune'].unique()})
confirmed_df = confirmed_df.set_index('Kommune', drop=True)
recovered_df = recovered_df.set_index('Kommune', drop=True)
deaths_df = deaths_df.set_index('Kommune', drop=True)
for day in all_data['Last Update Day'].dt.date.unique():
for kommune in all_data['Kommune'].unique():
all_data_kommune = all_data[all_data['Kommune'] == kommune]
c = all_data_kommune[all_data_kommune['Last Update Day'] == pandas.Timestamp(day)]['Confirmed'].values
r = all_data_kommune[all_data_kommune['Last Update Day'] == | pandas.Timestamp(day) | pandas.Timestamp |
# --------------
# Importing required libraries
import numpy as np
import pandas as pd
from scipy.stats import mode
import warnings
warnings.filterwarnings('ignore')
#Reading file
bank_data = pd.read_csv(path)
bank = | pd.DataFrame(bank_data) | pandas.DataFrame |
import pandas as pd
import dataset
import albumentations as A
import time
import torch
import numpy as np
from torch.utils.data import DataLoader
from albumentations.pytorch.transforms import ToTensorV2
from tqdm import tqdm
from albumentations import (
HorizontalFlip, IAAPerspective, ShiftScaleRotate, CLAHE, RandomRotate90,
Transpose, ShiftScaleRotate, Blur, OpticalDistortion, GridDistortion, HueSaturationValue,
IAAAdditiveGaussianNoise, GaussNoise, MotionBlur, MedianBlur, IAAPiecewiseAffine,
IAASharpen, IAAEmboss, RandomBrightnessContrast, Flip, OneOf, Compose
)
"""
Complete mAP code here => https://gist.github.com/tarlen5/008809c3decf19313de216b9208f3734
"""
def calculate_image_precision(gts, preds, thresholds = (0.5, ), form = 'coco') -> float:
# https://www.kaggle.com/sadmanaraf/wheat-detection-using-faster-rcnn-train
"""Calculates image precision.
Args:
gts: (List[List[Union[int, float]]]) Coordinates of the available ground-truth boxes
preds: (List[List[Union[int, float]]]) Coordinates of the predicted boxes,
sorted by confidence value (descending)
thresholds: (float) Different thresholds
form: (str) Format of the coordinates
Return:
(float) Precision
"""
n_threshold = len(thresholds)
image_precision = 0.0
ious = np.ones((len(gts), len(preds))) * -1
# ious = None
for threshold in thresholds:
precision_at_threshold = calculate_precision(gts.copy(), preds, threshold=threshold,
form=form, ious=ious)
image_precision += precision_at_threshold / n_threshold
return image_precision
def calculate_iou(gt, pr, form='pascal_voc') -> float:
# https://www.kaggle.com/sadmanaraf/wheat-detection-using-faster-rcnn-train
"""Calculates the Intersection over Union.
Args:
gt: (np.ndarray[Union[int, float]]) coordinates of the ground-truth box
pr: (np.ndarray[Union[int, float]]) coordinates of the prdected box
form: (str) gt/pred coordinates format
- pascal_voc: [xmin, ymin, xmax, ymax]
- coco: [xmin, ymin, w, h]
Returns:
(float) Intersection over union (0.0 <= iou <= 1.0)
"""
if form == 'coco':
gt = gt.copy()
pr = pr.copy()
gt[2] = gt[0] + gt[2]
gt[3] = gt[1] + gt[3]
pr[2] = pr[0] + pr[2]
pr[3] = pr[1] + pr[3]
# Calculate overlap area
dx = min(gt[2], pr[2]) - max(gt[0], pr[0]) + 1
if dx < 0:
return 0.0
dy = min(gt[3], pr[3]) - max(gt[1], pr[1]) + 1
if dy < 0:
return 0.0
overlap_area = dx * dy
# Calculate union area
union_area = (
(gt[2] - gt[0] + 1) * (gt[3] - gt[1] + 1) +
(pr[2] - pr[0] + 1) * (pr[3] - pr[1] + 1) -
overlap_area
)
return overlap_area / union_area
def find_best_match(gts, pred, pred_idx, threshold = 0.5, form = 'pascal_voc', ious=None) -> int:
# https://www.kaggle.com/sadmanaraf/wheat-detection-using-faster-rcnn-train
"""Returns the index of the 'best match' between the
ground-truth boxes and the prediction. The 'best match'
is the highest IoU. (0.0 IoUs are ignored).
Args:
gts: (List[List[Union[int, float]]]) Coordinates of the available ground-truth boxes
pred: (List[Union[int, float]]) Coordinates of the predicted box
pred_idx: (int) Index of the current predicted box
threshold: (float) Threshold
form: (str) Format of the coordinates
ious: (np.ndarray) len(gts) x len(preds) matrix for storing calculated ious.
Return:
(int) Index of the best match GT box (-1 if no match above threshold)
"""
best_match_iou = -np.inf
best_match_idx = -1
for gt_idx in range(len(gts)):
if gts[gt_idx][0] < 0:
# Already matched GT-box
continue
iou = -1 if ious is None else ious[gt_idx][pred_idx]
if iou < 0:
iou = calculate_iou(gts[gt_idx], pred, form=form)
if ious is not None:
ious[gt_idx][pred_idx] = iou
if iou < threshold:
continue
if iou > best_match_iou:
best_match_iou = iou
best_match_idx = gt_idx
return best_match_idx
def calculate_precision(gts, preds, threshold = 0.5, form = 'coco', ious=None) -> float:
# https://www.kaggle.com/sadmanaraf/wheat-detection-using-faster-rcnn-train
"""Calculates precision for GT - prediction pairs at one threshold.
Args:
gts: (List[List[Union[int, float]]]) Coordinates of the available ground-truth boxes
preds: (List[List[Union[int, float]]]) Coordinates of the predicted boxes,
sorted by confidence value (descending)
threshold: (float) Threshold
form: (str) Format of the coordinates
ious: (np.ndarray) len(gts) x len(preds) matrix for storing calculated ious.
Return:
(float) Precision
"""
n = len(preds)
tp = 0
fp = 0
for pred_idx in range(n):
best_match_gt_idx = find_best_match(gts, preds[pred_idx], pred_idx,
threshold=threshold, form=form, ious=ious)
if best_match_gt_idx >= 0:
# True positive: The predicted box matches a gt box with an IoU above the threshold.
tp += 1
# Remove the matched GT box
gts[best_match_gt_idx] = -1
else:
# No match
# False positive: indicates a predicted box had no associated gt box.
fp += 1
# False negative: indicates a gt box had no associated predicted box.
fn = (gts.sum(axis=1) > 0).sum()
return tp / (tp + fp + fn)
# Albumentations
def get_train_transform():
return A.Compose([
A.Flip(0.5),
A.RandomRotate90(0.5),
MotionBlur(p=0.2),
MedianBlur(blur_limit=3, p=0.1),
Blur(blur_limit=3, p=0.1),
ToTensorV2(p=1.0)
], bbox_params={'format': 'pascal_voc', 'label_fields': ['labels']})
def get_valid_transform():
return A.Compose([
ToTensorV2(p=1.0)
], bbox_params={'format': 'pascal_voc', 'label_fields': ['labels']})
def collate_fn(batch):
return tuple(zip(*batch))
def prepare_data():
DIR_INPUT = '../input/rsna-pneumonia-detection-2018/input'
DIR_TRAIN = f"{DIR_INPUT}/images/"
train_df = pd.read_csv(f"{DIR_INPUT}/stage_2_train_labels.csv")
print(train_df.shape)
train_df.head()
train_df_pos = | pd.DataFrame(columns=['patientId', 'x', 'y', 'width', 'height']) | pandas.DataFrame |
#!/usr/bin/env python
import os
import sys
import datetime
from pathlib import Path
from functools import partial
import numpy as np
import pandas as pd
from tqdm import tqdm
from scipy import optimize
from tqdm.contrib import concurrent
from lib.io import read_file
from lib.utils import ROOT
def _get_outbreak_mask(data: pd.DataFrame, threshold: int = 10):
""" Returns a mask for > N confirmed cases. Used to filter out uninteresting dates """
return data["Confirmed"] > threshold
def _logistic_function(X: float, a: float, b: float, c: float):
"""
Used for prediction model. Uses the function:
`f(x) = a * e^(-b * e^(-cx))`
"""
return a * np.exp(-b * np.exp(-c * X))
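# --- Illustrative note (not part of the original pipeline) ---
# Despite its name, the curve above is a Gompertz-type sigmoid with upper
# asymptote `a`: f(x) -> a as x grows, and f(0) = a * e^(-b). A quick check
# with made-up parameters:
def _example_logistic_function():
    a, b, c = 100.0, 5.0, 0.2
    assert abs(_logistic_function(0.0, a, b, c) - a * np.exp(-b)) < 1e-9
    assert _logistic_function(100.0, a, b, c) < a  # approaches but stays below a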
def _forward_indices(indices: list, window: int):
""" Adds `window` indices to a list of dates """
date_indices = [datetime.date.fromisoformat(idx) for idx in indices]
for _ in range(window):
date_indices.append(date_indices[-1] + datetime.timedelta(days=1))
return [idx.isoformat() for idx in date_indices]
def _compute_forecast(data: pd.Series, window: int):
"""
Perform a forecast of `window` days past the last day of `data`, including a model estimate of
all days already existing in `data`.
"""
# Some of the parameter fittings result in overflow
np.seterr(all="ignore")
# Perform a simple fit of all available data up to this date
X, y = list(range(len(data))), data.tolist()
# Providing a reasonable initial guess is crucial for this model
params, _ = optimize.curve_fit(
_logistic_function, X, y, maxfev=int(1e6), p0=[max(y), np.median(X), 0.1]
)
# Append N new days to our indices
date_indices = _forward_indices(data.index, window)
# Perform projection with the previously estimated parameters
projected = [_logistic_function(x, *params) for x in range(len(X) + window)]
return | pd.Series(projected, index=date_indices, name="Estimated") | pandas.Series |
from itertools import product as it_product
from typing import List, Dict
import numpy as np
import os
import pandas as pd
from scipy.stats import spearmanr, wilcoxon
from provided_code.constants_class import ModelParameters
from provided_code.data_loader import DataLoader
from provided_code.dose_evaluation_class import EvaluateDose
from provided_code.general_functions import get_paths, get_predictions_to_optimize
def consolidate_data_for_analysis(cs: ModelParameters, force_new_consolidate: bool = False) \
-> [pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame]:
"""
Consolidated data of all reference plans, dose predictions, and KBP plans. This may take about an hour to run, but
only needs to be run once for a given set of experiments.
Args:
cs: A constants object.
force_new_consolidate: Flag that will force consolidating data, which will overwrite previous data that was
consolidated in previous iterations.
Returns:
df_dose_error: Summary of dose error
df_dvh_metrics: Summary of DVH metric performance (can be converted to DVH error later)
df_clinical_criteria: Summary of clinical criteria performance
df_ref_dvh_metrics: Summary of reference dose DVH metrics
df_ref_clinical_criteria: Summary of reference dose clinical criteria performance
df_objective_data: The data from the objective functions (e.g., weights, objective function values)
df_solve_time: The time it took to solve models
"""
# Run consolidate_data_for_analysis when new predictions or plans
consolidate_data_paths = {'dose': f'{cs.results_data_dir}/dose_error_df.csv',
'dvh': f'{cs.results_data_dir}/dvh_metric_df.csv',
'clinical_criteria': f'{cs.results_data_dir}/clinical_criteria_df.csv',
'ref_dvh': f'{cs.results_data_dir}/reference_metrics.csv',
'ref_clinical_criteria': f'{cs.results_data_dir}/reference_criteria.csv',
'weights': f'{cs.results_data_dir}/weights_df.csv',
'solve_time': f'{cs.results_data_dir}/solve_time_df.csv'
}
# Check if consolidated data already exists
no_consolidated_data = False
for p in consolidate_data_paths.values():
if not os.path.isfile(p):
print(p)
no_consolidated_data = True
os.makedirs(cs.results_data_dir, exist_ok=True) # Make dir for results
# Consolidate data if it doesn't exist yet or force flag is True
if no_consolidated_data or force_new_consolidate:
# Prepare strings for data that will be evaluated
predictions_to_optimize, prediction_names = get_predictions_to_optimize(cs)
patient_names = os.listdir(cs.reference_data_dir)
hold_out_plan_paths = get_paths(cs.reference_data_dir, ext='') # list of paths used for held out testing
# Evaluate dose metrics
patient_data_loader = DataLoader(hold_out_plan_paths, mode_name='evaluation') # Set data loader
dose_evaluator_sample = EvaluateDose(patient_data_loader)
# Make reference dose DVH metrics and clinical criteria
dose_evaluator_sample.make_metrics()
dose_evaluator_sample.melt_dvh_metrics('Reference', 'reference_dose_metric_df').to_csv(
consolidate_data_paths['ref_dvh'])
dose_evaluator_sample.melt_dvh_metrics('Reference', 'reference_criteria_df').to_csv(
consolidate_data_paths['ref_clinical_criteria'])
# Initialize DataFrames for all scores and errors
optimizer_names = os.listdir(cs.plans_dir) # Get names of all optimizers
dose_error_index_dict, dvh_metric_index_dict = make_error_and_metric_indices(patient_names,
dose_evaluator_sample,
optimizer_names)
df_dose_error_indices = pd.MultiIndex.from_product(**dose_error_index_dict)
df_dvh_error_indices = pd.MultiIndex.from_arrays(**dvh_metric_index_dict)
# Make DataFrames
df_dose_error = pd.DataFrame(columns=prediction_names, index=df_dose_error_indices)
df_solve_time = pd.DataFrame(columns=prediction_names, index=df_dose_error_indices)
df_dvh_metrics = pd.DataFrame(columns=prediction_names, index=df_dvh_error_indices)
df_clinical_criteria = pd.DataFrame(columns=prediction_names, index=df_dvh_error_indices)
weights_list = []
weight_columns = []
# Iterate through each prediction in the list of prediction_names
for prediction in prediction_names:
# Make a dataloader that loads predicted dose distributions
prediction_paths = get_paths(f'{cs.prediction_dir}/{prediction}', ext='csv')
prediction_dose_loader = DataLoader(prediction_paths, mode_name='predicted_dose') # Set prediction loader
# Evaluate predictions and plans with respect to ground truth
dose_evaluator = EvaluateDose(patient_data_loader, prediction_dose_loader)
populate_error_dfs(dose_evaluator, df_dose_error, df_dvh_metrics, df_clinical_criteria, prediction,
'Prediction')
# Make dataloader for plan dose distributions
for opt_name in optimizer_names:
print(opt_name)
# Get the paths of all optimized plans for prediction
cs.get_optimization_directories(prediction, opt_name)
weights_list, weight_columns = populate_weights_df(cs, weights_list)
populate_solve_time_df(cs, df_solve_time)
# Make data loader to load plan doses
plan_paths = get_paths(cs.plan_dose_from_pred_dir, ext='csv') # List of all plan dose paths
plan_dose_loader = DataLoader(plan_paths, mode_name='predicted_dose') # Set plan dose loader
plan_evaluator = EvaluateDose(patient_data_loader, plan_dose_loader) # Make evaluation object
# Ignore prediction name if no data exists, o/w populate DataFrames
if not patient_data_loader.file_paths_list:
print('No patient information was given to calculate metrics')
else:
# Evaluate prediction errors
populate_error_dfs(plan_evaluator, df_dose_error, df_dvh_metrics, df_clinical_criteria, prediction,
opt_name)
# Clean up weights
weights_df = pd.DataFrame(weights_list, columns=weight_columns)
weights_df.set_index(['Objective', 'Structure', 'Patients', 'Dose_type', 'Prediction'], inplace=True)
weights_df = weights_df.unstack('Prediction')
# Save dose and DVH error DataFrames
df_dose_error.to_csv(consolidate_data_paths['dose'])
df_dvh_metrics.to_csv(consolidate_data_paths['dvh'])
df_clinical_criteria.to_csv(consolidate_data_paths['clinical_criteria'])
weights_df.to_csv(consolidate_data_paths['weights'])
df_solve_time.to_csv(consolidate_data_paths['solve_time'])
# Loads the DataFrames that contain consolidated data
df_dose_error = pd.read_csv(consolidate_data_paths['dose'], index_col=[0, 1])
df_dvh_metrics = pd.read_csv(consolidate_data_paths['dvh'], index_col=[0, 1, 2, 3])
df_clinical_criteria = pd.read_csv(consolidate_data_paths['clinical_criteria'], index_col=[0, 1, 2, 3])
df_ref_dvh_metrics = | pd.read_csv(consolidate_data_paths['ref_dvh'], index_col=[0, 1, 2, 3], squeeze=True) | pandas.read_csv |
import numpy as np
import pandas as pd
import pandas.testing as pdt
import pyarrow as pa
import pytest
from pandas.arrays import SparseArray
from kartothek.core.cube.constants import (
KTK_CUBE_DF_SERIALIZER,
KTK_CUBE_METADATA_DIMENSION_COLUMNS,
KTK_CUBE_METADATA_KEY_IS_SEED,
KTK_CUBE_METADATA_PARTITION_COLUMNS,
KTK_CUBE_METADATA_SUPPRESS_INDEX_ON,
)
from kartothek.core.cube.cube import Cube
from kartothek.core.dataset import DatasetMetadata
from kartothek.core.index import ExplicitSecondaryIndex, PartitionIndex
from kartothek.io.testing.utils import assert_num_row_groups
from kartothek.io_components.cube.write import MultiTableCommitAborted
from kartothek.io_components.metapartition import SINGLE_TABLE
from kartothek.serialization._parquet import ParquetSerializer
__all__ = (
"test_accept_projected_duplicates",
"test_distinct_branches",
"test_do_not_modify_df",
"test_empty_df",
"test_fail_all_empty",
"test_fail_duplicates_global",
"test_fail_duplicates_local",
"test_fail_no_store_factory",
"test_fail_nondistinc_payload",
"test_fail_not_a_df",
"test_fail_partial_build",
"test_fail_partial_overwrite",
"test_fail_partition_on_1",
"test_fail_partition_on_3",
"test_fail_partition_on_4",
"test_fail_partition_on_nondistinc_payload",
"test_fail_sparse",
"test_fail_wrong_dataset_ids",
"test_fail_wrong_types",
"test_fails_duplicate_columns",
"test_fails_metadata_nested_wrong_type",
"test_fails_metadata_unknown_id",
"test_fails_metadata_wrong_type",
"test_fails_missing_dimension_columns",
"test_fails_missing_partition_columns",
"test_fails_missing_seed",
"test_fails_no_dimension_columns",
"test_fails_null_dimension",
"test_fails_null_index",
"test_fails_null_partition",
"test_fails_projected_duplicates",
"test_indices",
"test_metadata",
"test_nones",
"test_overwrite",
"test_overwrite_rollback_ktk_cube",
"test_parquet",
"test_partition_on_enrich_extra",
"test_partition_on_enrich_none",
"test_partition_on_index_column",
"test_projected_data",
"test_regression_pseudo_duplicates",
"test_rowgroups_are_applied_when_df_serializer_is_passed_to_build_cube",
"test_simple_seed_only",
"test_simple_two_datasets",
"test_single_rowgroup_when_df_serializer_is_not_passed_to_build_cube",
"test_split",
)
def test_simple_seed_only(driver, function_store):
"""
Simple integration test w/ a seed dataset only. This is the most simple way to create a cube.
"""
df = pd.DataFrame({"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v": [10, 11, 12, 13]})
cube = Cube(dimension_columns=["x"], partition_columns=["p"], uuid_prefix="cube")
result = driver(data=df, cube=cube, store=function_store)
assert set(result.keys()) == {cube.seed_dataset}
ds = list(result.values())[0]
ds = ds.load_all_indices(function_store())
assert ds.uuid == cube.ktk_dataset_uuid(cube.seed_dataset)
assert len(ds.partitions) == 2
assert set(ds.indices.keys()) == {"p", "x"}
assert isinstance(ds.indices["p"], PartitionIndex)
assert isinstance(ds.indices["x"], ExplicitSecondaryIndex)
assert ds.table_name == SINGLE_TABLE
def test_simple_two_datasets(driver, function_store):
"""
Simple integration test w/ 2 datasets.
"""
df_source = pd.DataFrame(
{"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v1": [10, 11, 12, 13]}
)
df_enrich = pd.DataFrame(
{"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v2": [20, 21, 22, 23]}
)
cube = Cube(
dimension_columns=["x"],
partition_columns=["p"],
uuid_prefix="cube",
seed_dataset="source",
)
result = driver(
data={"source": df_source, "enrich": df_enrich}, cube=cube, store=function_store
)
assert set(result.keys()) == {cube.seed_dataset, "enrich"}
ds_source = result[cube.seed_dataset].load_all_indices(function_store())
ds_enrich = result["enrich"].load_all_indices(function_store())
assert ds_source.uuid == cube.ktk_dataset_uuid(cube.seed_dataset)
assert ds_enrich.uuid == cube.ktk_dataset_uuid("enrich")
assert len(ds_source.partitions) == 2
assert len(ds_enrich.partitions) == 2
assert set(ds_source.indices.keys()) == {"p", "x"}
assert isinstance(ds_source.indices["p"], PartitionIndex)
assert isinstance(ds_source.indices["x"], ExplicitSecondaryIndex)
assert set(ds_enrich.indices.keys()) == {"p"}
assert isinstance(ds_enrich.indices["p"], PartitionIndex)
assert ds_source.table_name == SINGLE_TABLE
assert ds_enrich.table_name == SINGLE_TABLE
def test_indices(driver, function_store):
"""
Test that index structures are created correctly.
"""
df_source = pd.DataFrame(
{
"x": [0, 1, 2, 3],
"p": [0, 0, 1, 1],
"v1": [10, 11, 12, 13],
"i1": [100, 101, 102, 103],
}
)
df_enrich = pd.DataFrame(
{
"x": [0, 1, 4, 5],
"p": [0, 0, 2, 2],
"v2": [20, 21, 22, 23],
"i2": [200, 201, 202, 203],
}
)
cube = Cube(
dimension_columns=["x"],
partition_columns=["p"],
uuid_prefix="cube",
seed_dataset="source",
index_columns=["i1", "i2"],
)
result = driver(
data={"source": df_source, "enrich": df_enrich}, cube=cube, store=function_store
)
assert set(result.keys()) == {cube.seed_dataset, "enrich"}
ds_source = result[cube.seed_dataset].load_all_indices(function_store())
ds_enrich = result["enrich"].load_all_indices(function_store())
assert set(ds_source.indices.keys()) == {"p", "x", "i1"}
assert isinstance(ds_source.indices["p"], PartitionIndex)
assert isinstance(ds_source.indices["x"], ExplicitSecondaryIndex)
assert isinstance(ds_source.indices["i1"], ExplicitSecondaryIndex)
assert set(ds_enrich.indices.keys()) == {"p", "i2"}
assert isinstance(ds_enrich.indices["p"], PartitionIndex)
assert isinstance(ds_enrich.indices["i2"], ExplicitSecondaryIndex)
def test_dimension_index_suppression(driver, function_store):
"""
Test that suppress_index_on works as expected
"""
df_source = pd.DataFrame(
{
"x": [0, 0, 1, 1],
"y": [10, 11, 12, 13],
"p": [0, 0, 1, 1],
"v1": [10, 11, 12, 13],
"i1": [100, 101, 102, 103],
}
)
cube = Cube(
dimension_columns=["x", "y"],
partition_columns=["p"],
uuid_prefix="cube",
seed_dataset="source",
index_columns=["i1", "i2"],
suppress_index_on=["x"],
)
result = driver(data={"source": df_source}, cube=cube, store=function_store)
ds_source = result[cube.seed_dataset].load_all_indices(function_store())
assert set(ds_source.indices.keys()) == {"p", "i1", "y"}
assert isinstance(ds_source.indices["p"], PartitionIndex)
assert isinstance(ds_source.indices["i1"], ExplicitSecondaryIndex)
assert isinstance(ds_source.indices["y"], ExplicitSecondaryIndex)
def test_do_not_modify_df(driver, function_store):
"""
Functions should not modify their inputs.
"""
df = pd.DataFrame({"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v": [10, 11, 12, 13]})
df_backup = df.copy()
cube = Cube(dimension_columns=["x"], partition_columns=["p"], uuid_prefix="cube")
driver(data=df, cube=cube, store=function_store)
pdt.assert_frame_equal(df, df_backup)
@pytest.mark.filterwarnings("ignore::UnicodeWarning")
def test_parquet(driver, function_store):
"""
Ensure the parquet files we generate are properly normalized.
"""
df = pd.DataFrame(
data={
"x": [10, 1, 1, 0, 0],
"y": [10, 0, 1, 1, 0],
"p": [0, 1, 1, 1, 1],
"föö".encode("utf8"): [100, 10, 11, 12, 13],
"v": np.nan,
},
index=[0, 1000, 1001, 1002, 1003],
columns=["x", "y", "p", "föö".encode("utf8"), "v"],
)
cube = Cube(
dimension_columns=["x", "y"], partition_columns=["p"], uuid_prefix="cube"
)
result = driver(data=df, cube=cube, store=function_store)
assert set(result.keys()) == {cube.seed_dataset}
ds = list(result.values())[0]
ds = ds.load_all_indices(function_store())
assert len(ds.partitions) == 2
for p in (0, 1):
part_key = ds.indices["p"].index_dct[p][0]
part = ds.partitions[part_key]
key = part.files[SINGLE_TABLE]
df_actual = KTK_CUBE_DF_SERIALIZER.restore_dataframe(function_store(), key)
df_expected = (
df.loc[df["p"] == p]
.sort_values(["x", "y"])
.reset_index(drop=True)
.drop(columns=["p"])
.rename(columns={"föö".encode("utf8"): "föö"})
)
pdt.assert_frame_equal(df_actual.reset_index(drop=True), df_expected)
@pytest.mark.parametrize("chunk_size", [None, 2])
def test_rowgroups_are_applied_when_df_serializer_is_passed_to_build_cube(
driver, function_store, chunk_size
):
"""
Test that the dataset is split into row groups depending on the chunk size
"""
df = pd.DataFrame(data={"x": [0, 1, 2, 3], "p": [0, 1, 1, 1]}, columns=["x", "p"],)
cube = Cube(dimension_columns=["x"], partition_columns=["p"], uuid_prefix="rg-cube")
result = driver(
data=df,
cube=cube,
store=function_store,
df_serializer=ParquetSerializer(chunk_size=chunk_size),
)
dataset = result["seed"].load_all_indices(function_store())
part_num_rows = {0: 1, 1: 3}
part_chunk_size = {0: chunk_size, 1: chunk_size}
assert len(dataset.partitions) == 2
assert_num_row_groups(function_store(), dataset, part_num_rows, part_chunk_size)
def test_single_rowgroup_when_df_serializer_is_not_passed_to_build_cube(
driver, function_store
):
"""
Test that the dataset has a single row group as default path
"""
df = pd.DataFrame(data={"x": [0, 1, 2, 3], "p": [0, 1, 1, 1]}, columns=["x", "p"],)
cube = Cube(dimension_columns=["x"], partition_columns=["p"], uuid_prefix="rg-cube")
result = driver(data=df, cube=cube, store=function_store,)
dataset = result["seed"].load_all_indices(function_store())
part_num_rows = {0: 1, 1: 3}
part_chunk_size = {0: None, 1: None}
assert len(dataset.partitions) == 2
assert_num_row_groups(function_store(), dataset, part_num_rows, part_chunk_size)
def test_fail_sparse(driver, driver_name, function_store):
"""
Ensure that sparse dataframes are rejected.
"""
df = pd.DataFrame(
data={
"x": SparseArray([10, 1, 1, 0, 0]),
"y": SparseArray([10, 0, 1, 1, 0]),
"p": SparseArray([0, 1, 1, 1, 1]),
"v": SparseArray([np.nan] * 5),
}
)
cube = Cube(
dimension_columns=["x", "y"], partition_columns=["p"], uuid_prefix="cube"
)
with pytest.raises(TypeError, match="Sparse data is not supported."):
driver(data=df, cube=cube, store=function_store)
def test_metadata(driver, function_store):
"""
Test auto- and user-generated metadata.
"""
df_source = pd.DataFrame(
{"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v1": [10, 11, 12, 13]}
)
df_enrich = pd.DataFrame(
{"x": [0, 1, 4, 5], "p": [0, 0, 2, 2], "v2": [20, 21, 22, 23]}
)
cube = Cube(
dimension_columns=["x"],
partition_columns=["p"],
uuid_prefix="cube",
seed_dataset="source",
)
result = driver(
data={"source": df_source, "enrich": df_enrich},
cube=cube,
store=function_store,
metadata={"enrich": {"foo": 1}},
)
assert set(result.keys()) == {cube.seed_dataset, "enrich"}
ds_source = result[cube.seed_dataset]
assert set(ds_source.metadata.keys()) == {
"creation_time",
KTK_CUBE_METADATA_DIMENSION_COLUMNS,
KTK_CUBE_METADATA_KEY_IS_SEED,
KTK_CUBE_METADATA_PARTITION_COLUMNS,
KTK_CUBE_METADATA_SUPPRESS_INDEX_ON,
}
assert ds_source.metadata[KTK_CUBE_METADATA_DIMENSION_COLUMNS] == list(
cube.dimension_columns
)
assert ds_source.metadata[KTK_CUBE_METADATA_KEY_IS_SEED] is True
assert ds_source.metadata[KTK_CUBE_METADATA_PARTITION_COLUMNS] == list(
cube.partition_columns
)
assert ds_source.metadata[KTK_CUBE_METADATA_SUPPRESS_INDEX_ON] == []
ds_enrich = result["enrich"]
assert set(ds_enrich.metadata.keys()) == {
"creation_time",
KTK_CUBE_METADATA_DIMENSION_COLUMNS,
KTK_CUBE_METADATA_KEY_IS_SEED,
KTK_CUBE_METADATA_PARTITION_COLUMNS,
KTK_CUBE_METADATA_SUPPRESS_INDEX_ON,
"foo",
}
assert ds_enrich.metadata[KTK_CUBE_METADATA_DIMENSION_COLUMNS] == list(
cube.dimension_columns
)
assert ds_enrich.metadata[KTK_CUBE_METADATA_KEY_IS_SEED] is False
assert ds_enrich.metadata[KTK_CUBE_METADATA_PARTITION_COLUMNS] == list(
cube.partition_columns
)
assert ds_enrich.metadata["foo"] == 1
assert ds_source.metadata[KTK_CUBE_METADATA_SUPPRESS_INDEX_ON] == []
def test_fails_metadata_wrong_type(driver, function_store):
df_source = pd.DataFrame(
{"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v1": [10, 11, 12, 13]}
)
cube = Cube(
dimension_columns=["x"],
partition_columns=["p"],
uuid_prefix="cube",
seed_dataset="source",
)
with pytest.raises(
TypeError, match="Provided metadata should be a dict but is int"
):
driver(data={"source": df_source}, cube=cube, store=function_store, metadata=1)
def test_fails_metadata_unknown_id(driver, function_store):
df_source = pd.DataFrame(
{"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v1": [10, 11, 12, 13]}
)
cube = Cube(
dimension_columns=["x"],
partition_columns=["p"],
uuid_prefix="cube",
seed_dataset="source",
)
with pytest.raises(
ValueError,
match="Provided metadata for otherwise unspecified ktk_cube_dataset_ids: bar, foo",
):
driver(
data={"source": df_source},
cube=cube,
store=function_store,
metadata={"source": {}, "foo": {}, "bar": {}},
)
def test_fails_metadata_nested_wrong_type(driver, function_store):
df_source = pd.DataFrame(
{"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v1": [10, 11, 12, 13]}
)
cube = Cube(
dimension_columns=["x"],
partition_columns=["p"],
uuid_prefix="cube",
seed_dataset="source",
)
with pytest.raises(
TypeError,
match="Provided metadata for dataset source should be a dict but is int",
):
driver(
data={"source": df_source},
cube=cube,
store=function_store,
metadata={"source": 1},
)
def test_fails_missing_seed(driver, function_store):
"""
A cube must contain its seed dataset, check this constraint as early as possible.
"""
df = pd.DataFrame({"x": [0, 1], "p": [0, 0], "v": [10, 11]})
cube = Cube(dimension_columns=["x"], partition_columns=["p"], uuid_prefix="cube")
with pytest.raises(ValueError) as exc:
driver(data={"foo": df}, cube=cube, store=function_store)
assert 'Seed data ("seed") is missing.' in str(exc.value)
assert list(function_store().keys()) == []
def test_fails_missing_dimension_columns(driver, function_store):
"""
Ensure that we catch missing dimension columns early.
"""
df_source = pd.DataFrame({"x": [0, 1], "p": 0})
cube = Cube(
dimension_columns=["x", "y", "z"],
partition_columns=["p"],
uuid_prefix="cube",
seed_dataset="source",
)
with pytest.raises(ValueError) as exc:
driver(data=df_source, cube=cube, store=function_store)
assert 'Missing dimension columns in seed data "source": y, z' in str(exc.value)
assert list(function_store().keys()) == []
def test_fails_no_dimension_columns(driver, function_store):
"""
Ensure that we catch missing dimension columns early.
"""
df_source = pd.DataFrame({"x": [0, 1], "y": [0, 1], "z": [0, 1], "p": 0})
df_enrich = pd.DataFrame({"p": [0], "v1": 0})
cube = Cube(
dimension_columns=["x", "y", "z"],
partition_columns=["p"],
uuid_prefix="cube",
seed_dataset="source",
)
with pytest.raises(ValueError) as exc:
driver(
data={"source": df_source, "enrich": df_enrich},
cube=cube,
store=function_store,
)
assert (
'Dataset "enrich" must have at least 1 of the following dimension columns: x, y'
in str(exc.value)
)
assert not DatasetMetadata.exists(cube.ktk_dataset_uuid("enrich"), function_store())
def test_fails_duplicate_columns(driver, function_store, driver_name):
"""
Catch weird pandas behavior.
"""
if driver_name == "dask_dataframe":
pytest.skip("already detected by dask.dataframe")
df = pd.DataFrame(
{"x": [0, 1], "p": 0, "a": 1, "b": 2}, columns=["x", "p", "a", "b"]
).rename(columns={"b": "a"})
assert len(df.columns) == 4
cube = Cube(dimension_columns=["x"], partition_columns=["p"], uuid_prefix="cube")
with pytest.raises(ValueError) as exc:
driver(data=df, cube=cube, store=function_store)
assert 'Duplicate columns found in dataset "seed": x, p, a, a' in str(exc.value)
assert list(function_store().keys()) == []
def test_fails_missing_partition_columns(driver, function_store):
"""
Just make the Kartothek error nicer.
"""
df = pd.DataFrame({"x": [0, 1], "p": 0})
cube = Cube(
dimension_columns=["x"], partition_columns=["p", "q", "r"], uuid_prefix="cube"
)
with pytest.raises(ValueError) as exc:
driver(data=df, cube=cube, store=function_store)
assert 'Missing partition columns in dataset "seed": q, r' in str(exc.value)
assert list(function_store().keys()) == []
def test_overwrite(driver, function_store):
"""
Test overwrite behavior aka call the build function if the cube already exists.
"""
df1 = pd.DataFrame({"x": [0, 1], "p": [0, 0], "v": [10, 11]})
df2 = | pd.DataFrame({"x": [2, 3], "p": [1, 1], "v": [12, 13]}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Sat May 9 19:30:38 2020
@author: aletu
"""
import numpy as np
import pandas as pd
import random
import datetime
def generateWarehouseData(num_SKUs: int = 100,
nodecode: int = 1,
idwh: list = ['LOGICAL_WH1', 'LOGICAL_WH2', 'FAKE'],
whsubarea: list = ['AREA 1'],
num_aisles: int = 5,
num_bays: int = 66,
num_levels: int = 5,
level_height: int = 1200,
bay_width: int = 800,
aisle_width: int = 4000,
num_movements: int = 1000,
num_ordercode: int = 800,
average_time_between_movements: float = 1 / 24, # days
first_day: datetime.datetime = datetime.datetime(year=2020, month=1, day=2),
):
"""
Generate sample warehouse picking data
Args:
num_SKUs (int, optional): Number of SKUs of the Warehouse. Defaults to 100.
nodecode (int, optional): Nodecode of the Warehouse. Defaults to 1.
idwh (list, optional): List of logical clusters of the warehouse. Defaults to ['LOGICAL_WH1', 'LOGICAL_WH2', 'FAKE'].
whsubarea (list, optional): List of physical areas of the warehouse. Defaults to ['AREA 1'].
num_aisles (int, optional): Number of aisles of the warehouse. Defaults to 5.
num_bays (int, optional): Number of bays pof the warheouse. Defaults to 66.
num_levels (int, optional): Number of levels of the warehouse. Defaults to 5.
level_height (int, optional): Height of a level. Defaults to 1200.
bay_width (int, optional): Width of a bay. Defaults to 800.
aisle_width (int, optional): Width of an Aisle. Defaults to 4000.
num_movements (int, optional): Number of movements to generate. Defaults to 1000.
num_ordercode (int, optional): Number of picking lists to generate. Defaults to 800.
average_time_between_movements (float, optional): Average waiting time between movements. Defaults to 1 / 24.
first_day (datetime.datetime, optional): First day of the picking list. Defaults to datetime.datetime(year=2020, month=1, day=2).
Returns:
D_locations (pd.dataFrame): Output DataFrame with storage locations.
D_SKUs (pd.dataFrame): Output DataFrame with SKUs.
D_movements (pd.dataFrame): Output DataFrame with movements.
D_inventory (pd.dataFrame): Output DataFrame with inventory values.
"""
class SKU():
def __init__(self, itemcode: str):
self.ITEMCODE = itemcode
self.DESCRIPTION = f"PRODOTTO_{itemcode}"
self.VOLUME = np.random.uniform(0.1, 100) # volume in dm3
self.WEIGHT = np.random.uniform(0.1, 10) # weight in Kg
class STORAGE_LOCATION():
def __init__(self, nodecode, idwh, whsubarea, idlocation,
loccodex, loccodey, loccodez, rack, bay, level):
self.NODECODE = nodecode
self.IDWH = idwh
self.WHSUBAREA = whsubarea
self.IDLOCATION = idlocation
self.LOCCODEX = loccodex
self.LOCCODEY = loccodey
self.LOCCODEZ = loccodez
self.RACK = rack
self.BAY = bay
self.LEVEL = level
class MOVEMENTS():
def __init__(self, itemcode, volume, weight, nodecode, idwh, whsubarea, idlocation,
rack, bay, level, loccodex, loccodey, loccodez,
ordercode, quantity, timestamp, inout, ordertype):
self.ITEMCODE = itemcode
self.NODECODE = nodecode
self.IDWH = idwh
self.WHSUBAREA = whsubarea
self.IDLOCATION = idlocation
self.RACK = rack
self.BAY = bay
self.LEVEL = level
self.LOCCODEX = loccodex
self.LOCCODEY = loccodey
self.LOCCODEZ = loccodez
self.ORDERCODE = ordercode
self.PICKINGLIST = ordercode
self.QUANTITY = quantity
self.VOLUME = volume * quantity
self.WEIGHT = weight * quantity
self.TIMESTAMP_IN = timestamp
self.INOUT = inout
self.ORDERTYPE = ordertype
class INVENTORY():
def __init__(self, itemcode, nodecode, idwh, idlocation, quantity, timestamp):
self.NODECODE = nodecode
self.IDWH = idwh
self.ITEMCODE = itemcode
self.IDLOCATION = idlocation
self.QUANTITY = quantity
self.TIMESTAMP = timestamp
dict_SKUs = {}
itemcodes = np.arange(0, num_SKUs)
for itemcode in itemcodes:
dict_SKUs[itemcode] = SKU(itemcode)
# % CREATE WH LAYOUT
dict_locations = {}
idlocation = 0
for corsia in range(0, num_aisles):
for campata in range(0, num_bays):
for livello in range(0, num_levels):
idlocation = idlocation + 1 # create a new location index
# save parameters
NODECODE = nodecode
IDWH = random.choice(idwh)
WHSUBAREA = random.choice(whsubarea)
IDLOCATION = idlocation
LOCCODEX = corsia * aisle_width
LOCCODEY = campata * bay_width
LOCCODEZ = livello * level_height
# create storage location
dict_locations[idlocation] = STORAGE_LOCATION(NODECODE,
IDWH,
WHSUBAREA,
IDLOCATION,
LOCCODEX,
LOCCODEY,
LOCCODEZ,
corsia,
campata,
livello)
# create movements
dict_movements = {}
num_creati = 0
ordercodes = np.arange(0, num_ordercode)
while num_creati < num_movements:
num_creati = num_creati + 1
# randomly select a SKU (pick a key explicitly rather than relying on random.choice over a dict)
sku = dict_SKUs[random.choice(list(dict_SKUs.keys()))]
itemcode = sku.ITEMCODE
volume = sku.VOLUME
weight = sku.WEIGHT
# random select storage location
loc_key = random.choice(list(dict_locations.keys()))
loc = dict_locations[loc_key]
nodecode = loc.NODECODE
idwh = loc.IDWH
whsubarea = loc.WHSUBAREA
idlocation = loc.IDLOCATION
loccodex = loc.LOCCODEX
loccodey = loc.LOCCODEY
loccodez = loc.LOCCODEZ
rack = loc.RACK
bay = loc.BAY
level = loc.LEVEL
# generates movements data
ordercode = random.choice(ordercodes)
quantity = np.random.lognormal(mean=2, sigma=1)
wait = np.random.exponential(average_time_between_movements)
if num_creati == 1:
timestamp = first_day + datetime.timedelta(wait)
else:
timestamp = dict_movements[num_creati - 1].TIMESTAMP_IN + datetime.timedelta(wait)
inout = random.choice(['+', '-', ' '])
ordertype = random.choice(['PICKING', 'PUTAWAY', ' OTHER '])
dict_movements[num_creati] = MOVEMENTS(itemcode, volume, weight, nodecode, idwh, whsubarea, idlocation,
rack, bay, level, loccodex, loccodey, loccodez,
ordercode, quantity, timestamp, inout, ordertype)
# create inventory
dict_inventory = {}
for itemcode in dict_SKUs:
# sku = dict_SKUs[itemcode]
loc_key = random.choice(list(dict_locations.keys()))
loc = dict_locations[loc_key]
nodecode = loc.NODECODE
idwh = loc.IDWH
idlocation = loc.IDLOCATION
quantity = np.random.lognormal(mean=2, sigma=1)
dict_inventory[itemcode] = INVENTORY(itemcode, nodecode, idwh, idlocation, quantity, first_day)
# save locations and export
D_locations = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 30 15:15:03 2016
@author: Manuel
"""
from C45Tree_own import branchingCriterion
from C45Tree_own import split
import pandas as pa
def fit(X,y, branching = "gainRatio", splitCriterion = "infoGain", splitNumeric = "binary", gain_thres = 0):
'''This function fits a decision tree classifier according to the given parameters
Parameters
----------
X : required DataFrame (pandas) of all independent attributes
y : required list or numpy array of the label values
branching: "gainRatio", "infoGain", "giniIndex"
splitCriterion: "infoGain", "giniIndex"
splitNumeric: "binary", "multiple"
gain_thres :
Required modules
----------
pandas
'''
#transform values to required form
X_matrix = X.values.swapaxes(0,1).tolist()
#1. Calculate the best attribute
best_prio_score = -1
b_attr = ""
#calculating the old information value of considering soleley the label vals.
labelCount = branchingCriterion.labelCount(y)[1]
oldInfo = branchingCriterion.information(labelCount)
arrayTypes = X.dtypes
#selecting the best attribute for the next split, according to the set branching factor
for x in range(0, len(X_matrix)):
#If the next attribute is numeric, split it accordingly!
if(split.typeCheck(arrayTypes[x]) == "numeric"):
#no splitt possible, as only one value is left
if(len(X_matrix[x]) == 1):
X_matrix[x] = str("="+X_matrix[x])
if(splitNumeric == "binary"):
X_matrix[x] = split.binarySplit(X_matrix[x], y, splitCriterion)
else:
raise NotImplementedError("Multiple split has not yet been implemented")
if(branching == "gainRatio"):
prio_score = branchingCriterion.gainRatio(X_matrix[x], y, oldInfo)
elif(branching == "infoGain"):
prio_score = branchingCriterion.informationGain(X_matrix[x],y,oldInfo)
elif(branching == "giniIndex"):
prio_score = branchingCriterion.giniIndex(X_matrix[x],y)
else:
raise NotImplementedError("The given branching criterion could not be recognized")
if(best_prio_score < prio_score):
best_prio_score = prio_score
b_attr = X.iloc[:,x].name
#If the best attribute was a numeric, build new values
pa.options.mode.chained_assignment = None
X[b_attr] = X_matrix[X.columns.get_loc(b_attr)]
pa.options.mode.chained_assignment = "warn"
#2. build rule
rules = []
attr_values = X[b_attr].drop_duplicates().values
for a in range(0,len(attr_values)):
rules = rules + ([[b_attr, attr_values[a]]])
#3. Calculate new sets based on the old rules
#No further prunning possible, as all attributes are regarded!
if(len(X.columns) > 1):
#Build new sets & labels
X = X.assign(y = pa.Series(y, index = X.index))
newSets = [X[X.loc[:,b_attr]==attr_values[0]]]
for i in range(1,len(attr_values)):
newSets = newSets + [X[X.loc[:,b_attr]==attr_values[i]]]
#Calculate new & corresponding label values
newYs = [newSets[0].loc[:,'y']]
newSets[0] = newSets[0].drop('y',1)
for i in range(1,len(newSets)):
newYs = newYs + [newSets[i].loc[:,'y']]
newSets[i] = newSets[i].drop('y',1)
#Drop all unuseable attributes
for i in range(0,len(newSets)):
newSets[i] = newSets[i].drop(b_attr, 1)
# No split necessary for attributes with only one value
for j in newSets[i]:
#If it is not the last column, drop the column
if(len(newSets[i].columns) > 1):
if(len(newSets[i].loc[:,j].drop_duplicates())==1):
newSets[i] = newSets[i].drop(j,1)
#Else drop the whole table
else:
if(len(newSets[i].loc[:,j].drop_duplicates())==1):
newSets[i] = | pa.DataFrame() | pandas.DataFrame |
#
#
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from datetime import *
import math
class VolatilityArbitrage(object):
def __init__(self):
self.refl = ''
def startup(self):
print('VolatilityArbitrage v0.0.3')
self.ds_file = './data/50ETF.xlsx'
etf_close = pd.read_excel(self.ds_file,"close")
etf_ivx = pd.read_excel(self.ds_file,"ivx")
etf_hv = pd.read_excel(self.ds_file,"hv30")
lastdate = pd.read_excel(self.ds_file,"ETF_option_lasttradingdate")
etf_option_name = pd.read_excel(self.ds_file,"at_money_name")
# Fill in missing values
for j in range(1,len(etf_close.columns.tolist())):
for i in range(len(etf_close.date.values.tolist())-1,0,-1):
if math.isnan(etf_close.iat[i,j]) and math.isnan(etf_close.iat[i-1,j]):
etf_close.iat[i,j]= etf_close.iat[i+1,j]
elif math.isnan(etf_close.iat[i,j]) and math.isnan(etf_close.iat[i-1,j])==False:
etf_close.iat[i,j]=(etf_close.iat[i-1,j]+etf_close.iat[i+1,j])/2
fee = 5.0 # commission fee
slippage = 5.0 # slippage
capital = 1000000.0 # initial capital
size = 50 # straddle size
option_value = 0 # option value, initially 0
remain_money = capital # currently available capital
total_money = [remain_money]
trade_option = | pd.DataFrame() | pandas.DataFrame |
import json
from datetime import datetime
import pandas as pd
from autogluon import TabularPrediction as task
data_path = "./data/plasma/plasma"
label_column = "RETPLASMA"
fold1 = | pd.read_csv(data_path + "-fold1.csv") | pandas.read_csv |
import requests
import json
import urllib
import pandas as pd
from vikuatools.utils import int_to_string, remove_value_from_dict_key, parse_properties
def hs_get_recent_modified(url, parameters, max_results):
"""
Get recent modified object from hubspot API legacy
url: str endpoint to retreive. one of deals, companies or engagements
parameters: dict with parameters to include in call e.g. api_key, count, since
max_results: dbl max number of objects to retreive
return: list with object from responses
"""
object_list = []
get_recent_url = url
parameter_dict = parameters
headers = {}
# Paginate your request using offset
has_more = True
while has_more:
params = urllib.parse.urlencode(parameter_dict)
get_url = get_recent_url + params
r = requests.get(url= get_url, headers = headers)
response_dict = json.loads(r.text)
try:
has_more = response_dict['hasMore']
except KeyError:
has_more = response_dict['has-more']
try:
object_list.extend(response_dict['results'])
except KeyError:
object_list.extend(response_dict['contacts'])
try:
parameter_dict['offset'] = response_dict['offset']
except KeyError:
parameter_dict['vidOffset'] = response_dict['vid-offset']
if len(object_list) >= max_results: # Exit pagination, based on whatever value you've set your max results variable to.
print('maximum number of results exceeded')
break
print(f'Done!! Found {len(object_list)} object')
return object_list
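# --- Illustrative usage sketch (not part of the original module) ---
# Shows the calling convention only; the endpoint URL, API key and `since`
# timestamp below are placeholders, and the exact legacy HubSpot endpoint to
# use depends on the object type being queried.
def _example_hs_get_recent_modified():
    url = "https://api.hubapi.com/deals/v1/deal/recent/modified?"  # placeholder endpoint
    params = {"hapikey": "YOUR_API_KEY", "count": 100, "since": 1609459200000}
    return hs_get_recent_modified(url, params, max_results=500)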
def hs_get_recent_modified_contacts(url, hapikey, count, max_results, contact_property):
"""
Get recent modified contacts from hubspot API legacy. Contacts requires another function due to different name in hasMore attribute and
it need to ask for specific properties on the call
url: str endpoint to retreive. one of deals, companies or engagements
hapikey: str api_key
count: dbl number of object to retreive in a single call
max_results: dbl max number of objects to retreive
contact_property: list Properties to query
return: list with object from responses
"""
object_list = []
get_recent_url = url
parameter_dict = {'hapikey': hapikey, 'count': count}
headers = {}
properties_w_header = ['property='+x for x in contact_property]
properties_url = '&'+'&'.join(properties_w_header)
# Paginate your request using offset
has_more = True
while has_more:
parameters = urllib.parse.urlencode(parameter_dict)
get_url = get_recent_url + parameters + properties_url
r = requests.get(url= get_url, headers = headers)
response_dict = json.loads(r.text)
has_more = response_dict['has-more']
object_list.extend(response_dict['contacts'])
parameter_dict['vidOffset'] = response_dict['vid-offset']
if len(object_list) >= max_results: # Exit pagination, based on whatever value you've set your max results variable to.
print('maximum number of results exceeded')
break
print(f'Done!! Found {len(object_list)} object')
return object_list
def hs_extract_value(new_objects, property_names):
"""
Extract properties of interest from the API call response. If the response has associations, it will extract company ids and vids
new_objects: list with http response
property_names: list with property names to keep
return: pd.DataFrame with property_names fields
"""
# Association Flag
has_associations = 'associations' in new_objects[0].keys()
# If exist, append association properties to element to keep
if has_associations:
property_names = property_names + ['associatedCompanyIds', 'associatedVids']
# Start loop to extract values
list_properties = []
for obj in new_objects:
# If association exist, extract association
if has_associations:
associatedCompanyIds = obj['associations']['associatedCompanyIds']
associatedVids = obj['associations']['associatedVids']
# Extract all property values
props = obj['properties']
saved_properties = {}
for key, value in props.items():
saved_properties[key] = value['value']
# If exist, append associations properties
if has_associations:
saved_properties['associatedCompanyIds'] = associatedCompanyIds
saved_properties['associatedVids'] = associatedVids
# Save properties
list_properties.append(saved_properties)
# Properties to df
df_properties = pd.DataFrame(list_properties)
# Keep only properties of interest
subset_columns = df_properties.columns.intersection(set(property_names))
df_properties = df_properties[subset_columns]
return df_properties
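# --- Illustrative usage sketch (not part of the original module) ---
# Minimal shape of an API record that hs_extract_value can parse; the field
# names and values below are invented for the example.
def _example_hs_extract_value():
    fake_objects = [{
        "properties": {
            "dealname": {"value": "Deal A", "timestamp": 0},
            "amount": {"value": "1000", "timestamp": 0},
        }
    }]
    return hs_extract_value(fake_objects, ["dealname", "amount"])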
def hs_extract_engagements(engagement_list, *arg):
"""
Extract properties and associations from engagements
engagement_list: list with engagement and associations, response from engagement endpoint
return: df with engagement fields and company associations
"""
list_properties = []
for obj in engagement_list:
props_dict = obj['engagement']
props = dict((k, props_dict[k]) for k in ['id', 'createdAt', 'lastUpdated', 'type', 'ownerId', 'activityType'] if k in props_dict)
props['companyIds'] = obj['associations']['companyIds']
props['dealIds'] = obj['associations']['dealIds']
props['contactIds'] = obj['associations']['contactIds']
try:
props['disposition'] = obj['metadata']['disposition']
except:
props['disposition'] = None
list_properties.append(props)
df_eng = | pd.DataFrame(list_properties) | pandas.DataFrame |
# _*_ encoding:utf-8 _*_
# This script calculates index market capture by day through coingekco api
# market capture = index market cap / sum(each composition's market cap in the index )
# prerequisite:
# 1. install coingecko api python library https://github.com/man-c/pycoingecko
# 2. prepare index compositions info as a csv file which contain the info about when a coin is added
# or removed from the index and its id in coingecko. e.g. dpi_index.csv, mvi_index.csv.
# maintenance: each time a coin is added or removed from a index the csv file must change accordingly.
# result is saved as a csv file which contains the index market capture by day.
from pycoingecko import CoinGeckoAPI
import pandas as pd
import numpy as np
import time
import datetime
today = datetime.datetime.now().strftime("%Y-%m-%d")
# connect coingecko api
cg = CoinGeckoAPI()
def time_to_unix(str_time):
"""
convert str time tp unix timestamp
:param str_time: yyyy-mm-dd
:return: timestamp
"""
return time.mktime(time.strptime(str_time, "%Y-%m-%d"))
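# --- Illustrative usage sketch (not part of the original script) ---
# time_to_unix returns a local-time Unix timestamp (float) for a yyyy-mm-dd string.
def _example_time_to_unix():
    ts = time_to_unix('2021-01-01')
    assert isinstance(ts, float)
    return ts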
def get_index_compositions_market_cap(compositions_table):
"""
get index compositions market cap by day
:param compositions_table: dataframe which contains index composition info
:return: dataframe which is index compositions marketcap by day
"""
coins_cap = pd.DataFrame(columns=['dates','coinscap','coins'])
count = 0
for coin in compositions_table.values:
coin_id = coin[4]
from_timestamp = time_to_unix(coin[2])
if coin[2] == coin[3]:
to_timestamp = time_to_unix(today)
else:
to_timestamp = time_to_unix(coin[3])
datas = cg.get_coin_market_chart_range_by_id(id=coin_id,vs_currency='usd',from_timestamp=from_timestamp,to_timestamp=to_timestamp)
# waxe has no market cap data,so use Fully Diluted Market Cap instead
if coin_id == 'waxe':
datas_df = pd.DataFrame(datas['prices'],columns=['dates','coinscap'])
datas_df['coinscap'] = datas_df['coinscap']*3700000
else:
datas_df = pd.DataFrame(datas['market_caps'],columns=['dates','coinscap'])
datas_df['coins'] = coin[1]
coins_cap=coins_cap.append(datas_df)
time.sleep(5)
count += 1
print('round %d ,get market cap of %s'%(count,coin_id))
coins_cap['days'] = pd.to_datetime(coins_cap['dates'], unit='ms').dt.date
coins_cap = coins_cap.groupby(['coins', 'days']).nth(0).reset_index()
coins_cap = coins_cap.groupby('days')['coinscap'].sum().reset_index()
return coins_cap
def get_index_market_cap(id,from_time):
"""
get index marketcap
:param id: coingekco id
:param from_time: index start time yyyy-mm-dd
:return: dataframe which contains days and marketcap
"""
from_timestamp = time_to_unix(from_time)
to_timestamp = time_to_unix(today)
index_caps = cg.get_coin_market_chart_range_by_id(id=id, vs_currency='usd',
from_timestamp=from_timestamp, to_timestamp=to_timestamp)
index_df = pd.DataFrame(index_caps['market_caps'], columns=['dates', 'index_marketcap'])
index_df['days'] = pd.to_datetime(index_df['dates'], unit='ms').dt.date
index_df = index_df.drop(columns='dates')
index_df = index_df.groupby('days').nth(0).reset_index()
return index_df
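# --- Illustrative usage sketch (not part of the original script) ---
# The CoinGecko id below ('defipulse-index') is an assumption used only to
# show the calling convention; any valid index id and start date works.
def _example_get_index_market_cap():
    return get_index_market_cap(id='defipulse-index', from_time='2020-09-15')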
def get_index_market_capture(index_info_dir,id,from_time):
"""
get index market capture
:param index_info_dir: dir of index info table
:param id: coingecko id of index
:param from_time: index start time yyyy-mm-dd
:return: dataframe, compositions and index market cap by day
"""
# read dpi composition info
index_table = | pd.read_csv(index_info_dir) | pandas.read_csv |
import json
import pandas as pd
from scipy.stats.stats import pearsonr, spearmanr
import numpy as np
from scipy import stats
import sys
import matplotlib.pyplot as plt
import os
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import OneHotEncoder
import argparse
def parse_args(args):
parser = argparse.ArgumentParser(description='Arguments for the evaluation script.')
baseline_metrics = [
# 'Bleu',
# 'Meteor',
# 'Rouge 1',
# 'Rouge 2',
'Rouge L',
'BertScore P Art',
# 'BertScore R Art',
# 'BertScore F1 Art',
# 'FEQA',
'QAGS',
# 'OpenIE',
'Dep Entail',
'FactCC',
]
ablations_cols = [
'Flip_Semantic_Frame_Errors', 'Flip_Discourse_Errors', 'Flip_Content_Verifiability_Errors',
# 'Flip_RelE', 'Flip_EntE', 'Flip_CircE', 'Flip_OutE', 'Flip_GramE', 'Flip_CorefE', 'Flip_LinkE', 'Flip_Other'
]
model_names = [
'bart','pgn', 'bus', 'bert_sum', 's2s',
'TranS2S', 'TConvS2S', 'PtGen', 'BERTS2S'
]
parser.add_argument('--mode', default='hm-correlation', choices=['hm-correlation', 'ablations', 'ablations-plot', 'mm-correlation'], help=(
'This script can calculate correlation with human judgments (hm-correlation),'
' evaluate the performance of the evaluation metrics at capturing different types of factual errors (ablations),'
' output the ablation as a plot (ablations-plot), and compute the Williams test (mm-correlation)'
))
parser.add_argument('--human_eval_path', default='/home/phillab/data/frank/human_annotations.json', help='file containing human annotations expects csv file.')
parser.add_argument('--baseline_metrics_outputs', default='/home/phillab/data/frank/baseline_factuality_metrics_outputs.json', help='file name containing outputs of baseline factuality metrics.')
parser.add_argument('--baseline_metrics', nargs='+', default=baseline_metrics, help='baseline metrics to evaluate on (should match the name in the baseline metrics output file).')
parser.add_argument('--no_baseline_metrics', action='store_true', help='If set, does not evaluate the baseline metrics')
parser.add_argument('--metrics_outputs', default=None, help='names of json files containing metric outputs with key "score"')
parser.add_argument('--metrics_outputs_info', default=None, help='json file describing how to parse metrics output files. This allows to customize the name of the score key and to have several metrics in one json file.')
parser.add_argument('--ablations', nargs='+', default=ablations_cols, help='column names for ablations.')
parser.add_argument('--human', default='Factuality', help='column for human judgements.')
parser.add_argument('--no_partial_correlation', action='store_true')
parser.add_argument('--partial_correlation_variable', default='model_name', help='what column to use as confounding to calculate partial correlations')
parser.add_argument('--store_path', default=None)
parser.add_argument('--dataset', default=None, choices=[None, 'cnndm', 'bbc'], help='if None use all data')
parser.add_argument('--model_name', nargs='+', default=None, help=f'by default use all data, availble model names {model_names}')
args = parser.parse_args(args)
return vars(args)
def williams_test(r12, r13, r23, n):
"""The Williams test (<NAME>. 1959. Regression Analysis, volume 14. Wiley, New York, USA)
A test of whether the population correlation r12 equals the population correlation r13.
Significant: p < 0.05
Arguments:
r12 (float): correlation between x1, x2
r13 (float): correlation between x1, x3
r23 (float): correlation between x2, x3
n (int): size of the population
Returns:
t (float): Williams test result
p (float): p-value of t-dist
"""
if r12 < r13:
print('r12 should be larger than r13')
sys.exit()
elif n <= 3:
print('n should be larger than 3')
sys.exit()
else:
K = 1 - r12**2 - r13**2 - r23**2 + 2*r12*r13*r23
denominator = np.sqrt(2*K*(n-1)/(n-3) + (((r12+r13)**2)/4)*((1-r23)**3))
numerator = (r12-r13) * np.sqrt((n-1)*(1+r23))
t = numerator / denominator
p = 1 - stats.t.cdf(t, df=n-3) # changed to n-3 on 30/11/14
return t, p
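# --- Illustrative usage sketch (not part of the original script) ---
# Made-up correlations: metric A correlates 0.50 with human judgments, metric B
# 0.42, and the two metrics correlate 0.60 with each other over 300 summaries.
def _example_williams_test():
    t, p = williams_test(r12=0.50, r13=0.42, r23=0.60, n=300)
    return t, p < 0.05  # True would mean the difference is significant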
def human_metric_correlation(
data_df,
human_col,
metrics_cols,
partial_correlation=True,
partial_correlation_variable=None
):
"""
human_df: pandas dataframe, should only contain one column corresponding to human judgements
metrics_df: pandas dataframe, columns are metrics.
partial_correlation: bool - whether to use partial correlations.
returns a pandas dataframe with pearson and spearman correlation results
"""
correlations = []
named_correlations = dict()
for metric in metrics_cols:
if metric not in data_df:
correlations.append([0, 0, 0, 0])
named_correlations[metric] = [0, 0, 0, 0]
print(f'Warning: {metric} not in dataframe.')
continue
mask = (data_df[metric].isnull() == False) & (data_df[human_col].isnull() == False)
X = data_df[metric][mask]
Y = data_df[human_col][mask]
if partial_correlation:
assert partial_correlation_variable is not None, f'You must specify a column to use as confounding variable for partial correlation calculation'
Q = np.array(data_df[mask][partial_correlation_variable])
enc = OneHotEncoder(handle_unknown='ignore')
Q = enc.fit_transform(Q.reshape(-1, 1))
pred_X = LinearRegression().fit(Q, X).predict(Q)
pred_Y = LinearRegression().fit(Q, Y).predict(Q)
X = X - pred_X
Y = Y - pred_Y
print(f'Info: metric {metric} used {len(X)} summaries to calculate correlation.')
pr, pp = pearsonr(X, Y)
sr, sp = spearmanr(X, Y)
correlations.append([pr, pp, sr, sp])
named_correlations[metric] = [pr, pp, sr, sp]
correlation_df = pd.DataFrame.from_dict(
named_correlations,
orient='index',
columns=['pearson', 'pearson p-value', 'spearman', 'spearman p-value']
)
return correlation_df
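# --- Illustrative usage sketch (not part of the original script) ---
# A tiny synthetic dataframe showing the expected layout: one human-judgment
# column, one or more metric columns, and a categorical column used as the
# confounding variable for partial correlation. All values are invented.
def _example_human_metric_correlation():
    toy = pd.DataFrame({
        'Factuality': [0.2, 0.4, 0.9, 0.7, 0.1, 0.8],
        'Rouge L':    [0.1, 0.3, 0.8, 0.6, 0.2, 0.9],
        'model_name': ['bart', 'bart', 'bart', 'pgn', 'pgn', 'pgn'],
    })
    return human_metric_correlation(
        toy, human_col='Factuality', metrics_cols=['Rouge L'],
        partial_correlation=True, partial_correlation_variable='model_name',
    )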
def metric_metric_correlation(
data_df,
human_col,
metrics_cols,
partial_correlation=True,
partial_correlation_variable=None
):
"""
metrics_df: pandas dataframe, columns taken as metrics
partial_correlation: bool - whether to use partial correlations.
returns of tuple with two dataframes: (correlation_df, williams_df)
correlation_df is a dataframe that contains metric-metric pearson correlation
williams_df is a dataframe of booleans on weather the two metrics are different in statistically significant terms
"""
correlations = []
williams = []
for i, metric1 in enumerate(metrics_cols):
correlation_metric = []
williams_metric = []
for j, metric2 in enumerate(metrics_cols):
if j == i:
correlation_metric.append(1)
williams_metric.append(False)
continue
mask1 = (data_df[metric1].isnull() == False) & (data_df['model_name'] != 'reference')
mask2 = (data_df[metric2].isnull() == False) & (data_df['model_name'] != 'reference')
mask3 = (data_df[human_col].isnull() == False)
mask = mask1 & mask2 & mask3
X = data_df[metric1][mask]
Y = data_df[metric2][mask]
Z = data_df[human_col][mask]
if partial_correlation_variable is not None:
Q = np.array(data_df[mask][partial_correlation_variable])
enc = OneHotEncoder(handle_unknown='ignore')
Q = enc.fit_transform(Q.reshape(-1, 1))
pred_X = LinearRegression().fit(Q, X).predict(Q)
pred_Y = LinearRegression().fit(Q, Y).predict(Q)
pred_Z = LinearRegression().fit(Q, Z).predict(Q)
X = X - pred_X
Y = Y - pred_Y
Z = Z - pred_Z
r12, _ = pearsonr(X, Z)
r13, _ = pearsonr(Y, Z)
r23, _ = pearsonr(X, Y)
n = min(len(X), len(Y))
if r12 < r13:
r12, r13 = r13, r12
_, p = williams_test(r12, r13, r23, n)
correlation_metric.append(r23)
williams_metric.append(p)
correlations.append(correlation_metric)
williams.append(williams_metric)
correlations_df = pd.DataFrame(correlations, index=metrics_cols, columns=metrics_cols)
williams_df = pd.DataFrame(williams, index=metrics_cols, columns=metrics_cols)
return (correlations_df, williams_df)
def ablation(
data_df,
human_col,
ablations_cols,
metrics_cols,
partial_correlation=True,
partial_correlation_variable=None
):
"""
human_df: pandas dataframe, should only contain one column corresponding to human judgements
ablations_df: pandas dataframe, each column corresponds to a different ablation of the human judgements
metrics_df: pandas dataframe, columns are metrics.
partial_correlation: bool - whether to use partial correlations.
returns a dataframe each row corresponding to a different ablation
"""
ablations_dict = dict()
human_df = human_metric_correlation(data_df, human_col, metrics_cols, partial_correlation=partial_correlation, partial_correlation_variable=partial_correlation_variable)
human_correlation = human_df['pearson']
for ablation in ablations_cols:
ablation_df = human_metric_correlation(data_df, ablation, metrics_cols, partial_correlation=partial_correlation, partial_correlation_variable=partial_correlation_variable)
ablation_correlation = ablation_df['pearson']
ablations_dict[ablation] = human_correlation - ablation_correlation
ablations_df = pd.DataFrame(ablations_dict, index=metrics_cols)
return ablations_df
def plot_ablations(ablation_df, save_path):
"""
ablation_df: pandas dataframe, the output of ablation function
save_path: str, where to save the plot
Plots the ablation_df and possibly saves it to the location
"""
ax = ablation_df.plot.bar(figsize=(10, 4), rot=0)
plt.xticks(rotation=45)
if not save_path:
save_path = '.'
fig = ax.get_figure()
fig.savefig(os.path.join(save_path, 'ablations_plot.pdf'), bbox_inches='tight')
def main(args):
"""
Depending on the `mode` used, this script computes correlation between factuality metrics
and human judgments of factuality on the FRANK benchmark data. It can also measure how well
a metric captures certain types of errors.
The code uses baseline metric outputs provided as part of FRANK (in `baseline_facutlaity_metrics_outputs.json`).
The user can specify which metrics among the baseline metrics to use in the computation.
In addition to the baseline metrics, this tool allows users to evaluate their own factuality metric outputs on
FRANK. There are two ways to do so:
1. By providing a FRANK benchmark submission file: a `json` file containing a list of records, each record
having both `hash` and `model_name` fields as well as a `score` field with the metric output.
2. By defining a `json` file with information on how to parse the metric output files.
the schema should look like:
[
{
"path": "PATH_TO_JSON_FILE_WITH_OUTPUS"
"scores": [
{"name": "PRETTY NAME FOR THE METRIC 1", "key": "THE KEY CONTAINING THE METRIC 1 OUTPUT"},
{"name": "PRETTY NAME FOR THE METRIC 2", "key": "THE KEY CONTAINING THE METRIC 2 OUTPUT"},
...
]
},...
]
Note that the output files should still be `json` files with a list of records with `hash` and
`model_name` keys, but they can contain several metric outputs in each record.
This allows to specify a name for each metric, and allows several metrics for each output file.
"""
# Load the human judgements.
data_df = pd.read_json(args['human_eval_path'])
human_col = args['human']
ablations_cols = args['ablations']
metrics_cols = []
# Load the metric outputs.
if not args['no_baseline_metrics']:
metric_df = pd.read_json(args['baseline_metrics_outputs'])
for baseline_metric in args['baseline_metrics']:
assert baseline_metric in metric_df, baseline_metric + ' not found. Your metrics_output_info file is likely not well defined.'
data_df = data_df.merge(metric_df[['hash', 'model_name'] + args['baseline_metrics']], on=['hash', 'model_name'], validate='one_to_one')
metrics_cols += args['baseline_metrics']
if args['metrics_outputs']:
metric_df = pd.read_json(args['metrics_outputs'])
assert 'score' in metric_df, 'The metric output should be in a field named "score"'
data_df = data_df.merge(metric_df[['hash', 'model_name', 'score']], on=['hash', 'model_name'], validate='one_to_one')
metrics_cols += ['score']
if args['metrics_outputs_info']:
with open(args['metrics_outputs_info']) as infile:
metrics_info = json.loads(infile.read())
for metric_info in metrics_info:
metric_df = | pd.read_json(metric_info['path']) | pandas.read_json |
from __future__ import division
import math
import sys
from random import randint
from random import random as rnd
from reoccuring_drift_stream import ReoccuringDriftStream
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.optimize import minimize
from scipy.spatial.distance import cdist
from scipy.special import logit
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils import validation
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.validation import check_is_fitted
from skmultiflow.drift_detection import KSWIN
from skmultiflow.data.mixed_generator import MIXEDGenerator
#Abrupt Concept Drift Generators
from skmultiflow.drift_detection.adwin import ADWIN
from skmultiflow.evaluation.evaluate_prequential import EvaluatePrequential
from bix.classifiers.rrslvq import RRSLVQ
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
"""
Created on Fri Jun 22 09:35:11 2018
@author: moritz
"""
# TODO: add sigma for every prototype (TODO from https://github.com/MrNuggelz/sklearn-lvq)
class RRSLVQ(ClassifierMixin, BaseEstimator):
"""Robust Soft Learning Vector Quantization
Parameters
----------
prototypes_per_class : int or list of int, optional (default=1)
Number of prototypes per class. Use list to specify different
numbers per class.
initial_prototypes : array-like, shape = [n_prototypes, n_features + 1],
optional
Prototypes to start with. If not given initialization near the class
means. Class label must be placed as last entry of each prototype.
sigma : float, optional (default=0.5)
Variance for the distribution.
max_iter : int, optional (default=2500)
The maximum number of iterations.
gtol : float, optional (default=1e-5)
Gradient norm must be less than gtol before successful termination
of bfgs.
display : boolean, optional (default=False)
print information about the bfgs steps.
random_state : int, RandomState instance or None, optional
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
gradient_descent : string, Gradient Descent describes the used technique
to perform the gradient descent. Possible values: 'SGD' (default),
and 'l-bfgs-b'.
drift_handling : string, Type of concept drift DETECTION.
None means no concept drift detection
If KS, use the Kolmogorov-Smirnov test
If ADWIN, use an Adaptive Sliding Window, dimension-wise
If DIST, monitor class distances to detect outliers.
Attributes
----------
w_ : array-like, shape = [n_prototypes, n_features]
Prototype vector, where n_prototypes is the number of prototypes and
n_features is the number of features
c_w_ : array-like, shape = [n_prototypes]
Prototype classes
classes_ : array-like, shape = [n_classes]
Array containing labels.
initial_fit : boolean, indicator for initial fitting. Set to false after
first call of fit/partial fit.
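Examples
--------
Illustrative usage sketch on toy data (not a verified doctest; assumes the drift-detection
helpers defined later in this class are available):
>>> import numpy as np
>>> X = np.random.rand(200, 2)
>>> y = np.random.randint(0, 2, 200)
>>> clf = RRSLVQ(prototypes_per_class=2, sigma=1.0)
>>> clf = clf.partial_fit(X[:100], y[:100], classes=[0, 1])
>>> y_pred = clf.predict(X[100:])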
"""
def __init__(self, prototypes_per_class=1, initial_prototypes=None,
sigma=1.0, max_iter=2500, gtol=1e-5,
display=False, random_state=None,drift_handling = "KS",confidence=0.05,replace = True):
self.sigma = sigma
self.confidence = confidence
self.random_state = random_state
self.initial_prototypes = initial_prototypes
self.prototypes_per_class = prototypes_per_class
self.display = display
self.max_iter = max_iter
self.gtol = gtol
self.initial_fit = True
self.max_class_distances = None
self.classes_ = []
self.counter = 0
self.cd_detects = []
self.drift_handling = drift_handling
self.drift_detected = False
self.replace = replace
self.init_drift_detection = True
self.some = []
self.bg_data = [[],[]]
if not isinstance(self.display, bool):
raise ValueError("display must be a boolean")
if not isinstance(self.max_iter, int) or self.max_iter < 1:
raise ValueError("max_iter must be an positive integer")
if not isinstance(self.gtol, float) or self.gtol <= 0:
raise ValueError("gtol must be a positive float")
def _optfun(self, variables, training_data, label_equals_prototype):
n_data, n_dim = training_data.shape
nb_prototypes = self.c_w_.size
prototypes = variables.reshape(nb_prototypes, n_dim)
out = 0
for i in range(n_data):
xi = training_data[i]
y = label_equals_prototype[i]
fs = [self._costf(xi, w) for w in prototypes]
fs_max = max(fs)
s1 = sum([np.math.exp(fs[i] - fs_max) for i in range(len(fs))
if self.c_w_[i] == y])
s2 = sum([np.math.exp(f - fs_max) for f in fs])
s1 += 0.0000001
s2 += 0.0000001
out += math.log(s1 / s2)
return -out
def _optimize(self, X, y, random_state):
"""Implementation of Stochastical Gradient Descent"""
n_data, n_dim = X.shape
nb_prototypes = self.c_w_.size
prototypes = self.w_.reshape(nb_prototypes, n_dim)
for i in range(n_data):
xi = X[i]
c_xi = y[i]
for j in range(prototypes.shape[0]):
d = (xi - prototypes[j])
c = 0.5
if self.c_w_[j] == c_xi:
# Attract prototype to data point
self.w_[j] += c * (self._p(j, xi, prototypes=self.w_, y=c_xi) -
self._p(j, xi, prototypes=self.w_)) * d
else:
# Distance prototype from data point
self.w_[j] -= c * self._p(j, xi, prototypes=self.w_) * d
def _costf(self, x, w, **kwargs):
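# Unnormalized Gaussian log-kernel: -||x - w||^2 / (2 * sigma), used as the similarity measure.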
d = (x - w)[np.newaxis].T
d = d.T.dot(d)
return -d / (2 * self.sigma)
def _p(self, j, e, y=None, prototypes=None, **kwargs):
if prototypes is None:
prototypes = self.w_
if y is None:
fs = [self._costf(e, w, **kwargs) for w in prototypes]
else:
fs = [self._costf(e, prototypes[i], **kwargs) for i in
range(prototypes.shape[0]) if
self.c_w_[i] == y]
fs_max = max(fs)
s = sum([np.math.exp(f - fs_max) for f in fs])
o = np.math.exp(
self._costf(e, prototypes[j], **kwargs) - fs_max) / s
return o
def get_prototypes(self):
"""Returns the prototypes"""
return self.w_
def predict(self, x):
"""Predict class membership index for each input sample.
This function does classification on an array of
test vectors X.
Parameters
----------
x : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,)
Returns predicted values.
"""
check_is_fitted(self, ['w_', 'c_w_'])
x = validation.check_array(x)
if x.shape[1] != self.w_.shape[1]:
raise ValueError("X has wrong number of features\n"
"found=%d\n"
"expected=%d" % (self.w_.shape[1], x.shape[1]))
return np.array([self.c_w_[np.array([self._costf(xi,p) for p in self.w_]).argmax()] for xi in x])
def posterior(self, y, x):
"""
calculate the posterior for x:
p(y|x)
Parameters
----------
y: class
label
x: array-like, shape = [n_features]
sample
Returns
-------
posterior
:return: posterior
"""
check_is_fitted(self, ['w_', 'c_w_'])
x = validation.column_or_1d(x)
if y not in self.classes_:
raise ValueError('y must be one of the labels\n'
'y=%s\n'
'labels=%s' % (y, self.classes_))
s1 = sum([self._costf(x, self.w_[i]) for i in
range(self.w_.shape[0]) if
self.c_w_[i] == y])
s2 = sum([self._costf(x, w) for w in self.w_])
return s1 / s2
def get_info(self):
return 'RSLVQ'
def predict_proba(self, X):
""" predict_proba
Predicts the probability of each sample belonging to each one of the
known target_values.
Parameters
----------
X: Numpy.ndarray of shape (n_samples, n_features)
A matrix of the samples we want to predict.
Returns
-------
numpy.ndarray
An array of shape (n_samples, n_features), in which each outer entry is
associated with the X entry of the same index. And where the list in
index [i] contains len(self.target_values) elements, each of which represents
the probability that the i-th sample of X belongs to a certain label.
"""
return 'Not implemented'
def reset(self):
self.__init__()
def _validate_train_parms(self, train_set, train_lab, classes=None):
random_state = validation.check_random_state(self.random_state)
train_set, train_lab = validation.check_X_y(train_set, train_lab)
if(self.initial_fit):
if(classes):
self.classes_ = np.asarray(classes)
self.protos_initialized = np.zeros(self.classes_.size)
else:
self.classes_ = unique_labels(train_lab)
self.protos_initialized = np.zeros(self.classes_.size)
nb_classes = len(self.classes_)
nb_samples, nb_features = train_set.shape # nb_samples unused
# set prototypes per class
if isinstance(self.prototypes_per_class, int):
if self.prototypes_per_class < 0 or not isinstance(
self.prototypes_per_class, int):
raise ValueError("prototypes_per_class must be a positive int")
# nb_ppc = number of protos per class
nb_ppc = np.ones([nb_classes],
dtype='int') * self.prototypes_per_class
else:
nb_ppc = validation.column_or_1d(
validation.check_array(self.prototypes_per_class,
ensure_2d=False, dtype='int'))
if nb_ppc.min() <= 0:
raise ValueError(
"values in prototypes_per_class must be positive")
if nb_ppc.size != nb_classes:
raise ValueError(
"length of prototypes per class"
" does not fit the number of classes"
"classes=%d"
"length=%d" % (nb_classes, nb_ppc.size))
# initialize prototypes
if self.initial_prototypes is None:
#self.w_ = np.repeat(np.array([self.geometric_median(train_set[train_lab == l],"minimize") for l in self.classes_]),nb_ppc,axis=0)
#self.c_w_ = np.repeat(self.classes_,nb_ppc)
if self.initial_fit:
self.w_ = np.empty([np.sum(nb_ppc), nb_features], dtype=np.double)
self.c_w_ = np.empty([nb_ppc.sum()], dtype=self.classes_.dtype)
pos = 0
for actClass in range(len(self.classes_)):
nb_prot = nb_ppc[actClass] # nb_ppc: prototypes per class
if(self.protos_initialized[actClass] == 0 and actClass in unique_labels(train_lab)):
mean = np.mean(
train_set[train_lab == self.classes_[actClass], :], 0)
self.w_[pos:pos + nb_prot] = mean + (
random_state.rand(nb_prot, nb_features) * 2 - 1)
if math.isnan(self.w_[pos, 0]):
print('null: ', actClass)
self.protos_initialized[actClass] = 0
else:
self.protos_initialized[actClass] = 1
#
self.c_w_[pos:pos + nb_prot] = self.classes_[actClass]
pos += nb_prot
else:
x = validation.check_array(self.initial_prototypes)
self.w_ = x[:, :-1]
self.c_w_ = x[:, -1]
if self.w_.shape != (np.sum(nb_ppc), nb_features):
raise ValueError("the initial prototypes have wrong shape\n"
"found=(%d,%d)\n"
"expected=(%d,%d)" % (
self.w_.shape[0], self.w_.shape[1],
nb_ppc.sum(), nb_features))
if set(self.c_w_) != set(self.classes_):
raise ValueError(
"prototype labels and test data classes do not match\n"
"classes={}\n"
"prototype labels={}\n".format(self.classes_, self.c_w_))
if self.initial_fit:
self.initial_fit = False
return train_set, train_lab, random_state
def fit(self, X, y, classes=None):
"""Fit the LVQ model to the given training data and parameters using
l-bfgs-b.
Parameters
----------
x : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
Returns
--------
self
"""
X, y, random_state = self._validate_train_parms(X, y, classes=classes)
if len(np.unique(y)) == 1:
raise ValueError("fitting " + type(
self).__name__ + " with only one class is not possible")
self._optimize(X, y, random_state)
return self
def partial_fit(self, X, y, classes=None):
"""Fit the LVQ model to the given training data and parameters using
l-bfgs-b.
Parameters
----------
x : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
Returns
--------
self
"""
if unique_labels(y) in self.classes_ or self.initial_fit:
X, y, random_state = self._validate_train_parms(
X, y, classes=classes)
else:
raise ValueError('Class {} was not learned - please declare all \
classes in first call of fit/partial_fit'.format(y))
self.counter = self.counter + 1
if self.drift_handling is not None and self.concept_drift_detection(X,y):
self.cd_handling(X,y)
if self.counter > 30:
self.save_data(X,y,random_state)
self.cd_detects.append(self.counter)
print(self.w_.shape)
self._optimize(X, y, random_state)
return self
def save_data(self,X,y,random_state):
| pd.DataFrame(self.w_) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 6 22:15:42 2018
@author: katezeng
This module is for Predictive Analysis - Hypothesis Testing
- This component contains both the traditional statistical hypothesis testing, and the beginning of machine learning predictive analytics.
Here you will write three (3) hypotheses and see whether or not they are supported by your data. You must use all of the methods listed below
(at least once) on your data.
- You do not need to try all the methods for each hypothesis. For example, you might use ANOVA for one of your hypotheses, and you might use a
t-test and linear regression for another, etc. It will be the case, that some of the hypotheses will not be well supported.
- When trying methods like a decision tree, you should use cross-validation and show your ROC curve and a confusion matrix. For each method,
explain the method in one paragraph.
- Explain how and why you will apply your selected method(s) to each hypothesis, and discuss the results.
- Therefore, you will have at least three (3) hypothesis tests and will apply all seven (7) of the following methods to one or more of your
hypotheses.
- Required methods:
- t-test or Anova (choose one)
- Linear Regression or Logistical Regression (multivariate or multinomial) (choose one)
- Decision tree
- A Lazy Learner Method (such as kNN)
- Naïve Bayes
- SVM
- Random Forest
"""
#####################################################
# #
# Import Libraries #
# #
#####################################################
import pandas as pd
import numpy as np
from scipy import stats
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, roc_curve, auc, confusion_matrix, classification_report
from sklearn import svm
from sklearn.preprocessing import Normalizer
from sklearn.model_selection import cross_val_predict
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import label_binarize
from imblearn.over_sampling import SMOTE
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(color_codes=True)
########################################################
# #
# List of Functions #
# #
########################################################
# function for arranging columns
def arrangeCol(data):
cols = list(data)
cols.insert(len(cols), cols.pop(cols.index('price')))
data = data.loc[:, cols]
return data
# function for linear regression with absolute error plot
def linearRegression1(data):
X = data[['hotel_meanprice']]
y = data[['price']]
X_train, X_test , y_train , y_test = train_test_split(X,y,test_size=0.25,random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
predictions = model.predict(X_test)
plt.figure(figsize=(15,8))
ax = sns.distplot(y_test-predictions)
ax.set(ylabel='Density', xlabel='Error',
title='Error distribution of test sets by Linear Regression model')
plt.savefig("./plots/LRresults.png")
# function for linear regression with absolute error vs actual value
def linearRegression2(data):
X = data[['hotel_meanprice']]
y = data[['price']]
X_train, X_test , y_train , y_test = train_test_split(X,y,test_size=0.25,random_state=0)
model = LinearRegression()
model.fit(X_train, y_train)
predictions = model.predict(X_test)
plt.figure(figsize=(15,8))
ax = sns.distplot(abs(y_test-predictions)/y_test)
ax.set(ylabel='Density', xlabel='Relative absolute error',
title='Relative error distribution of test sets by Linear Regression model')
plt.savefig("./plots/LR_absolute_diff.png")
# find relationship between hotel average price and airbnb average price
def hotel_airbnb(data):
output1 = data.groupby(['zipcode'])['price'].mean().reset_index()
output1.columns = ['zipcode', 'averagePrice']
output2 = data.groupby(['zipcode'])['hotel_meanprice'].mean().reset_index()
output = pd.merge(output1, output2, on='zipcode')
plt.figure(figsize=(15,8))
# get coeffs of linear fit
slope, intercept, r_value, p_value, std_err = stats.linregress(output['hotel_meanprice'], output['averagePrice'])
ax = sns.regplot(x='hotel_meanprice', y='averagePrice', data=output,
line_kws={'label':"y={0:.1f}x+{1:.1f}".format(slope,intercept)})
ax.set(xlabel='Hotel prices', ylabel='Airbnb prices',
title='Linear relationship between average hotel prices and Airbnb prices')
ax.legend()
plt.savefig("./plots/relationship_hotel_airbnb.png")
# find the distribution of airbnb price
def find_distribution(data):
plt.figure(figsize=(15,8))
ax = sns.distplot(data['price'])
ax.set(ylabel='density', xlabel='Airbnb Prices', title='Airbnb Price Distribution')
plt.savefig("./plots/airbnb_price_dist.png")
# find the impact of room type by doing one way ANOVA
def room_type_impact(data):
entire_apt = np.array(data[data['Entire home/apt'] == 1]['price'])
shared_room = np.array(data[data['Shared room'] == 1]['price'])
private_room = np.array(data[data['Private room'] == 1]['price'])
result = stats.f_oneway(entire_apt, private_room, shared_room)
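# stats.f_oneway returns the F statistic and the p-value; a small p-value indicates that
# mean prices differ across the three room types.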
print(result)
# preproccessing data for further model training
def preprocessing(data):
price_dict = {'A': 0, 'B': 1, 'C': 2}
data['price_group'] = pd.cut(data.price, bins=[0, 200, 400, 1000], labels=[0, 1, 2])
cols = ['latitude', 'longitude', 'zipcode', 'price']
data = data.drop(cols, axis=1)
mydict = {'t': 1, 'f': 0}
data = data.replace({'host_profile_pic': mydict})
data = data.replace({'identity_verified': mydict})
fig = plt.figure(figsize=(10, 8))
data.groupby('price_group').price_group.count().plot.bar(ylim=0)
fig.suptitle('class distribution', fontsize=15)
plt.xlabel('price group', fontsize=12)
plt.xticks(rotation='horizontal')
plt.ylabel('Number of listings', fontsize=12)
fig.savefig('./plots/class_distribution.jpg')
X = pd.DataFrame(data.iloc[:, 0:-1])
y = pd.DataFrame(data.iloc[:, -1])
y = y.values.ravel()
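# SMOTE balances the three price groups by synthesizing new minority-class samples
# through interpolation between existing neighbouring samples.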
sm = SMOTE(random_state=42)
X_res, y_res = sm.fit_sample(X, y)
col_names = data.columns.tolist()
new_data = np.c_[X_res, np.transpose(y_res)]
data = pd.DataFrame(new_data, columns = col_names)
return data, price_dict
# function for model evaluation including classification report, accuracy score
# and generate confusion matrix of each model
def model_evaluation(y_test, y_pred, name):
## for confusion matrix
# class info
class_names = ['A', 'B', 'C']
conf_mat = confusion_matrix(y_test, y_pred)
print("========Confusion Matrix and Reprot of " + name + "==========")
fig, ax = plt.subplots(figsize=(8, 8))
sns.heatmap(conf_mat, annot=True, fmt='d', xticklabels=class_names, yticklabels=class_names)
#sns.heatmap(conf_mat, annot=True, fmt='d')
plt.setp(ax.get_xticklabels(), rotation=45)
plt.setp(ax.get_yticklabels(), rotation=45)
plt.ylabel('Actual')
plt.xlabel('Predicted')
#plt.savefig('./plots/confusion-matrix' + name + '.png')
## for accuracy score
print("Accuracy Score of " + name + "\n", accuracy_score(y_test, y_pred))
## for classification report
print("Classification Report of " + name + "\n", classification_report(y_test, y_pred))
# training and testing using naive bayes classifier, and generate ROC curve
def naiveBayes(data):
X = pd.DataFrame(data.iloc[:, 0:-1])
y = | pd.factorize(data['price_group']) | pandas.factorize |
# LIBRARIES
# set up backend for ssh -x11 figures
import matplotlib
matplotlib.use('Agg')
# read and write
import os
import sys
import glob
import re
import fnmatch
import csv
import shutil
from datetime import datetime
# maths
import numpy as np
import pandas as pd
import math
import random
# miscellaneous
import warnings
import gc
import timeit
# sklearn
from sklearn.utils import resample
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score, log_loss, roc_auc_score, \
accuracy_score, f1_score, precision_score, recall_score, confusion_matrix, average_precision_score
from sklearn.utils.validation import check_is_fitted
from sklearn.model_selection import KFold, PredefinedSplit, cross_validate
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LinearRegression, ElasticNet
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import StandardScaler
# Statistics
from scipy.stats import pearsonr, ttest_rel, norm
# Other tools for ensemble models building (<NAME>'s InnerCV class)
from hyperopt import fmin, tpe, space_eval, Trials, hp, STATUS_OK
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
# CPUs
from multiprocessing import Pool
# GPUs
from GPUtil import GPUtil
# tensorflow
import tensorflow as tf
# keras
from keras_preprocessing.image import ImageDataGenerator, Iterator
from keras_preprocessing.image.utils import load_img, img_to_array, array_to_img
from tensorflow.keras.utils import Sequence
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Flatten, Dense, Dropout, GlobalAveragePooling2D, concatenate
from tensorflow.keras import regularizers
from tensorflow.keras.optimizers import Adam, RMSprop, Adadelta
from tensorflow.keras.callbacks import Callback, EarlyStopping, ReduceLROnPlateau, ModelCheckpoint, CSVLogger
from tensorflow.keras.losses import MeanSquaredError, BinaryCrossentropy
from tensorflow.keras.metrics import RootMeanSquaredError, MeanAbsoluteError, AUC, BinaryAccuracy, Precision, Recall, \
TruePositives, FalsePositives, FalseNegatives, TrueNegatives
from tensorflow_addons.metrics import RSquare, F1Score
# Plots
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from PIL import Image
from bioinfokit import visuz
# Model's attention
from keract import get_activations, get_gradients_of_activations
from scipy.ndimage.interpolation import zoom
# Survival
from lifelines.utils import concordance_index
# Necessary to define MyCSVLogger
import collections
import csv
import io
import six
from tensorflow.python.lib.io import file_io
from tensorflow.python.util.compat import collections_abc
from tensorflow.keras.backend import eval
# Set display parameters
pd.set_option('display.max_rows', 200)
# CLASSES
class Basics:
"""
Root class inherited by most other classes. Includes handy helper functions.
"""
def __init__(self):
# seeds for reproducibility
self.seed = 0
os.environ['PYTHONHASHSEED'] = str(self.seed)
np.random.seed(self.seed)
random.seed(self.seed)
# other parameters
self.path_data = '../data/'
self.folds = ['train', 'val', 'test']
self.n_CV_outer_folds = 10
self.outer_folds = [str(x) for x in list(range(self.n_CV_outer_folds))]
self.modes = ['', '_sd', '_str']
self.id_vars = ['id', 'eid', 'instance', 'outer_fold']
self.instances = ['0', '1', '1.5', '1.51', '1.52', '1.53', '1.54', '2', '3']
self.ethnicities_vars_forgot_Other = \
['Ethnicity.White', 'Ethnicity.British', 'Ethnicity.Irish', 'Ethnicity.White_Other', 'Ethnicity.Mixed',
'Ethnicity.White_and_Black_Caribbean', 'Ethnicity.White_and_Black_African', 'Ethnicity.White_and_Asian',
'Ethnicity.Mixed_Other', 'Ethnicity.Asian', 'Ethnicity.Indian', 'Ethnicity.Pakistani',
'Ethnicity.Bangladeshi', 'Ethnicity.Asian_Other', 'Ethnicity.Black', 'Ethnicity.Caribbean',
'Ethnicity.African', 'Ethnicity.Black_Other', 'Ethnicity.Chinese', 'Ethnicity.Other_ethnicity',
'Ethnicity.Do_not_know', 'Ethnicity.Prefer_not_to_answer', 'Ethnicity.NA']
self.ethnicities_vars = \
['Ethnicity.White', 'Ethnicity.British', 'Ethnicity.Irish', 'Ethnicity.White_Other', 'Ethnicity.Mixed',
'Ethnicity.White_and_Black_Caribbean', 'Ethnicity.White_and_Black_African', 'Ethnicity.White_and_Asian',
'Ethnicity.Mixed_Other', 'Ethnicity.Asian', 'Ethnicity.Indian', 'Ethnicity.Pakistani',
'Ethnicity.Bangladeshi', 'Ethnicity.Asian_Other', 'Ethnicity.Black', 'Ethnicity.Caribbean',
'Ethnicity.African', 'Ethnicity.Black_Other', 'Ethnicity.Chinese', 'Ethnicity.Other',
'Ethnicity.Other_ethnicity', 'Ethnicity.Do_not_know', 'Ethnicity.Prefer_not_to_answer', 'Ethnicity.NA']
self.demographic_vars = ['Age', 'Sex'] + self.ethnicities_vars
self.names_model_parameters = ['target', 'organ', 'view', 'transformation', 'architecture', 'n_fc_layers',
'n_fc_nodes', 'optimizer', 'learning_rate', 'weight_decay', 'dropout_rate',
'data_augmentation_factor']
self.targets_regression = ['Age']
self.targets_binary = ['Sex']
self.models_types = ['', '_bestmodels']
self.dict_prediction_types = {'Age': 'regression', 'Sex': 'binary'}
self.dict_side_predictors = {'Age': ['Sex'] + self.ethnicities_vars_forgot_Other,
'Sex': ['Age'] + self.ethnicities_vars_forgot_Other}
self.organs = ['Brain', 'Eyes', 'Arterial', 'Heart', 'Abdomen', 'Musculoskeletal']
self.left_right_organs_views = ['Eyes_Fundus', 'Eyes_OCT', 'Arterial_Carotids', 'Musculoskeletal_Hips',
'Musculoskeletal_Knees']
self.dict_organs_to_views = {'Brain': ['MRI'],
'Eyes': ['Fundus', 'OCT'],
'Arterial': ['Carotids'],
'Heart': ['MRI'],
'Abdomen': ['Liver', 'Pancreas'],
'Musculoskeletal': ['Spine', 'Hips', 'Knees', 'FullBody'],
'PhysicalActivity': ['FullWeek']}
self.dict_organsviews_to_transformations = \
{'Brain_MRI': ['SagittalRaw', 'SagittalReference', 'CoronalRaw', 'CoronalReference', 'TransverseRaw',
'TransverseReference'],
'Arterial_Carotids': ['Mixed', 'LongAxis', 'CIMT120', 'CIMT150', 'ShortAxis'],
'Heart_MRI': ['2chambersRaw', '2chambersContrast', '3chambersRaw', '3chambersContrast', '4chambersRaw',
'4chambersContrast'],
'Musculoskeletal_Spine': ['Sagittal', 'Coronal'],
'Musculoskeletal_FullBody': ['Mixed', 'Figure', 'Skeleton', 'Flesh'],
'PhysicalActivity_FullWeek': ['GramianAngularField1minDifference', 'GramianAngularField1minSummation',
'MarkovTransitionField1min', 'RecurrencePlots1min']}
self.dict_organsviews_to_transformations.update(dict.fromkeys(['Eyes_Fundus', 'Eyes_OCT'], ['Raw']))
self.dict_organsviews_to_transformations.update(
dict.fromkeys(['Abdomen_Liver', 'Abdomen_Pancreas'], ['Raw', 'Contrast']))
self.dict_organsviews_to_transformations.update(
dict.fromkeys(['Musculoskeletal_Hips', 'Musculoskeletal_Knees'], ['MRI']))
self.organsviews_not_to_augment = []
self.organs_instances23 = ['Brain', 'Eyes', 'Arterial', 'Heart', 'Abdomen', 'Musculoskeletal',
'PhysicalActivity']
self.organs_XWAS = \
['*', '*instances01', '*instances1.5x', '*instances23', 'Brain', 'BrainCognitive', 'BrainMRI', 'Eyes',
'EyesFundus', 'EyesOCT', 'Hearing', 'Lungs', 'Arterial', 'ArterialPulseWaveAnalysis', 'ArterialCarotids',
'Heart', 'HeartECG', 'HeartMRI', 'Abdomen', 'AbdomenLiver', 'AbdomenPancreas', 'Musculoskeletal',
'MusculoskeletalSpine', 'MusculoskeletalHips', 'MusculoskeletalKnees', 'MusculoskeletalFullBody',
'MusculoskeletalScalars', 'PhysicalActivity', 'Biochemistry', 'BiochemistryUrine', 'BiochemistryBlood',
'ImmuneSystem']
# Others
if '/Users/Alan/' in os.getcwd():
os.chdir('/Users/Alan/Desktop/Aging/Medical_Images/scripts/')
else:
os.chdir('/n/groups/patel/Alan/Aging/Medical_Images/scripts/')
gc.enable() # garbage collector
warnings.filterwarnings('ignore')
def _version_to_parameters(self, model_name):
parameters = {}
parameters_list = model_name.split('_')
for i, parameter in enumerate(self.names_model_parameters):
parameters[parameter] = parameters_list[i]
if len(parameters_list) > 11:
parameters['outer_fold'] = parameters_list[11]
return parameters
@staticmethod
def _parameters_to_version(parameters):
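# Hypothetical example (names chosen for illustration only): the 12 underscore-separated fields follow
# names_model_parameters, e.g. 'Age_Heart_MRI_4chambersRaw_InceptionV3_1_1024_Adam_0.0001_0.0_0.2_1.0'.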
return '_'.join(parameters.values())
@staticmethod
def convert_string_to_boolean(string):
if string == 'True':
boolean = True
elif string == 'False':
boolean = False
else:
print('ERROR: string must be either \'True\' or \'False\'')
sys.exit(1)
return boolean
class Metrics(Basics):
"""
Helper class defining dictionaries of metrics and custom metrics
"""
def __init__(self):
# Parameters
Basics.__init__(self)
self.metrics_displayed_in_int = ['True-Positives', 'True-Negatives', 'False-Positives', 'False-Negatives']
self.metrics_needing_classpred = ['F1-Score', 'Binary-Accuracy', 'Precision', 'Recall']
self.dict_metrics_names_K = {'regression': ['RMSE'], # For now, R-Square is buggy. Try again in a few months.
'binary': ['ROC-AUC', 'PR-AUC', 'F1-Score', 'Binary-Accuracy', 'Precision',
'Recall', 'True-Positives', 'False-Positives', 'False-Negatives',
'True-Negatives'],
'multiclass': ['Categorical-Accuracy']}
self.dict_metrics_names = {'regression': ['RMSE', 'MAE', 'R-Squared', 'Pearson-Correlation'],
'binary': ['ROC-AUC', 'F1-Score', 'PR-AUC', 'Binary-Accuracy', 'Sensitivity',
'Specificity', 'Precision', 'Recall', 'True-Positives', 'False-Positives',
'False-Negatives', 'True-Negatives'],
'multiclass': ['Categorical-Accuracy']}
self.dict_losses_names = {'regression': 'MSE', 'binary': 'Binary-Crossentropy',
'multiclass': 'categorical_crossentropy'}
self.dict_main_metrics_names_K = {'Age': 'MAE', 'Sex': 'PR-AUC', 'imbalanced_binary_placeholder': 'PR-AUC'}
self.dict_main_metrics_names = {'Age': 'R-Squared', 'Sex': 'ROC-AUC',
'imbalanced_binary_placeholder': 'PR-AUC'}
self.main_metrics_modes = {'loss': 'min', 'R-Squared': 'max', 'Pearson-Correlation': 'max', 'RMSE': 'min',
'MAE': 'min', 'ROC-AUC': 'max', 'PR-AUC': 'max', 'F1-Score': 'max', 'C-Index': 'max',
'C-Index-difference': 'max'}
self.n_bootstrap_iterations = 1000
def rmse(y_true, y_pred):
return math.sqrt(mean_squared_error(y_true, y_pred))
def sensitivity_score(y, pred):
_, _, fn, tp = confusion_matrix(y, pred.round()).ravel()
return tp / (tp + fn)
def specificity_score(y, pred):
tn, fp, _, _ = confusion_matrix(y, pred.round()).ravel()
return tn / (tn + fp)
def true_positives_score(y, pred):
_, _, _, tp = confusion_matrix(y, pred.round()).ravel()
return tp
def false_positives_score(y, pred):
_, fp, _, _ = confusion_matrix(y, pred.round()).ravel()
return fp
def false_negatives_score(y, pred):
_, _, fn, _ = confusion_matrix(y, pred.round()).ravel()
return fn
def true_negatives_score(y, pred):
tn, _, _, _ = confusion_matrix(y, pred.round()).ravel()
return tn
self.dict_metrics_sklearn = {'mean_squared_error': mean_squared_error,
'mean_absolute_error': mean_absolute_error,
'RMSE': rmse,
'Pearson-Correlation': pearsonr,
'R-Squared': r2_score,
'Binary-Crossentropy': log_loss,
'ROC-AUC': roc_auc_score,
'F1-Score': f1_score,
'PR-AUC': average_precision_score,
'Binary-Accuracy': accuracy_score,
'Sensitivity': sensitivity_score,
'Specificity': specificity_score,
'Precision': precision_score,
'Recall': recall_score,
'True-Positives': true_positives_score,
'False-Positives': false_positives_score,
'False-Negatives': false_negatives_score,
'True-Negatives': true_negatives_score}
def _bootstrap(self, data, function):
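# Non-parametric bootstrap: resample the (y, pred) pairs with replacement n_bootstrap_iterations times
# and return the mean and standard deviation of the metric across the resamples.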
results = []
for i in range(self.n_bootstrap_iterations):
data_i = resample(data, replace=True, n_samples=len(data.index))
results.append(function(data_i['y'], data_i['pred']))
return np.mean(results), np.std(results)
class PreprocessingMain(Basics):
"""
This class executes the code for step 01. It preprocesses the main dataframe by:
- reformatting the rows and columns
- splitting the dataset into folds for the future cross validations
- imputing key missing data
- adding a new UKB instance for physical activity data
- formatting the demographics columns (age, sex and ethnicity)
- reformatting the dataframe so that different instances of the same participant are treated as different rows
- saving the dataframe
"""
def __init__(self):
Basics.__init__(self)
self.data_raw = None
self.data_features = None
self.data_features_eids = None
def _add_outer_folds(self):
outer_folds_split = pd.read_csv(self.path_data + 'All_eids.csv')
outer_folds_split.rename(columns={'fold': 'outer_fold'}, inplace=True)
outer_folds_split['eid'] = outer_folds_split['eid'].astype('str')
outer_folds_split['outer_fold'] = outer_folds_split['outer_fold'].astype('str')
outer_folds_split.set_index('eid', inplace=True)
self.data_raw = self.data_raw.join(outer_folds_split)
def _impute_missing_ecg_instances(self):
data_ecgs = pd.read_csv('/n/groups/patel/Alan/Aging/TimeSeries/scripts/age_analysis/missing_samples.csv')
data_ecgs['eid'] = data_ecgs['eid'].astype(str)
data_ecgs['instance'] = data_ecgs['instance'].astype(str)
for _, row in data_ecgs.iterrows():
self.data_raw.loc[row['eid'], 'Date_attended_center_' + row['instance']] = row['observation_date']
def _add_physicalactivity_instances(self):
data_pa = pd.read_csv(
'/n/groups/patel/Alan/Aging/TimeSeries/series/PhysicalActivity/90001/features/PA_visit_date.csv')
data_pa['eid'] = data_pa['eid'].astype(str)
data_pa.set_index('eid', drop=False, inplace=True)
data_pa.index.name = 'column_names'
self.data_raw = self.data_raw.merge(data_pa, on=['eid'], how='outer')
self.data_raw.set_index('eid', drop=False, inplace=True)
def _compute_sex(self):
# Use genetic sex when available
self.data_raw['Sex_genetic'][self.data_raw['Sex_genetic'].isna()] = \
self.data_raw['Sex'][self.data_raw['Sex_genetic'].isna()]
self.data_raw.drop(['Sex'], axis=1, inplace=True)
self.data_raw.rename(columns={'Sex_genetic': 'Sex'}, inplace=True)
self.data_raw.dropna(subset=['Sex'], inplace=True)
def _compute_age(self):
# Recompute age with greater precision by leveraging the month of birth
self.data_raw['Year_of_birth'] = self.data_raw['Year_of_birth'].astype(int)
self.data_raw['Month_of_birth'] = self.data_raw['Month_of_birth'].astype(int)
self.data_raw['Date_of_birth'] = self.data_raw.apply(
lambda row: datetime(row.Year_of_birth, row.Month_of_birth, 15), axis=1)
for i in self.instances:
self.data_raw['Date_attended_center_' + i] = \
self.data_raw['Date_attended_center_' + i].apply(
lambda x: pd.NaT if pd.isna(x) else datetime.strptime(x, '%Y-%m-%d'))
self.data_raw['Age_' + i] = self.data_raw['Date_attended_center_' + i] - self.data_raw['Date_of_birth']
self.data_raw['Age_' + i] = self.data_raw['Age_' + i].dt.days / 365.25
self.data_raw.drop(['Date_attended_center_' + i], axis=1, inplace=True)
self.data_raw.drop(['Year_of_birth', 'Month_of_birth', 'Date_of_birth'], axis=1, inplace=True)
self.data_raw.dropna(how='all', subset=['Age_0', 'Age_1', 'Age_1.5', 'Age_1.51', 'Age_1.52', 'Age_1.53',
'Age_1.54', 'Age_2', 'Age_3'], inplace=True)
def _encode_ethnicity(self):
# Fill NAs for ethnicity on instance 0 if available in other instances
eids_missing_ethnicity = self.data_raw['eid'][self.data_raw['Ethnicity'].isna()]
for eid in eids_missing_ethnicity:
sample = self.data_raw.loc[eid, :]
if not math.isnan(sample['Ethnicity_1']):
self.data_raw.loc[eid, 'Ethnicity'] = self.data_raw.loc[eid, 'Ethnicity_1']
elif not math.isnan(sample['Ethnicity_2']):
self.data_raw.loc[eid, 'Ethnicity'] = self.data_raw.loc[eid, 'Ethnicity_2']
self.data_raw.drop(['Ethnicity_1', 'Ethnicity_2'], axis=1, inplace=True)
# One hot encode ethnicity
dict_ethnicity_codes = {'1': 'Ethnicity.White', '1001': 'Ethnicity.British', '1002': 'Ethnicity.Irish',
'1003': 'Ethnicity.White_Other',
'2': 'Ethnicity.Mixed', '2001': 'Ethnicity.White_and_Black_Caribbean',
'2002': 'Ethnicity.White_and_Black_African',
'2003': 'Ethnicity.White_and_Asian', '2004': 'Ethnicity.Mixed_Other',
'3': 'Ethnicity.Asian', '3001': 'Ethnicity.Indian', '3002': 'Ethnicity.Pakistani',
'3003': 'Ethnicity.Bangladeshi', '3004': 'Ethnicity.Asian_Other',
'4': 'Ethnicity.Black', '4001': 'Ethnicity.Caribbean', '4002': 'Ethnicity.African',
'4003': 'Ethnicity.Black_Other',
'5': 'Ethnicity.Chinese',
'6': 'Ethnicity.Other_ethnicity',
'-1': 'Ethnicity.Do_not_know',
'-3': 'Ethnicity.Prefer_not_to_answer',
'-5': 'Ethnicity.NA'}
self.data_raw['Ethnicity'] = self.data_raw['Ethnicity'].fillna(-5).astype(int).astype(str)
ethnicities = pd.get_dummies(self.data_raw['Ethnicity'])
self.data_raw.drop(['Ethnicity'], axis=1, inplace=True)
ethnicities.rename(columns=dict_ethnicity_codes, inplace=True)
ethnicities['Ethnicity.White'] = ethnicities['Ethnicity.White'] + ethnicities['Ethnicity.British'] + \
ethnicities['Ethnicity.Irish'] + ethnicities['Ethnicity.White_Other']
ethnicities['Ethnicity.Mixed'] = ethnicities['Ethnicity.Mixed'] + \
ethnicities['Ethnicity.White_and_Black_Caribbean'] + \
ethnicities['Ethnicity.White_and_Black_African'] + \
ethnicities['Ethnicity.White_and_Asian'] + \
ethnicities['Ethnicity.Mixed_Other']
ethnicities['Ethnicity.Asian'] = ethnicities['Ethnicity.Asian'] + ethnicities['Ethnicity.Indian'] + \
ethnicities['Ethnicity.Pakistani'] + ethnicities['Ethnicity.Bangladeshi'] + \
ethnicities['Ethnicity.Asian_Other']
ethnicities['Ethnicity.Black'] = ethnicities['Ethnicity.Black'] + ethnicities['Ethnicity.Caribbean'] + \
ethnicities['Ethnicity.African'] + ethnicities['Ethnicity.Black_Other']
ethnicities['Ethnicity.Other'] = ethnicities['Ethnicity.Other_ethnicity'] + \
ethnicities['Ethnicity.Do_not_know'] + \
ethnicities['Ethnicity.Prefer_not_to_answer'] + \
ethnicities['Ethnicity.NA']
self.data_raw = self.data_raw.join(ethnicities)
def generate_data(self):
# Preprocessing
dict_UKB_fields_to_names = {'34-0.0': 'Year_of_birth', '52-0.0': 'Month_of_birth',
'53-0.0': 'Date_attended_center_0', '53-1.0': 'Date_attended_center_1',
'53-2.0': 'Date_attended_center_2', '53-3.0': 'Date_attended_center_3',
'31-0.0': 'Sex', '22001-0.0': 'Sex_genetic', '21000-0.0': 'Ethnicity',
'21000-1.0': 'Ethnicity_1', '21000-2.0': 'Ethnicity_2',
'22414-2.0': 'Abdominal_images_quality'}
self.data_raw = pd.read_csv('/n/groups/patel/uk_biobank/project_52887_41230/ukb41230.csv',
usecols=['eid', '31-0.0', '22001-0.0', '21000-0.0', '21000-1.0', '21000-2.0',
'34-0.0', '52-0.0', '53-0.0', '53-1.0', '53-2.0', '53-3.0', '22414-2.0'])
# Formatting
self.data_raw.rename(columns=dict_UKB_fields_to_names, inplace=True)
self.data_raw['eid'] = self.data_raw['eid'].astype(str)
self.data_raw.set_index('eid', drop=False, inplace=True)
self.data_raw.index.name = 'column_names'
self._add_outer_folds()
self._impute_missing_ecg_instances()
self._add_physicalactivity_instances()
self._compute_sex()
self._compute_age()
self._encode_ethnicity()
# Concatenate the data from the different instances
self.data_features = None
for i in self.instances:
print('Preparing the samples for instance ' + i)
df_i = self.data_raw[['eid', 'outer_fold', 'Age_' + i, 'Sex'] + self.ethnicities_vars +
['Abdominal_images_quality']].dropna(subset=['Age_' + i])
print(str(len(df_i.index)) + ' samples found in instance ' + i)
df_i.rename(columns={'Age_' + i: 'Age'}, inplace=True)
df_i['instance'] = i
df_i['id'] = df_i['eid'] + '_' + df_i['instance']
df_i = df_i[self.id_vars + self.demographic_vars + ['Abdominal_images_quality']]
if i != '2':
df_i['Abdominal_images_quality'] = np.nan # not defined for instance 3, not relevant for instances 0, 1
if self.data_features is None:
self.data_features = df_i
else:
self.data_features = self.data_features.append(df_i)
print('The size of the full concatenated dataframe is now ' + str(len(self.data_features.index)))
# Save age as a float32 instead of float64
self.data_features['Age'] = np.float32(self.data_features['Age'])
# Shuffle the rows before saving the dataframe
self.data_features = self.data_features.sample(frac=1)
# Generate dataframe for eids pipeline as opposed to instances pipeline
self.data_features_eids = self.data_features[self.data_features.instance == '0']
self.data_features_eids['instance'] = '*'
self.data_features_eids['id'] = [ID.replace('_0', '_*') for ID in self.data_features_eids['id'].values]
def save_data(self):
self.data_features.to_csv(self.path_data + 'data-features_instances.csv', index=False)
self.data_features_eids.to_csv(self.path_data + 'data-features_eids.csv', index=False)
class PreprocessingImagesIDs(Basics):
"""
Splits the different images datasets into folds for the future cross validation
"""
def __init__(self):
Basics.__init__(self)
# Instances 2 and 3 datasets (most medical images, mostly medical images)
self.instances23_eids = None
self.HEART_EIDs = None
self.heart_eids = None
self.FOLDS_23_EIDS = None
def _load_23_eids(self):
data_features = pd.read_csv(self.path_data + 'data-features_instances.csv')
images_eids = data_features['eid'][data_features['instance'].isin([2, 3])]
self.images_eids = list(set(images_eids))
def _load_heart_eids(self):
# IDs already used in Heart videos
HEART_EIDS = {}
heart_eids = []
for i in range(10):
# Important: The i's data fold is used as *validation* fold for outer fold i.
data_i = pd.read_csv(
"/n/groups/patel/JbProst/Heart/Data/FoldsAugmented/data-features_Heart_20208_Augmented_Age_val_" + str(
i) + ".csv")
HEART_EIDS[i] = list(set([int(str(ID)[:7]) for ID in data_i['eid']]))
heart_eids = heart_eids + HEART_EIDS[i]
self.HEART_EIDS = HEART_EIDS
self.heart_eids = heart_eids
def _split_23_eids_folds(self):
self._load_23_eids()
self._load_heart_eids()
# List extra images ids, and split them between the different folds.
extra_eids = [eid for eid in self.images_eids if eid not in self.heart_eids]
random.shuffle(extra_eids)
n_samples = len(extra_eids)
n_samples_by_fold = n_samples / self.n_CV_outer_folds
FOLDS_EXTRAEIDS = {}
FOLDS_EIDS = {}
for outer_fold in self.outer_folds:
FOLDS_EXTRAEIDS[outer_fold] = \
extra_eids[int((int(outer_fold)) * n_samples_by_fold):int((int(outer_fold) + 1) * n_samples_by_fold)]
FOLDS_EIDS[outer_fold] = self.HEART_EIDS[int(outer_fold)] + FOLDS_EXTRAEIDS[outer_fold]
self.FOLDS_23_EIDS = FOLDS_EIDS
def _save_23_eids_folds(self):
for outer_fold in self.outer_folds:
with open(self.path_data + 'instances23_eids_' + outer_fold + '.csv', 'w', newline='') as myfile:
wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
wr.writerow(self.FOLDS_23_EIDS[outer_fold])
def generate_eids_splits(self):
print("Generating eids split for organs on instances 2 and 3")
self._split_23_eids_folds()
self._save_23_eids_folds()
class PreprocessingFolds(Metrics):
"""
Splits the data into training, validation and testing sets for all CV folds
"""
def __init__(self, target, organ, regenerate_data):
Metrics.__init__(self)
self.target = target
self.organ = organ
self.list_ids_per_view_transformation = None
# Check if these folds have already been generated
if not regenerate_data:
if len(glob.glob(self.path_data + 'data-features_' + organ + '_*_' + target + '_*.csv')) > 0:
print("Error: The files already exist! Either change regenerate_data to True or delete the previous"
" version.")
sys.exit(1)
self.side_predictors = self.dict_side_predictors[target]
self.variables_to_normalize = self.side_predictors
if target in self.targets_regression:
self.variables_to_normalize.append(target)
self.dict_image_quality_col = {'Liver': 'Abdominal_images_quality'}
self.dict_image_quality_col.update(
dict.fromkeys(['Brain', 'Eyes', 'Arterial', 'Heart', 'Abdomen', 'Musculoskeletal', 'PhysicalActivity'],
None))
self.image_quality_col = self.dict_image_quality_col[organ]
self.views = self.dict_organs_to_views[organ]
self.list_ids = None
self.list_ids_per_view = {}
self.data = None
self.EIDS = None
self.EIDS_per_view = {'train': {}, 'val': {}, 'test': {}}
self.data_fold = None
def _get_list_ids(self):
self.list_ids_per_view_transformation = {}
list_ids = []
# if different views are available, take the union of the ids
for view in self.views:
self.list_ids_per_view_transformation[view] = {}
for transformation in self.dict_organsviews_to_transformations[self.organ + '_' + view]:
list_ids_transformation = []
path = '../images/' + self.organ + '/' + view + '/' + transformation + '/'
# for paired organs, take the unions of the ids available on the right and the left sides
if self.organ + '_' + view in self.left_right_organs_views:
for side in ['right', 'left']:
list_ids_transformation += os.listdir(path + side + '/')
list_ids_transformation = np.unique(list_ids_transformation).tolist()
else:
list_ids_transformation += os.listdir(path)
self.list_ids_per_view_transformation[view][transformation] = \
[im.replace('.jpg', '') for im in list_ids_transformation]
list_ids += self.list_ids_per_view_transformation[view][transformation]
self.list_ids = np.unique(list_ids).tolist()
self.list_ids.sort()
def _filter_and_format_data(self):
"""
Clean the data before it can be split between the rows
"""
cols_data = self.id_vars + self.demographic_vars
if self.image_quality_col is not None:
cols_data.append(self.dict_image_quality_col[self.organ])
data = pd.read_csv(self.path_data + 'data-features_instances.csv', usecols=cols_data)
data.rename(columns={self.dict_image_quality_col[self.organ]: 'Data_quality'}, inplace=True)
for col_name in self.id_vars:
data[col_name] = data[col_name].astype(str)
data.set_index('id', drop=False, inplace=True)
if self.image_quality_col is not None:
data = data[data['Data_quality'].notna()]
data.drop('Data_quality', axis=1, inplace=True)
# get rid of samples with NAs
data.dropna(inplace=True)
# list the samples' ids for which images are available
data = data.loc[self.list_ids]
self.data = data
def _split_data(self):
# Generate the data for each outer_fold
for i, outer_fold in enumerate(self.outer_folds):
of_val = outer_fold
of_test = str((int(outer_fold) + 1) % len(self.outer_folds))
DATA = {
'train': self.data[~self.data['outer_fold'].isin([of_val, of_test])],
'val': self.data[self.data['outer_fold'] == of_val],
'test': self.data[self.data['outer_fold'] == of_test]
}
# Generate the data for the different views and transformations
for view in self.views:
for transformation in self.dict_organsviews_to_transformations[self.organ + '_' + view]:
print('Splitting data for view ' + view + ', and transformation ' + transformation)
DF = {}
for fold in self.folds:
idx = DATA[fold]['id'].isin(self.list_ids_per_view_transformation[view][transformation]).values
DF[fold] = DATA[fold].iloc[idx, :]
# compute values for scaling of variables
normalizing_values = {}
for var in self.variables_to_normalize:
var_mean = DF['train'][var].mean()
if len(DF['train'][var].unique()) < 2:
print('Variable ' + var + ' has a single value in fold ' + outer_fold +
'. Using 1 as std for normalization.')
var_std = 1
else:
var_std = DF['train'][var].std()
normalizing_values[var] = {'mean': var_mean, 'std': var_std}
# normalize the variables
for fold in self.folds:
for var in self.variables_to_normalize:
DF[fold][var + '_raw'] = DF[fold][var]
DF[fold][var] = (DF[fold][var] - normalizing_values[var]['mean']) \
/ normalizing_values[var]['std']
# report issue if NAs were detected (most likely comes from a sample whose id did not match)
n_mismatching_samples = DF[fold].isna().sum().max()
if n_mismatching_samples > 0:
print(DF[fold][DF[fold].isna().any(axis=1)])
print('/!\\ WARNING! ' + str(n_mismatching_samples) + ' ' + fold + ' images ids out of ' +
str(len(DF[fold].index)) + ' did not match the dataframe!')
# save the data
DF[fold].to_csv(self.path_data + 'data-features_' + self.organ + '_' + view + '_' +
transformation + '_' + self.target + '_' + fold + '_' + outer_fold + '.csv',
index=False)
print('For outer_fold ' + outer_fold + ', the ' + fold + ' fold has a sample size of ' +
str(len(DF[fold].index)))
def generate_folds(self):
self._get_list_ids()
self._filter_and_format_data()
self._split_data()
class PreprocessingSurvival(Basics):
"""
Preprocesses the main dataframe for survival purposes.
Mirrors the PreprocessingMain class, but computes Death and FollowUpTime for the future survival analysis
"""
def __init__(self):
Basics.__init__(self)
self.data_raw = None
self.data_features = None
self.data_features_eids = None
self.survival_vars = ['FollowUpTime', 'Death']
def _preprocessing(self):
usecols = ['eid', '40000-0.0', '34-0.0', '52-0.0', '53-0.0', '53-1.0', '53-2.0', '53-3.0']
self.data_raw = pd.read_csv('/n/groups/patel/uk_biobank/project_52887_41230/ukb41230.csv', usecols=usecols)
dict_UKB_fields_to_names = {'40000-0.0': 'FollowUpDate', '34-0.0': 'Year_of_birth', '52-0.0': 'Month_of_birth',
'53-0.0': 'Date_attended_center_0', '53-1.0': 'Date_attended_center_1',
'53-2.0': 'Date_attended_center_2', '53-3.0': 'Date_attended_center_3'}
self.data_raw.rename(columns=dict_UKB_fields_to_names, inplace=True)
self.data_raw['eid'] = self.data_raw['eid'].astype(str)
self.data_raw.set_index('eid', drop=False, inplace=True)
self.data_raw.index.name = 'column_names'
# Format survival data
self.data_raw['Death'] = ~self.data_raw['FollowUpDate'].isna()
self.data_raw['FollowUpDate'][self.data_raw['FollowUpDate'].isna()] = '2020-04-27'
self.data_raw['FollowUpDate'] = self.data_raw['FollowUpDate'].apply(
lambda x: pd.NaT if pd.isna(x) else datetime.strptime(x, '%Y-%m-%d'))
assert ('FollowUpDate.1' not in self.data_raw.columns)
def _add_physicalactivity_instances(self):
data_pa = pd.read_csv(
'/n/groups/patel/Alan/Aging/TimeSeries/series/PhysicalActivity/90001/features/PA_visit_date.csv')
data_pa['eid'] = data_pa['eid'].astype(str)
data_pa.set_index('eid', drop=False, inplace=True)
data_pa.index.name = 'column_names'
self.data_raw = self.data_raw.merge(data_pa, on=['eid'], how='outer')
self.data_raw.set_index('eid', drop=False, inplace=True)
def _compute_age(self):
# Recompute age with greater precision by leveraging the month of birth
self.data_raw.dropna(subset=['Year_of_birth'], inplace=True)
self.data_raw['Year_of_birth'] = self.data_raw['Year_of_birth'].astype(int)
self.data_raw['Month_of_birth'] = self.data_raw['Month_of_birth'].astype(int)
self.data_raw['Date_of_birth'] = self.data_raw.apply(
lambda row: datetime(row.Year_of_birth, row.Month_of_birth, 15), axis=1)
for i in self.instances:
self.data_raw['Date_attended_center_' + i] = self.data_raw['Date_attended_center_' + i].apply(
lambda x: pd.NaT if pd.isna(x) else datetime.strptime(x, '%Y-%m-%d'))
self.data_raw['Age_' + i] = self.data_raw['Date_attended_center_' + i] - self.data_raw['Date_of_birth']
self.data_raw['Age_' + i] = self.data_raw['Age_' + i].dt.days / 365.25
self.data_raw['FollowUpTime_' + i] = self.data_raw['FollowUpDate'] - self.data_raw[
'Date_attended_center_' + i]
self.data_raw['FollowUpTime_' + i] = self.data_raw['FollowUpTime_' + i].dt.days / 365.25
self.data_raw.drop(['Date_attended_center_' + i], axis=1, inplace=True)
self.data_raw.drop(['Year_of_birth', 'Month_of_birth', 'Date_of_birth', 'FollowUpDate'], axis=1, inplace=True)
self.data_raw.dropna(how='all', subset=['Age_0', 'Age_1', 'Age_1.5', 'Age_1.51', 'Age_1.52', 'Age_1.53',
'Age_1.54', 'Age_2', 'Age_3'], inplace=True)
def _concatenate_instances(self):
self.data_features = None
for i in self.instances:
print('Preparing the samples for instance ' + i)
df_i = self.data_raw.dropna(subset=['Age_' + i])
print(str(len(df_i.index)) + ' samples found in instance ' + i)
dict_names = {}
features = ['Age', 'FollowUpTime']
for feature in features:
dict_names[feature + '_' + i] = feature
self.dict_names = dict_names
df_i.rename(columns=dict_names, inplace=True)
df_i['instance'] = i
df_i['id'] = df_i['eid'] + '_' + df_i['instance']
df_i = df_i[['id', 'eid', 'instance'] + self.survival_vars]
if self.data_features is None:
self.data_features = df_i
else:
self.data_features = self.data_features.append(df_i)
print('The size of the full concatenated dataframe is now ' + str(len(self.data_features.index)))
# Add * instance for eids
survival_eids = self.data_features[self.data_features['instance'] == '0']
survival_eids['instance'] = '*'
survival_eids['id'] = survival_eids['eid'] + '_' + survival_eids['instance']
self.data_features = self.data_features.append(survival_eids)
def generate_data(self):
# Formatting
self._preprocessing()
self._add_physicalactivity_instances()
self._compute_age()
self._concatenate_instances()
# save data
self.data_features.to_csv('../data/data_survival.csv', index=False)
class MyImageDataGenerator(Basics, Sequence, ImageDataGenerator):
"""
Helper class: custom data generator for images.
It handles several custom features such as:
- provides batches not only of images, but also of the scalar data (e.g. demographics) that corresponds to them
- it performs random shuffling while making sure that no leftover data (the remainder modulo the batch size)
goes unused
- it can handle paired data for paired organs (e.g left/right eyes)
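Illustrative instantiation sketch (all concrete values are hypothetical and only indicate the expected
argument types):
generator = MyImageDataGenerator(target='Age', organ='Heart', view='MRI', data_features=df_train,
n_samples_per_subepoch=50000, batch_size=32, training_mode=True, side_predictors=['Sex'],
dir_images='../images/Heart/MRI/2chambersRaw/', images_width=316, images_height=316,
data_augmentation=True, data_augmentation_factor=1.0, seed=0)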
"""
def __init__(self, target=None, organ=None, view=None, data_features=None, n_samples_per_subepoch=None,
batch_size=None, training_mode=None, side_predictors=None, dir_images=None, images_width=None,
images_height=None, data_augmentation=False, data_augmentation_factor=None, seed=None):
# Parameters
Basics.__init__(self)
self.target = target
if target in self.targets_regression:
self.labels = data_features[target]
else:
self.labels = data_features[target + '_raw']
self.organ = organ
self.view = view
self.training_mode = training_mode
self.data_features = data_features
self.list_ids = data_features.index.values
self.batch_size = batch_size
# for paired organs, take twice fewer ids (two images for each id), and add organ_side as side predictor
if organ + '_' + view in self.left_right_organs_views:
self.data_features['organ_side'] = np.nan
self.n_ids_batch = batch_size // 2
else:
self.n_ids_batch = batch_size
if self.training_mode & (n_samples_per_subepoch is not None): # during training, 1 epoch = number of samples
self.steps = math.ceil(n_samples_per_subepoch / batch_size)
else: # during prediction and other tasks, an epoch is defined as all the samples being seen once and only once
self.steps = math.ceil(len(self.list_ids) / self.n_ids_batch)
# learning_rate_patience
if n_samples_per_subepoch is not None:
self.n_subepochs_per_epoch = math.ceil(len(self.data_features.index) / n_samples_per_subepoch)
# initiate the indices and shuffle the ids
self.shuffle = training_mode # Only shuffle if the model is being trained. Otherwise no need.
self.indices = np.arange(len(self.list_ids))
self.idx_end = 0  # Keep track of the last index to permute indices accordingly at the end of the epoch.
if self.shuffle:
np.random.shuffle(self.indices)
# Input for side NN and CNN
self.side_predictors = side_predictors
self.dir_images = dir_images
self.images_width = images_width
self.images_height = images_height
# Data augmentation
self.data_augmentation = data_augmentation
self.data_augmentation_factor = data_augmentation_factor
self.seed = seed
# Parameters for data augmentation: (rotation range, width shift range, height shift range, zoom range)
self.augmentation_parameters = \
pd.DataFrame(index=['Brain_MRI', 'Eyes_Fundus', 'Eyes_OCT', 'Arterial_Carotids', 'Heart_MRI',
'Abdomen_Liver', 'Abdomen_Pancreas', 'Musculoskeletal_Spine', 'Musculoskeletal_Hips',
'Musculoskeletal_Knees', 'Musculoskeletal_FullBody', 'PhysicalActivity_FullWeek',
'PhysicalActivity_Walking'],
columns=['rotation', 'width_shift', 'height_shift', 'zoom'])
self.augmentation_parameters.loc['Brain_MRI', :] = [10, 0.05, 0.1, 0.0]
self.augmentation_parameters.loc['Eyes_Fundus', :] = [20, 0.02, 0.02, 0]
self.augmentation_parameters.loc['Eyes_OCT', :] = [30, 0.1, 0.2, 0]
self.augmentation_parameters.loc[['Arterial_Carotids'], :] = [0, 0.2, 0.0, 0.0]
self.augmentation_parameters.loc[['Heart_MRI', 'Abdomen_Liver', 'Abdomen_Pancreas',
'Musculoskeletal_Spine'], :] = [10, 0.1, 0.1, 0.0]
self.augmentation_parameters.loc[['Musculoskeletal_Hips', 'Musculoskeletal_Knees'], :] = [10, 0.1, 0.1, 0.1]
self.augmentation_parameters.loc[['Musculoskeletal_FullBody'], :] = [10, 0.05, 0.02, 0.0]
self.augmentation_parameters.loc[['PhysicalActivity_FullWeek'], :] = [0, 0, 0, 0.0]
organ_view = organ + '_' + view
ImageDataGenerator.__init__(self, rescale=1. / 255.,
rotation_range=self.augmentation_parameters.loc[organ_view, 'rotation'],
width_shift_range=self.augmentation_parameters.loc[organ_view, 'width_shift'],
height_shift_range=self.augmentation_parameters.loc[organ_view, 'height_shift'],
zoom_range=self.augmentation_parameters.loc[organ_view, 'zoom'])
def __len__(self):
return self.steps
def on_epoch_end(self):
_ = gc.collect()
self.indices = np.concatenate([self.indices[self.idx_end:], self.indices[:self.idx_end]])
def _generate_image(self, path_image):
img = load_img(path_image, target_size=(self.images_width, self.images_height), color_mode='rgb')
Xi = img_to_array(img)
if hasattr(img, 'close'):
img.close()
if self.data_augmentation:
params = self.get_random_transform(Xi.shape)
Xi = self.apply_transform(Xi, params)
Xi = self.standardize(Xi)
return Xi
def _data_generation(self, list_ids_batch):
# initialize empty matrices
n_samples_batch = min(len(list_ids_batch), self.batch_size)
X = np.empty((n_samples_batch, self.images_width, self.images_height, 3)) * np.nan
x = np.empty((n_samples_batch, len(self.side_predictors))) * np.nan
y = np.empty((n_samples_batch, 1)) * np.nan
# fill the matrices sample by sample
for i, ID in enumerate(list_ids_batch):
y[i] = self.labels[ID]
x[i] = self.data_features.loc[ID, self.side_predictors]
if self.organ + '_' + self.view in self.left_right_organs_views:
if i % 2 == 0:
path = self.dir_images + 'right/'
x[i][-1] = 0
else:
path = self.dir_images + 'left/'
x[i][-1] = 1
if not os.path.exists(path + ID + '.jpg'):
path = path.replace('/right/', '/left/') if i % 2 == 0 else path.replace('/left/', '/right/')
x[i][-1] = 1 - x[i][-1]
else:
path = self.dir_images
X[i, :, :, :] = self._generate_image(path_image=path + ID + '.jpg')
return [X, x], y
def __getitem__(self, index):
# Select the indices
idx_start = (index * self.n_ids_batch) % len(self.list_ids)
idx_end = (((index + 1) * self.n_ids_batch) - 1) % len(self.list_ids) + 1
if idx_start > idx_end:
# If this happens outside of training, that is a mistake
if not self.training_mode:
print('\nERROR: Outside of training, every sample should only be predicted once!')
sys.exit(1)
# Select part of the indices from the end of the epoch
indices = self.indices[idx_start:]
# Generate a new set of indices
# print('\nThe end of the data was reached within this batch, looping.')
if self.shuffle:
np.random.shuffle(self.indices)
# Complete the batch with samples from the new indices
indices = np.concatenate([indices, self.indices[:idx_end]])
else:
indices = self.indices[idx_start: idx_end]
if (idx_end == len(self.list_ids)) and self.shuffle:
# print('\nThe end of the data was reached. Shuffling for the next epoch.')
np.random.shuffle(self.indices)
# Keep track of the last index for the end of the subepoch
self.idx_end = idx_end
# Select the corresponding ids
list_ids_batch = [self.list_ids[i] for i in indices]
# For paired organs, two images (left, right eyes) are selected for each id.
if self.organ + '_' + self.view in self.left_right_organs_views:
list_ids_batch = [ID for ID in list_ids_batch for _ in ('right', 'left')]
return self._data_generation(list_ids_batch)
class MyCSVLogger(Callback):
"""
Custom CSV Logger callback class for Keras training: appends to an existing file if one is found. Allows keeping
track of training across several jobs.
"""
def __init__(self, filename, separator=',', append=False):
self.sep = separator
self.filename = filename
self.append = append
self.writer = None
self.keys = None
self.append_header = True
self.csv_file = None
if six.PY2:
self.file_flags = 'b'
self._open_args = {}
else:
self.file_flags = ''
self._open_args = {'newline': '\n'}
Callback.__init__(self)
def on_train_begin(self, logs=None):
if self.append:
if file_io.file_exists(self.filename):
with open(self.filename, 'r' + self.file_flags) as f:
self.append_header = not bool(len(f.readline()))
mode = 'a'
else:
mode = 'w'
self.csv_file = io.open(self.filename, mode + self.file_flags, **self._open_args)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
def handle_value(k):
is_zero_dim_ndarray = isinstance(k, np.ndarray) and k.ndim == 0
if isinstance(k, six.string_types):
return k
elif isinstance(k, collections_abc.Iterable) and not is_zero_dim_ndarray:
return '"[%s]"' % (', '.join(map(str, k)))
else:
return k
if self.keys is None:
self.keys = sorted(logs.keys())
if self.model.stop_training:
# We set NA so that csv parsers do not fail for this last epoch.
logs = dict([(k, logs[k]) if k in logs else (k, 'NA') for k in self.keys])
if not self.writer:
class CustomDialect(csv.excel):
delimiter = self.sep
fieldnames = ['epoch', 'learning_rate'] + self.keys
if six.PY2:
fieldnames = [unicode(x) for x in fieldnames]
self.writer = csv.DictWriter(
self.csv_file,
fieldnames=fieldnames,
dialect=CustomDialect)
if self.append_header:
self.writer.writeheader()
row_dict = collections.OrderedDict({'epoch': epoch, 'learning_rate': tf.keras.backend.eval(self.model.optimizer.lr)})
row_dict.update((key, handle_value(logs[key])) for key in self.keys)
self.writer.writerow(row_dict)
self.csv_file.flush()
def on_train_end(self, logs=None):
self.csv_file.close()
self.writer = None
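# Note: compared to the stock keras CSVLogger, the logger above also records the current learning rate
# and, when append=True, only writes the header if the existing file is empty, so a single log file can
# span several restarted jobs.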
class MyModelCheckpoint(ModelCheckpoint):
"""
Custom checkpoint callback class for Keras training. Handles a baseline performance: only epochs that improve on it are saved.
"""
def __init__(self, filepath, monitor='val_loss', baseline=-np.Inf, verbose=0, save_best_only=False,
save_weights_only=False, mode='auto', save_freq='epoch'):
# Parameters
ModelCheckpoint.__init__(self, filepath, monitor=monitor, verbose=verbose, save_best_only=save_best_only,
save_weights_only=save_weights_only, mode=mode, save_freq=save_freq)
if mode == 'min':
self.monitor_op = np.less
self.best = baseline
elif mode == 'max':
self.monitor_op = np.greater
self.best = baseline
else:
print('Error. mode for metric must be either min or max')
sys.exit(1)
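# Note: the only difference with the standard ModelCheckpoint is that self.best starts at the provided
# baseline instead of +/-infinity, so a restarted job does not overwrite previously saved weights with
# the weights of a worse-performing epoch.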
class DeepLearning(Metrics):
"""
Core helper class to train models. Used to:
- build the data generators
- generate the CNN architectures
- load the weights
"""
def __init__(self, target=None, organ=None, view=None, transformation=None, architecture=None, n_fc_layers=None,
n_fc_nodes=None, optimizer=None, learning_rate=None, weight_decay=None, dropout_rate=None,
data_augmentation_factor=None, debug_mode=False):
# Initialization
Metrics.__init__(self)
tf.random.set_seed(self.seed)
# Model's version
self.target = target
self.organ = organ
self.view = view
self.transformation = transformation
self.architecture = architecture
self.n_fc_layers = int(n_fc_layers)
self.n_fc_nodes = int(n_fc_nodes)
self.optimizer = optimizer
self.learning_rate = float(learning_rate)
self.weight_decay = float(weight_decay)
self.dropout_rate = float(dropout_rate)
self.data_augmentation_factor = float(data_augmentation_factor)
self.outer_fold = None
self.version = target + '_' + organ + '_' + view + '_' + transformation + '_' + architecture + '_' + \
n_fc_layers + '_' + n_fc_nodes + '_' + optimizer + '_' + learning_rate + '_' + weight_decay + \
'_' + dropout_rate + '_' + data_augmentation_factor
# NNet's architecture and weights
# copy the list so that appending 'organ_side' does not mutate the shared dict_side_predictors entry
self.side_predictors = list(self.dict_side_predictors[target])
if self.organ + '_' + self.view in self.left_right_organs_views:
self.side_predictors.append('organ_side')
self.dict_final_activations = {'regression': 'linear', 'binary': 'sigmoid', 'multiclass': 'softmax',
'saliency': 'linear'}
self.path_load_weights = None
self.keras_weights = None
# Generators
self.debug_mode = debug_mode
self.debug_fraction = 0.005
self.DATA_FEATURES = {}
self.mode = None
self.n_cpus = len(os.sched_getaffinity(0))
self.dir_images = '../images/' + organ + '/' + view + '/' + transformation + '/'
# define dictionary to fit the architecture's input size to the images sizes (take min (height, width))
self.dict_organ_view_transformation_to_image_size = {
'Eyes_Fundus_Raw': (316, 316), # initial size (1388, 1388)
'Eyes_OCT_Raw': (312, 320), # initial size (500, 512)
'Musculoskeletal_Spine_Sagittal': (466, 211), # initial size (1513, 684)
'Musculoskeletal_Spine_Coronal': (315, 313), # initial size (724, 720)
'Musculoskeletal_Hips_MRI': (329, 303), # initial size (626, 680)
'Musculoskeletal_Knees_MRI': (347, 286) # initial size (851, 700)
}
self.dict_organ_view_transformation_to_image_size.update(
dict.fromkeys(['Brain_MRI_SagittalRaw', 'Brain_MRI_SagittalReference', 'Brain_MRI_CoronalRaw',
'Brain_MRI_CoronalReference', 'Brain_MRI_TransverseRaw', 'Brain_MRI_TransverseReference'],
(316, 316))) # initial size (88, 88)
self.dict_organ_view_transformation_to_image_size.update(
dict.fromkeys(['Arterial_Carotids_Mixed', 'Arterial_Carotids_LongAxis', 'Arterial_Carotids_CIMT120',
'Arterial_Carotids_CIMT150', 'Arterial_Carotids_ShortAxis'],
(337, 291))) # initial size (505, 436)
self.dict_organ_view_transformation_to_image_size.update(
dict.fromkeys(['Heart_MRI_2chambersRaw', 'Heart_MRI_2chambersContrast', 'Heart_MRI_3chambersRaw',
'Heart_MRI_3chambersContrast', 'Heart_MRI_4chambersRaw', 'Heart_MRI_4chambersContrast'],
(316, 316))) # initial size (200, 200)
self.dict_organ_view_transformation_to_image_size.update(
dict.fromkeys(['Abdomen_Liver_Raw', 'Abdomen_Liver_Contrast'], (288, 364))) # initial size (288, 364)
self.dict_organ_view_transformation_to_image_size.update(
dict.fromkeys(['Abdomen_Pancreas_Raw', 'Abdomen_Pancreas_Contrast'], (288, 350))) # initial size (288, 350)
self.dict_organ_view_transformation_to_image_size.update(
dict.fromkeys(['Musculoskeletal_FullBody_Figure', 'Musculoskeletal_FullBody_Skeleton',
'Musculoskeletal_FullBody_Flesh', 'Musculoskeletal_FullBody_Mixed'],
(541, 181))) # initial size (811, 272)
self.dict_organ_view_transformation_to_image_size.update(
dict.fromkeys(['PhysicalActivity_FullWeek_GramianAngularField1minDifference',
'PhysicalActivity_FullWeek_GramianAngularField1minSummation',
'PhysicalActivity_FullWeek_MarkovTransitionField1min',
'PhysicalActivity_FullWeek_RecurrencePlots1min'],
(316, 316))) # initial size (316, 316)
self.dict_architecture_to_image_size = {'MobileNet': (224, 224), 'MobileNetV2': (224, 224),
'NASNetMobile': (224, 224), 'NASNetLarge': (331, 331)}
if self.architecture in ['MobileNet', 'MobileNetV2', 'NASNetMobile', 'NASNetLarge']:
self.image_width, self.image_height = self.dict_architecture_to_image_size[architecture]
else:
self.image_width, self.image_height = \
self.dict_organ_view_transformation_to_image_size[organ + '_' + view + '_' + transformation]
# define dictionary of batch sizes to fit as many samples as the model's architecture allows
self.dict_batch_sizes = {
# Default, applies to all images with resized input ~100,000 pixels
'Default': {'VGG16': 32, 'VGG19': 32, 'DenseNet121': 16, 'DenseNet169': 16, 'DenseNet201': 16,
'Xception': 32, 'InceptionV3': 32, 'InceptionResNetV2': 8, 'ResNet50': 32, 'ResNet101': 16,
'ResNet152': 16, 'ResNet50V2': 32, 'ResNet101V2': 16, 'ResNet152V2': 16, 'ResNeXt50': 4,
'ResNeXt101': 8, 'EfficientNetB7': 4,
'MobileNet': 128, 'MobileNetV2': 64, 'NASNetMobile': 64, 'NASNetLarge': 4}}
# Define batch size
if organ + '_' + view in self.dict_batch_sizes.keys():
self.batch_size = self.dict_batch_sizes[organ + '_' + view][architecture]
else:
self.batch_size = self.dict_batch_sizes['Default'][architecture]
# double the batch size for the Tesla M40 cores that have bigger memory
if len(GPUtil.getGPUs()) > 0: # make sure GPUs are available (not always true, e.g. when debugging)
if GPUtil.getGPUs()[0].memoryTotal > 20000:
self.batch_size *= 2
# Define number of ids per batch (half as many for paired organs, since both left and right samples are loaded per id)
self.n_ids_batch = self.batch_size
if organ + '_' + view in self.left_right_organs_views:
self.n_ids_batch //= 2
# Define number of samples per subepoch
if debug_mode:
self.n_samples_per_subepoch = self.batch_size * 4
else:
self.n_samples_per_subepoch = 32768
if organ + '_' + view in self.left_right_organs_views:
self.n_samples_per_subepoch //= 2
# dict to decide which field is used to generate the ids when several targets share the same ids
self.dict_target_to_ids = dict.fromkeys(['Age', 'Sex'], 'Age')
# Note: R-Squared and F1-Score are not available, because their batch based values are misleading.
# For some reason, Sensitivity and Specificity are not available either. Might implement later.
self.dict_losses_K = {'MSE': MeanSquaredError(name='MSE'),
'Binary-Crossentropy': BinaryCrossentropy(name='Binary-Crossentropy')}
self.dict_metrics_K = {'R-Squared': RSquare(name='R-Squared', y_shape=(1,)),
'RMSE': RootMeanSquaredError(name='RMSE'),
'F1-Score': F1Score(name='F1-Score', num_classes=1, dtype=tf.float32),
'ROC-AUC': AUC(curve='ROC', name='ROC-AUC'),
'PR-AUC': AUC(curve='PR', name='PR-AUC'),
'Binary-Accuracy': BinaryAccuracy(name='Binary-Accuracy'),
'Precision': Precision(name='Precision'),
'Recall': Recall(name='Recall'),
'True-Positives': TruePositives(name='True-Positives'),
'False-Positives': FalsePositives(name='False-Positives'),
'False-Negatives': FalseNegatives(name='False-Negatives'),
'True-Negatives': TrueNegatives(name='True-Negatives')}
# Metrics
self.prediction_type = self.dict_prediction_types[target]
self.loss_name = self.dict_losses_names[self.prediction_type]
self.loss_function = self.dict_losses_K[self.loss_name]
self.main_metric_name = self.dict_main_metrics_names_K[target]
self.main_metric_mode = self.main_metrics_modes[self.main_metric_name]
self.main_metric = self.dict_metrics_K[self.main_metric_name]
self.metrics_names = [self.main_metric_name]
self.metrics = [self.dict_metrics_K[metric_name] for metric_name in self.metrics_names]
# Optimizers
self.optimizers = {'Adam': Adam, 'RMSprop': RMSprop, 'Adadelta': Adadelta}
# Model
self.model = None
@staticmethod
def _append_ext(fn):
return fn + ".jpg"
def _load_data_features(self):
for fold in self.folds:
self.DATA_FEATURES[fold] = pd.read_csv(
self.path_data + 'data-features_' + self.organ + '_' + self.view + '_' + self.transformation + '_' +
self.dict_target_to_ids[self.target] + '_' + fold + '_' + self.outer_fold + '.csv')
for col_name in self.id_vars:
self.DATA_FEATURES[fold][col_name] = self.DATA_FEATURES[fold][col_name].astype(str)
self.DATA_FEATURES[fold].set_index('id', drop=False, inplace=True)
def _take_subset_to_debug(self):
for fold in self.folds:
# use +1 or +2 to test the leftovers pipeline
leftovers_extra = {'train': 0, 'val': 1, 'test': 2}
n_batches = 2
n_limit_fold = leftovers_extra[fold] + self.batch_size * n_batches
self.DATA_FEATURES[fold] = self.DATA_FEATURES[fold].iloc[:n_limit_fold, :]
def _generate_generators(self, DATA_FEATURES):
GENERATORS = {}
for fold in self.folds:
# do not generate a generator if there are no samples (can happen for leftovers generators)
if fold not in DATA_FEATURES.keys():
continue
# parameters
training_mode = True if self.mode == 'model_training' else False
if (fold == 'train') & (self.mode == 'model_training') & \
(self.organ + '_' + self.view not in self.organsviews_not_to_augment):
data_augmentation = True
else:
data_augmentation = False
# define batch size for testing: data is split between a part that fits in batches, and leftovers
if self.mode == 'model_testing':
if self.organ + '_' + self.view in self.left_right_organs_views:
n_samples = len(DATA_FEATURES[fold].index) * 2
else:
n_samples = len(DATA_FEATURES[fold].index)
batch_size_fold = min(self.batch_size, n_samples)
else:
batch_size_fold = self.batch_size
if (fold == 'train') & (self.mode == 'model_training'):
n_samples_per_subepoch = self.n_samples_per_subepoch
else:
n_samples_per_subepoch = None
# generator
GENERATORS[fold] = \
MyImageDataGenerator(target=self.target, organ=self.organ, view=self.view,
data_features=DATA_FEATURES[fold], n_samples_per_subepoch=n_samples_per_subepoch,
batch_size=batch_size_fold, training_mode=training_mode,
side_predictors=self.side_predictors, dir_images=self.dir_images,
images_width=self.image_width, images_height=self.image_height,
data_augmentation=data_augmentation,
data_augmentation_factor=self.data_augmentation_factor, seed=self.seed)
return GENERATORS
def _generate_class_weights(self):
if self.dict_prediction_types[self.target] == 'binary':
self.class_weights = {}
counts = self.DATA_FEATURES['train'][self.target + '_raw'].value_counts()
n_total = counts.sum()
# weight each class inversely proportionally to its prevalence, so that the weights average to 1 across samples
for i in counts.index.values:
self.class_weights[i] = n_total / (counts.loc[i] * len(counts.index))
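# Worked example with hypothetical counts: for 1000 training samples split into 900 of class 0 and 100
# of class 1, the weights are 1000 / (900 * 2) ~= 0.56 and 1000 / (100 * 2) = 5.0, so both classes
# contribute equally to the weighted loss.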
def _generate_cnn(self):
# define the arguments
# take special initial weights for EfficientNetB7 (better)
if (self.architecture == 'EfficientNetB7') & (self.keras_weights == 'imagenet'):
w = 'noisy-student'
else:
w = self.keras_weights
kwargs = {"include_top": False, "weights": w, "input_shape": (self.image_width, self.image_height, 3)}
if self.architecture in ['ResNet50', 'ResNet101', 'ResNet152', 'ResNet50V2', 'ResNet101V2', 'ResNet152V2',
'ResNeXt50', 'ResNeXt101']:
import tensorflow.keras
kwargs.update(
{"backend": tensorflow.keras.backend, "layers": tensorflow.keras.layers,
"models": tensorflow.keras.models, "utils": tensorflow.keras.utils})
# load the architecture builder
if self.architecture == 'VGG16':
from tensorflow.keras.applications.vgg16 import VGG16 as ModelBuilder
elif self.architecture == 'VGG19':
from tensorflow.keras.applications.vgg19 import VGG19 as ModelBuilder
elif self.architecture == 'DenseNet121':
from tensorflow.keras.applications.densenet import DenseNet121 as ModelBuilder
elif self.architecture == 'DenseNet169':
from tensorflow.keras.applications.densenet import DenseNet169 as ModelBuilder
elif self.architecture == 'DenseNet201':
from tensorflow.keras.applications.densenet import DenseNet201 as ModelBuilder
elif self.architecture == 'Xception':
from tensorflow.keras.applications.xception import Xception as ModelBuilder
elif self.architecture == 'InceptionV3':
from tensorflow.keras.applications.inception_v3 import InceptionV3 as ModelBuilder
elif self.architecture == 'InceptionResNetV2':
from tensorflow.keras.applications.inception_resnet_v2 import InceptionResNetV2 as ModelBuilder
elif self.architecture == 'ResNet50':
from keras_applications.resnet import ResNet50 as ModelBuilder
elif self.architecture == 'ResNet101':
from keras_applications.resnet import ResNet101 as ModelBuilder
elif self.architecture == 'ResNet152':
from keras_applications.resnet import ResNet152 as ModelBuilder
elif self.architecture == 'ResNet50V2':
from keras_applications.resnet_v2 import ResNet50V2 as ModelBuilder
elif self.architecture == 'ResNet101V2':
from keras_applications.resnet_v2 import ResNet101V2 as ModelBuilder
elif self.architecture == 'ResNet152V2':
from keras_applications.resnet_v2 import ResNet152V2 as ModelBuilder
elif self.architecture == 'ResNeXt50':
from keras_applications.resnext import ResNeXt50 as ModelBuilder
elif self.architecture == 'ResNeXt101':
from keras_applications.resnext import ResNeXt101 as ModelBuilder
elif self.architecture == 'EfficientNetB7':
from efficientnet.tfkeras import EfficientNetB7 as ModelBuilder
# The following models have a fixed input size requirement
elif self.architecture == 'NASNetMobile':
from tensorflow.keras.applications.nasnet import NASNetMobile as ModelBuilder
elif self.architecture == 'NASNetLarge':
from tensorflow.keras.applications.nasnet import NASNetLarge as ModelBuilder
elif self.architecture == 'MobileNet':
from tensorflow.keras.applications.mobilenet import MobileNet as ModelBuilder
elif self.architecture == 'MobileNetV2':
from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2 as ModelBuilder
else:
print('Architecture does not exist.')
sys.exit(1)
# build the model's base
cnn = ModelBuilder(**kwargs)
x = cnn.output
# complete the model's base
if self.architecture in ['VGG16', 'VGG19']:
x = Flatten()(x)
x = Dense(4096, activation='relu', kernel_regularizer=regularizers.l2(self.weight_decay))(x)
x = Dropout(self.dropout_rate)(x)
x = Dense(4096, activation='relu', kernel_regularizer=regularizers.l2(self.weight_decay))(x)
x = Dropout(self.dropout_rate)(x)
else:
x = GlobalAveragePooling2D()(x)
if self.architecture == 'EfficientNetB7':
x = Dropout(self.dropout_rate)(x)
cnn_output = x
return cnn.input, cnn_output
def _generate_side_nn(self):
side_nn = Sequential()
side_nn.add(Dense(16, input_dim=len(self.side_predictors), activation="relu",
kernel_regularizer=regularizers.l2(self.weight_decay)))
return side_nn.input, side_nn.output
def _complete_architecture(self, cnn_input, cnn_output, side_nn_input, side_nn_output):
x = concatenate([cnn_output, side_nn_output])
x = Dropout(self.dropout_rate)(x)
for n in [int(self.n_fc_nodes * (2 ** (2 * (self.n_fc_layers - 1 - i)))) for i in range(self.n_fc_layers)]:
x = Dense(n, activation='relu', kernel_regularizer=regularizers.l2(self.weight_decay))(x)
# scale the dropout proportionally to the number of nodes in a layer. No dropout for the last layers
if n > 16:
x = Dropout(self.dropout_rate * n / 1024)(x)
predictions = Dense(1, activation=self.dict_final_activations[self.prediction_type],
kernel_regularizer=regularizers.l2(self.weight_decay))(x)
self.model = Model(inputs=[cnn_input, side_nn_input], outputs=predictions)
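# Worked example with hypothetical hyperparameters: n_fc_layers=3 and n_fc_nodes=64 give dense layers
# of widths [64 * 2**4, 64 * 2**2, 64] = [1024, 256, 64] on top of the concatenated CNN and side-NN
# outputs, each followed by a dropout whose rate is scaled by width / 1024, before the single-node
# prediction layer.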
def _generate_architecture(self):
cnn_input, cnn_output = self._generate_cnn()
side_nn_input, side_nn_output = self._generate_side_nn()
self._complete_architecture(cnn_input=cnn_input, cnn_output=cnn_output, side_nn_input=side_nn_input,
side_nn_output=side_nn_output)
def _load_model_weights(self):
try:
self.model.load_weights(self.path_load_weights)
except (FileNotFoundError, TypeError):
# load backup weights if the main weights are corrupted
try:
self.model.load_weights(self.path_load_weights.replace('model-weights', 'backup-model-weights'))
except FileNotFoundError:
print('Error: no weights file was found even though imagenet weights were not requested. Bug somewhere.')
sys.exit(1)
@staticmethod
def clean_exit():
# exit
print('\nDone.\n')
print('Killing JOB PID with kill...')
os.system('touch ../eo/' + os.environ['SLURM_JOBID'])
os.system('kill ' + str(os.getpid()))
time.sleep(60)
print('Escalating to kill JOB PID with kill -9...')
os.system('kill -9 ' + str(os.getpid()))
time.sleep(60)
print('Escalating to kill JOB ID')
os.system('scancel ' + os.environ['SLURM_JOBID'])
time.sleep(60)
print('Everything failed to kill the job. Hanging there until hitting walltime...')
class Training(DeepLearning):
"""
Class to train CNN models:
- Generates the architecture
- Loads the best last weights so that a model can be trained over several jobs
- Generates the callbacks
- Compiles the model
- Trains the model
"""
def __init__(self, target=None, organ=None, view=None, transformation=None, architecture=None, n_fc_layers=None,
n_fc_nodes=None, optimizer=None, learning_rate=None, weight_decay=None, dropout_rate=None,
data_augmentation_factor=None, outer_fold=None, debug_mode=False, transfer_learning=None,
continue_training=True, display_full_metrics=True):
# parameters
DeepLearning.__init__(self, target, organ, view, transformation, architecture, n_fc_layers, n_fc_nodes,
optimizer, learning_rate, weight_decay, dropout_rate, data_augmentation_factor,
debug_mode)
self.outer_fold = outer_fold
self.version = self.version + '_' + str(outer_fold)
# NNet's architecture's weights
self.continue_training = continue_training
self.transfer_learning = transfer_learning
self.list_parameters_to_match = ['organ', 'transformation', 'view']
# dict to decide in which order targets should be used when trying to transfer weights from a similar model
self.dict_alternative_targets_for_transfer_learning = {'Age': ['Age', 'Sex'], 'Sex': ['Sex', 'Age']}
# Generators
self.folds = ['train', 'val']
self.mode = 'model_training'
self.class_weights = None
self.GENERATORS = None
# Metrics
self.baseline_performance = None
if display_full_metrics:
self.metrics_names = self.dict_metrics_names_K[self.prediction_type]
# Model
self.path_load_weights = self.path_data + 'model-weights_' + self.version + '.h5'
if debug_mode:
self.path_save_weights = self.path_data + 'model-weights-debug.h5'
else:
self.path_save_weights = self.path_data + 'model-weights_' + self.version + '.h5'
self.n_epochs_max = 100000
self.callbacks = None
# Load and preprocess the data, build the generators
def data_preprocessing(self):
self._load_data_features()
if self.debug_mode:
self._take_subset_to_debug()
self._generate_class_weights()
self.GENERATORS = self._generate_generators(self.DATA_FEATURES)
# Determine which weights to load, if any.
def _weights_for_transfer_learning(self):
print('Looking for models to transfer weights from...')
# define parameters
parameters = self._version_to_parameters(self.version)
# continue training if possible
if self.continue_training and os.path.exists(self.path_load_weights):
print('Loading the weights from the model\'s previous training iteration.')
return
# Initialize the weights using the weights from other successful hyperparameter combinations
if self.transfer_learning == 'hyperparameters':
# Check if the same model with other hyperparameters has already been trained. Pick the best for transfer.
params = self.version.split('_')
params_tl_idx = \
[i for i in range(len(names_model_parameters))
if any(names_model_parameters[i] == p for p in
['optimizer', 'learning_rate', 'weight_decay', 'dropout_rate', 'data_augmentation_factor'])]
for idx in params_tl_idx:
params[idx] = '*'
versions = '../eo/MI02_' + '_'.join(params) + '.out'
files = glob.glob(versions)
if self.main_metric_mode == 'min':
best_perf = np.Inf
else:
best_perf = -np.Inf
for file in files:
hand = open(file, 'r')
# find best last performance
final_improvement_line = None
baseline_performance_line = None
for line in hand:
line = line.rstrip()
if re.search('Baseline validation ' + self.main_metric_name + ' = ', line):
baseline_performance_line = line
if re.search('val_' + self.main_metric_name + ' improved from', line):
final_improvement_line = line
hand.close()
if final_improvement_line is not None:
perf = float(final_improvement_line.split(' ')[7].replace(',', ''))
elif baseline_performance_line is not None:
perf = float(baseline_performance_line.split(' ')[-1])
else:
continue
# Keep track of the file with the best performance
if self.main_metric_mode == 'min':
update = perf < best_perf
else:
update = perf > best_perf
if update:
best_perf = perf
self.path_load_weights = \
file.replace('../eo/', self.path_data).replace('MI02', 'model-weights').replace('.out', '.h5')
if best_perf not in [-np.Inf, np.Inf]:
print('Transferring the weights from: ' + self.path_load_weights + ', with ' + self.main_metric_name +
' = ' + str(best_perf))
return
# Initialize the weights based on models trained on different datasets, ranked by similarity
if self.transfer_learning == 'datasets':
while True:
# print('Matching models for the following criteria:')
# print(['architecture', 'target'] + list_parameters_to_match)
# start by looking for models trained on the same target, then move to other targets
for target_to_load in self.dict_alternative_targets_for_transfer_learning[parameters['target']]:
# print('Target used: ' + target_to_load)
parameters_to_match = parameters.copy()
parameters_to_match['target'] = target_to_load
# load the ranked performances table to select the best performing model among the similar
# models available
path_performances_to_load = self.path_data + 'PERFORMANCES_ranked_' + \
parameters_to_match['target'] + '_' + 'val' + '.csv'
try:
Performances = pd.read_csv(path_performances_to_load)
Performances['organ'] = Performances['organ'].astype(str)
except FileNotFoundError:
# print("Could not load the file: " + path_performances_to_load)
break
# iteratively get rid of models that are not similar enough, based on the list
for parameter in ['architecture', 'target'] + self.list_parameters_to_match:
Performances = Performances[Performances[parameter] == parameters_to_match[parameter]]
# if at least one model is similar enough, load weights from the best of them
if len(Performances.index) != 0:
self.path_load_weights = self.path_data + 'model-weights_' + Performances['version'].values[0] + '.h5'
self.keras_weights = None
print('Transferring the weights from: ' + self.path_load_weights)
return
# if no similar model was found, try again after getting rid of the last selection criteria
if len(self.list_parameters_to_match) == 0:
print('No model found for transfer learning.')
break
self.list_parameters_to_match.pop()
# Otherwise use imagenet weights to initialize
print('Using imagenet weights.')
# no pretrained weights file to load; initialize from imagenet weights instead
self.path_load_weights = None
self.keras_weights = 'imagenet'
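# Summary of the fallback chain above: (1) resume from this exact version's previous weights if
# continue_training is set and the file exists, (2) in 'hyperparameters' mode, transfer from the best
# run of the same model trained with different optimization hyperparameters, (3) in 'datasets' mode,
# transfer from the best sufficiently similar model (progressively relaxing the matching criteria),
# and otherwise fall back to imagenet initialization.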
def _compile_model(self):
# if learning rate was reduced with success according to logger, start with this reduced learning rate
if self.path_load_weights is not None:
path_logger = self.path_load_weights.replace('model-weights', 'logger').replace('.h5', '.csv')
else:
path_logger = self.path_data + 'logger_' + self.version + '.csv'
if os.path.exists(path_logger):
try:
logger = pd.read_csv(path_logger)
best_log = \
logger[logger['val_' + self.main_metric_name] == logger['val_' + self.main_metric_name].max()]
lr = best_log['learning_rate'].values[0]
except pd.errors.EmptyDataError:
os.remove(path_logger)
lr = self.learning_rate
else:
lr = self.learning_rate
self.model.compile(optimizer=self.optimizers[self.optimizer](lr=lr, clipnorm=1.0), loss=self.loss_function,
metrics=self.metrics)
def _compute_baseline_performance(self):
# compute the baseline value of the main validation metric
if self.continue_training:
idx_metric_name = ([self.loss_name] + self.metrics_names).index(self.main_metric_name)
baseline_perfs = self.model.evaluate(self.GENERATORS['val'], steps=self.GENERATORS['val'].steps)
self.baseline_performance = baseline_perfs[idx_metric_name]
elif self.main_metric_mode == 'min':
self.baseline_performance = np.Inf
else:
self.baseline_performance = -np.Inf
print('Baseline validation ' + self.main_metric_name + ' = ' + str(self.baseline_performance))
def _define_callbacks(self):
if self.debug_mode:
path_logger = self.path_data + 'logger-debug.csv'
append = False
else:
path_logger = self.path_data + 'logger_' + self.version + '.csv'
append = self.continue_training
csv_logger = MyCSVLogger(path_logger, separator=',', append=append)
model_checkpoint_backup = MyModelCheckpoint(self.path_save_weights.replace('model-weights',
'backup-model-weights'),
monitor='val_' + self.main_metric.name,
baseline=self.baseline_performance, verbose=1, save_best_only=True,
save_weights_only=True, mode=self.main_metric_mode,
save_freq='epoch')
model_checkpoint = MyModelCheckpoint(self.path_save_weights,
monitor='val_' + self.main_metric.name, baseline=self.baseline_performance,
verbose=1, save_best_only=True, save_weights_only=True,
mode=self.main_metric_mode, save_freq='epoch')
patience_reduce_lr = min(7, 3 * self.GENERATORS['train'].n_subepochs_per_epoch)
reduce_lr_on_plateau = ReduceLROnPlateau(monitor='loss', factor=0.5, patience=patience_reduce_lr, verbose=1,
mode='min', min_delta=0, cooldown=0, min_lr=0)
early_stopping = EarlyStopping(monitor='val_' + self.main_metric.name, min_delta=0, patience=15, verbose=0,
mode=self.main_metric_mode, baseline=self.baseline_performance,
restore_best_weights=True)
self.callbacks = [csv_logger, model_checkpoint_backup, model_checkpoint, early_stopping, reduce_lr_on_plateau]
def build_model(self):
self._weights_for_transfer_learning()
self._generate_architecture()
# Load weights if possible
try:
load_weights = os.path.exists(self.path_load_weights)
except TypeError:
load_weights = False
if load_weights:
self._load_model_weights()
else:
# save transferred weights as default, in case no better weights are found
self.model.save_weights(self.path_save_weights.replace('model-weights', 'backup-model-weights'))
self.model.save_weights(self.path_save_weights)
self._compile_model()
self._compute_baseline_performance()
self._define_callbacks()
def train_model(self):
# garbage collector
_ = gc.collect()
# use more verbose when debugging
verbose = 1 if self.debug_mode else 2
# train the model
self.model.fit(self.GENERATORS['train'], steps_per_epoch=self.GENERATORS['train'].steps,
validation_data=self.GENERATORS['val'], validation_steps=self.GENERATORS['val'].steps,
shuffle=False, use_multiprocessing=False, workers=self.n_cpus, epochs=self.n_epochs_max,
class_weight=self.class_weights, callbacks=self.callbacks, verbose=verbose)
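# Sketch of how Training might be driven from a job script (argument values are hypothetical; the
# actual entry-point scripts are not shown in this module):
# trainer = Training(target='Age', organ='Abdomen', view='Liver', transformation='Raw',
# architecture='InceptionResNetV2', n_fc_layers='1', n_fc_nodes='1024',
# optimizer='Adam', learning_rate='0.0001', weight_decay='0.1', dropout_rate='0.5',
# data_augmentation_factor='1.0', outer_fold='0', transfer_learning='datasets')
# trainer.data_preprocessing()
# trainer.build_model()
# trainer.train_model()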
class PredictionsGenerate(DeepLearning):
"""
Generates the predictions for each model.
Unscales the predictions.
"""
def __init__(self, target=None, organ=None, view=None, transformation=None, architecture=None, n_fc_layers=None,
n_fc_nodes=None, optimizer=None, learning_rate=None, weight_decay=None, dropout_rate=None,
data_augmentation_factor=None, outer_fold=None, debug_mode=False):
# Initialize parameters
DeepLearning.__init__(self, target, organ, view, transformation, architecture, n_fc_layers, n_fc_nodes,
optimizer, learning_rate, weight_decay, dropout_rate, data_augmentation_factor,
debug_mode)
self.outer_fold = outer_fold
self.mode = 'model_testing'
# Define dictionaries attributes for data, generators and predictions
self.DATA_FEATURES_BATCH = {}
self.DATA_FEATURES_LEFTOVERS = {}
self.GENERATORS_BATCH = None
self.GENERATORS_LEFTOVERS = None
self.PREDICTIONS = {}
def _split_batch_leftovers(self):
# split the samples into two groups: what can fit into the batch size, and the leftovers.
for fold in self.folds:
n_leftovers = len(self.DATA_FEATURES[fold].index) % self.n_ids_batch
if n_leftovers > 0:
self.DATA_FEATURES_BATCH[fold] = self.DATA_FEATURES[fold].iloc[:-n_leftovers]
self.DATA_FEATURES_LEFTOVERS[fold] = self.DATA_FEATURES[fold].tail(n_leftovers)
else:
self.DATA_FEATURES_BATCH[fold] = self.DATA_FEATURES[fold] # special case for syntax if no leftovers
if fold in self.DATA_FEATURES_LEFTOVERS.keys():
del self.DATA_FEATURES_LEFTOVERS[fold]
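# Worked example with hypothetical sizes: for 1000 test samples and n_ids_batch = 16, the first 992
# samples go through the regular batched generator and the remaining 1000 % 16 = 8 samples go through
# a separate 'leftovers' generator whose batch size is reduced accordingly.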
def _generate_outerfold_predictions(self):
# prepare unscaling
if self.target in self.targets_regression:
mean_train = self.DATA_FEATURES['train'][self.target + '_raw'].mean()
std_train = self.DATA_FEATURES['train'][self.target + '_raw'].std()
else:
mean_train, std_train = None, None
# Generate predictions
for fold in self.folds:
print('Predicting samples from fold ' + fold + '.')
print(str(len(self.DATA_FEATURES[fold].index)) + ' samples to predict.')
print('Predicting batches: ' + str(len(self.DATA_FEATURES_BATCH[fold].index)) + ' samples.')
pred_batch = self.model.predict(self.GENERATORS_BATCH[fold], steps=self.GENERATORS_BATCH[fold].steps,
verbose=1)
if fold in self.GENERATORS_LEFTOVERS.keys():
print('Predicting leftovers: ' + str(len(self.DATA_FEATURES_LEFTOVERS[fold].index)) + ' samples.')
pred_leftovers = self.model.predict(self.GENERATORS_LEFTOVERS[fold],
steps=self.GENERATORS_LEFTOVERS[fold].steps, verbose=1)
pred_full = np.concatenate((pred_batch, pred_leftovers)).squeeze()
else:
pred_full = pred_batch.squeeze()
print('Predicted a total of ' + str(len(pred_full)) + ' samples.')
# take the average between left and right predictions for paired organs
if self.organ + '_' + self.view in self.left_right_organs_views:
pred_full = np.mean(pred_full.reshape(-1, 2), axis=1)
# unscale predictions
if self.target in self.targets_regression:
pred_full = pred_full * std_train + mean_train
# format the dataframe
self.DATA_FEATURES[fold]['pred'] = pred_full
self.PREDICTIONS[fold] = self.DATA_FEATURES[fold]
self.PREDICTIONS[fold]['id'] = [ID.replace('.jpg', '') for ID in self.PREDICTIONS[fold]['id']]
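# Note on the unscaling above: for regression targets the network is trained on standardized labels,
# so the raw predictions are mapped back to the original units as pred * std_train + mean_train, using
# the training fold's statistics; for paired organs the left and right predictions are averaged first.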
def _generate_predictions(self):
self.path_load_weights = self.path_data + 'model-weights_' + self.version + '_' + self.outer_fold + '.h5'
self._load_data_features()
if self.debug_mode:
self._take_subset_to_debug()
self._load_model_weights()
self._split_batch_leftovers()
# generate the generators
self.GENERATORS_BATCH = self._generate_generators(DATA_FEATURES=self.DATA_FEATURES_BATCH)
if self.DATA_FEATURES_LEFTOVERS is not None:
self.GENERATORS_LEFTOVERS = self._generate_generators(DATA_FEATURES=self.DATA_FEATURES_LEFTOVERS)
self._generate_outerfold_predictions()
def _format_predictions(self):
for fold in self.folds:
perf_fun = self.dict_metrics_sklearn[self.dict_main_metrics_names[self.target]]
perf = perf_fun(self.PREDICTIONS[fold][self.target + '_raw'], self.PREDICTIONS[fold]['pred'])
print('The ' + fold + ' performance is: ' + str(perf))
# format the predictions
self.PREDICTIONS[fold].index.name = 'column_names'
self.PREDICTIONS[fold] = self.PREDICTIONS[fold][['id', 'outer_fold', 'pred']]
def generate_predictions(self):
self._generate_architecture()
self._generate_predictions()
self._format_predictions()
def save_predictions(self):
for fold in self.folds:
self.PREDICTIONS[fold].to_csv(self.path_data + 'Predictions_instances_' + self.version + '_' + fold + '_'
+ self.outer_fold + '.csv', index=False)
class PredictionsConcatenate(Basics):
"""
Concatenates the predictions coming from the different cross validation folds.
"""
def __init__(self, target=None, organ=None, view=None, transformation=None, architecture=None, n_fc_layers=None,
n_fc_nodes=None, optimizer=None, learning_rate=None, weight_decay=None, dropout_rate=None,
data_augmentation_factor=None):
# Initialize parameters
Basics.__init__(self)
self.version = target + '_' + organ + '_' + view + '_' + transformation + '_' + architecture + '_' + \
n_fc_layers + '_' + n_fc_nodes + '_' + optimizer + '_' + learning_rate + '_' + weight_decay + \
'_' + dropout_rate + '_' + data_augmentation_factor
# Define dictionaries attributes for data, generators and predictions
self.PREDICTIONS = {}
def concatenate_predictions(self):
for fold in self.folds:
for outer_fold in self.outer_folds:
Predictions_fold = pd.read_csv(self.path_data + 'Predictions_instances_' + self.version + '_' + fold +
'_' + outer_fold + '.csv')
if fold in self.PREDICTIONS.keys():
self.PREDICTIONS[fold] = pd.concat([self.PREDICTIONS[fold], Predictions_fold])
else:
self.PREDICTIONS[fold] = Predictions_fold
def save_predictions(self):
for fold in self.folds:
self.PREDICTIONS[fold].to_csv(self.path_data + 'Predictions_instances_' + self.version + '_' + fold +
'.csv', index=False)
class PredictionsMerge(Basics):
"""
Merges the predictions from all models into a unified dataframe.
"""
def __init__(self, target=None, fold=None):
Basics.__init__(self)
# Define dictionaries attributes for data, generators and predictions
self.target = target
self.fold = fold
self.data_features = None
self.list_models = None
self.Predictions_df_previous = None
self.Predictions_df = None
def _load_data_features(self):
self.data_features = pd.read_csv(self.path_data + 'data-features_instances.csv',
usecols=self.id_vars + self.demographic_vars)
for var in self.id_vars:
self.data_features[var] = self.data_features[var].astype(str)
self.data_features.set_index('id', drop=False, inplace=True)
self.data_features.index.name = 'column_names'
def _preprocess_data_features(self):
# For the training set, each sample is predicted n_CV_outer_folds times, so prepare a larger dataframe
if self.fold == 'train':
df_all_folds = None
for outer_fold in self.outer_folds:
df_fold = self.data_features.copy()
df_all_folds = df_fold if outer_fold == self.outer_folds[0] else df_all_folds.append(df_fold)
self.data_features = df_all_folds
def _load_previous_merged_predictions(self):
if os.path.exists(self.path_data + 'PREDICTIONS_withoutEnsembles_instances_' + self.target + '_' + self.fold +
'.csv'):
self.Predictions_df_previous = pd.read_csv(self.path_data + 'PREDICTIONS_withoutEnsembles_instances_' +
self.target + '_' + self.fold + '.csv')
self.Predictions_df_previous.drop(columns=['eid', 'instance'] + self.demographic_vars, inplace=True)
def _list_models(self):
# generate list of predictions that will be integrated in the Predictions dataframe
self.list_models = glob.glob(self.path_data + 'Predictions_instances_' + self.target + '_*_' + self.fold +
'.csv')
# get rid of ensemble models and models already merged
self.list_models = [model for model in self.list_models if ('*' not in model)]
if self.Predictions_df_previous is not None:
self.list_models = \
[model for model in self.list_models
if ('pred_' + '_'.join(model.split('_')[2:-1]) not in self.Predictions_df_previous.columns)]
self.list_models.sort()
def preprocessing(self):
self._load_data_features()
self._preprocess_data_features()
self._load_previous_merged_predictions()
self._list_models()
def merge_predictions(self):
# merge the predictions
print('There are ' + str(len(self.list_models)) + ' models to merge.')
i = 0
# define subgroups to accelerate merging process
list_subgroups = list(set(['_'.join(model.split('_')[3:7]) for model in self.list_models]))
for subgroup in list_subgroups:
print('Merging models from the subgroup ' + subgroup)
models_subgroup = [model for model in self.list_models if subgroup in model]
Predictions_subgroup = None
# merge the models one by one
for file_name in models_subgroup:
i += 1
version = '_'.join(file_name.split('_')[2:-1])
if self.Predictions_df_previous is not None and \
'pred_' + version in self.Predictions_df_previous.columns:
print('The model ' + version + ' has already been merged before.')
else:
print('Merging the ' + str(i) + 'th model: ' + version)
# load csv and format the predictions
prediction = pd.read_csv(self.path_data + file_name)
print('raw prediction\'s shape: ' + str(prediction.shape))
for var in ['id', 'outer_fold']:
prediction[var] = prediction[var].apply(str)
prediction.rename(columns={'pred': 'pred_' + version}, inplace=True)
# merge data frames
if Predictions_subgroup is None:
Predictions_subgroup = prediction
elif self.fold == 'train':
Predictions_subgroup = Predictions_subgroup.merge(prediction, how='outer',
on=['id', 'outer_fold'])
else:
prediction.drop(['outer_fold'], axis=1, inplace=True)
# not supported for pandas versions > 0.23.4 for now
Predictions_subgroup = Predictions_subgroup.merge(prediction, how='outer', on=['id'])
# merge group predictions data frames
if self.fold != 'train':
Predictions_subgroup.drop(['outer_fold'], axis=1, inplace=True)
if Predictions_subgroup is not None:
if self.Predictions_df is None:
self.Predictions_df = Predictions_subgroup
elif self.fold == 'train':
self.Predictions_df = self.Predictions_df.merge(Predictions_subgroup, how='outer',
on=['id', 'outer_fold'])
else:
# not supported for pandas versions > 0.23.4 for now
self.Predictions_df = self.Predictions_df.merge(Predictions_subgroup, how='outer', on=['id'])
print('Predictions_df\'s shape: ' + str(self.Predictions_df.shape))
# garbage collector
gc.collect()
# Merge with the previously merged predictions
if (self.Predictions_df_previous is not None) & (self.Predictions_df is not None):
if self.fold == 'train':
self.Predictions_df = self.Predictions_df_previous.merge(self.Predictions_df, how='outer',
on=['id', 'outer_fold'])
else:
self.Predictions_df.drop(columns=['outer_fold'], inplace=True)
# not supported for pandas versions > 0.23.4 for now
self.Predictions_df = self.Predictions_df_previous.merge(self.Predictions_df, how='outer', on=['id'])
self.Predictions_df_previous = None
elif self.Predictions_df is None:
print('No new models to merge. Exiting.')
print('Done.')
sys.exit(0)
# Reorder the columns alphabetically
pred_versions = [col for col in self.Predictions_df.columns if 'pred_' in col]
pred_versions.sort()
id_cols = ['id', 'outer_fold'] if self.fold == 'train' else ['id']
self.Predictions_df = self.Predictions_df[id_cols + pred_versions]
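# Note: models are merged subgroup by subgroup (grouping on a slice of the prediction file names,
# i.e. organ/view/transformation/architecture) to keep each intermediate outer merge small. For the
# train fold the merge key is (id, outer_fold), because every sample is predicted once per outer
# cross-validation fold; for val/test the key is id alone.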
def postprocessing(self):
# get rid of useless rows in data_features before merging to keep the memory requirements as low as possible
self.data_features = self.data_features[self.data_features['id'].isin(self.Predictions_df['id'].values)]
# merge data_features and predictions
if self.fold == 'train':
print('Starting to merge a massive dataframe')
self.Predictions_df = self.data_features.merge(self.Predictions_df, how='outer', on=['id', 'outer_fold'])
else:
# not supported for pandas versions > 0.23.4 for now
self.Predictions_df = self.data_features.merge(self.Predictions_df, how='outer', on=['id'])
print('Merging done')
# remove rows for which no prediction is available (should be none)
subset_cols = [col for col in self.Predictions_df.columns if 'pred_' in col]
self.Predictions_df.dropna(subset=subset_cols, how='all', inplace=True)
# Displaying the R2s
versions = [col.replace('pred_', '') for col in self.Predictions_df.columns if 'pred_' in col]
r2s = []
for version in versions:
df = self.Predictions_df[[self.target, 'pred_' + version]].dropna()
r2s.append(r2_score(df[self.target], df['pred_' + version]))
R2S = pd.DataFrame({'version': versions, 'R2': r2s})
R2S.sort_values(by='R2', ascending=False, inplace=True)
print('R2 for each model: ')
print(R2S)
def save_merged_predictions(self):
print('Writing the merged predictions...')
self.Predictions_df.to_csv(self.path_data + 'PREDICTIONS_withoutEnsembles_instances_' + self.target + '_' +
self.fold + '.csv', index=False)
class PredictionsEids(Basics):
"""
Computes the average age prediction across samples from different instances for every participant.
(Scaled back to instance 0)
"""
def __init__(self, target=None, fold=None, debug_mode=None):
Basics.__init__(self)
# Define dictionaries attributes for data, generators and predictions
self.target = target
self.fold = fold
self.debug_mode = debug_mode
self.Predictions = None
self.Predictions_chunk = None
self.pred_versions = None
self.res_versions = None
self.target_0s = None
self.Predictions_eids = None
self.Predictions_eids_previous = None
self.pred_versions_previous = None
def preprocessing(self):
# Load predictions
self.Predictions = pd.read_csv(
self.path_data + 'PREDICTIONS_withoutEnsembles_instances_' + self.target + '_' + self.fold + '.csv')
self.Predictions.drop(columns=['id'], inplace=True)
self.Predictions['eid'] = self.Predictions['eid'].astype(str)
self.Predictions.index.name = 'column_names'
self.pred_versions = [col for col in self.Predictions.columns.values if 'pred_' in col]
# Prepare target values on instance 0 as a reference
target_0s = pd.read_csv(self.path_data + 'data-features_eids.csv', usecols=['eid', self.target])
target_0s['eid'] = target_0s['eid'].astype(str)
target_0s.set_index('eid', inplace=True)
target_0s = target_0s[self.target]
target_0s.name = 'target_0'
target_0s = target_0s[self.Predictions['eid'].unique()]
self.Predictions = self.Predictions.merge(target_0s, on='eid')
# Rescale the biological age predictions to the instance-0 target (target_0)
for pred in self.pred_versions:
# Compute the bias of the predictions as a function of age
print('Generating residuals for model ' + pred.replace('pred_', ''))
df_model = self.Predictions[['Age', pred]].dropna()
if (len(df_model.index)) > 0:
age = df_model.loc[:, ['Age']]
res = df_model['Age'] - df_model[pred]
regr = LinearRegression()
regr.fit(age, res)
self.Predictions[pred.replace('pred_', 'correction_')] = regr.predict(self.Predictions[['Age']])
# Take the residual bias into account when "translating" the prediction to instance 0
correction = self.Predictions['target_0'] - self.Predictions[self.target] + \
regr.predict(self.Predictions[['Age']]) - regr.predict(self.Predictions[['target_0']])
self.Predictions[pred] = self.Predictions[pred] + correction
self.Predictions[self.target] = self.Predictions['target_0']
self.Predictions.drop(columns=['target_0'], inplace=True)
self.Predictions.index.name = 'column_names'
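# Note on the correction above: for each model the residual Age - prediction is regressed on Age, and
# each prediction is shifted by (target_0 - target) + (bias(Age) - bias(target_0)), so that predictions
# from follow-up instances are expressed on the scale of the participant's instance-0 age before being
# averaged per eid in processing() below.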
def processing(self):
if self.fold == 'train':
# Prepare template to which each model will be appended
Predictions = self.Predictions[['eid'] + self.demographic_vars]
Predictions = Predictions.groupby('eid', as_index=True).mean()
Predictions.index.name = 'column_names'
Predictions['eid'] = Predictions.index.values
Predictions['instance'] = '*'
Predictions['id'] = Predictions['eid'] + '_*'
self.Predictions_eids = Predictions.copy()
self.Predictions_eids['outer_fold'] = -1
for i in range(self.n_CV_outer_folds):
Predictions_i = Predictions.copy()
Predictions_i['outer_fold'] = i
self.Predictions_eids = self.Predictions_eids.append(Predictions_i)
# Append each model one by one because the folds are different
print(str(len(self.pred_versions)) + ' models to compute.')
for pred_version in self.pred_versions:
if self.pred_versions_previous is not None and pred_version in self.pred_versions_previous:
print(pred_version.replace('pred_', '') + ' had already been computed.')
else:
print("Computing results for version " + pred_version.replace('pred_', ''))
Predictions_version = self.Predictions[['eid', pred_version, 'outer_fold']].copy()
# Use a placeholder for NaN in outer_folds so that they form their own group
Predictions_version['outer_fold'] = Predictions_version['outer_fold'].fillna(-1)
Predictions_version_eids = Predictions_version.groupby(['eid', 'outer_fold'], as_index=False).mean()
self.Predictions_eids = self.Predictions_eids.merge(Predictions_version_eids,
on=['eid', 'outer_fold'], how='outer')
# derive the name of the outer-fold column for this model (assumed to mirror the 'pred_' -> 'outer_fold_' naming)
of_version = pred_version.replace('pred_', 'outer_fold_')
self.Predictions_eids[of_version] = self.Predictions_eids['outer_fold']
self.Predictions_eids.loc[self.Predictions_eids[of_version] == -1, of_version] = np.nan
del Predictions_version
_ = gc.collect()
self.Predictions_eids.drop(columns=['outer_fold'], inplace=True)
else:
self.Predictions_eids = self.Predictions.groupby('eid').mean()
self.Predictions_eids['eid'] = self.Predictions_eids.index.values
self.Predictions_eids['instance'] = '*'
self.Predictions_eids['id'] = self.Predictions_eids['eid'].astype(str) + '_' + \
self.Predictions_eids['instance']
# Re-order the columns
self.Predictions_eids = self.Predictions_eids[self.id_vars + self.demographic_vars + self.pred_versions]
def postprocessing(self):
# Displaying the R2s
versions = [col.replace('pred_', '') for col in self.Predictions_eids.columns if 'pred_' in col]
r2s = []
for version in versions:
df = self.Predictions_eids[[self.target, 'pred_' + version]].dropna()
r2s.append(r2_score(df[self.target], df['pred_' + version]))
R2S = pd.DataFrame({'version': versions, 'R2': r2s})
R2S.sort_values(by='R2', ascending=False, inplace=True)
print('R2 for each model: ')
print(R2S)
import pytest
import numpy as np
import pandas
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
import matplotlib
import modin.pandas as pd
from modin.pandas.utils import to_pandas
from numpy.testing import assert_array_equal
from .utils import (
random_state,
RAND_LOW,
RAND_HIGH,
df_equals,
df_is_empty,
arg_keys,
name_contains,
test_data_values,
test_data_keys,
test_data_with_duplicates_values,
test_data_with_duplicates_keys,
numeric_dfs,
no_numeric_dfs,
test_func_keys,
test_func_values,
query_func_keys,
query_func_values,
agg_func_keys,
agg_func_values,
numeric_agg_funcs,
quantiles_keys,
quantiles_values,
indices_keys,
indices_values,
axis_keys,
axis_values,
bool_arg_keys,
bool_arg_values,
int_arg_keys,
int_arg_values,
)
# TODO remove once modin-project/modin#469 is resolved
agg_func_keys.remove("str")
agg_func_values.remove(str)
pd.DEFAULT_NPARTITIONS = 4
# Force matplotlib to not use any Xwindows backend.
matplotlib.use("Agg")
class TestDFPartOne:
# Test inter df math functions
def inter_df_math_helper(self, modin_df, pandas_df, op):
# Test dataframe to dataframe
try:
pandas_result = getattr(pandas_df, op)(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df)
else:
modin_result = getattr(modin_df, op)(modin_df)
df_equals(modin_result, pandas_result)
# Test dataframe to int
try:
pandas_result = getattr(pandas_df, op)(4)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
# Test dataframe to float
try:
pandas_result = getattr(pandas_df, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
# Test transposed dataframes to float
try:
pandas_result = getattr(pandas_df.T, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df.T, op)(4.0)
else:
modin_result = getattr(modin_df.T, op)(4.0)
df_equals(modin_result, pandas_result)
frame_data = {
"{}_other".format(modin_df.columns[0]): [0, 2],
modin_df.columns[0]: [0, 19],
modin_df.columns[1]: [1, 1],
}
modin_df2 = pd.DataFrame(frame_data)
pandas_df2 = pandas.DataFrame(frame_data)
# Test dataframe to different dataframe shape
try:
pandas_result = getattr(pandas_df, op)(pandas_df2)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df2)
else:
modin_result = getattr(modin_df, op)(modin_df2)
df_equals(modin_result, pandas_result)
# Test dataframe to list
list_test = random_state.randint(RAND_LOW, RAND_HIGH, size=(modin_df.shape[1]))
try:
pandas_result = getattr(pandas_df, op)(list_test, axis=1)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(list_test, axis=1)
else:
modin_result = getattr(modin_df, op)(list_test, axis=1)
df_equals(modin_result, pandas_result)
# Test dataframe to series
series_test_modin = modin_df[modin_df.columns[0]]
series_test_pandas = pandas_df[pandas_df.columns[0]]
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Test dataframe to series with different index
series_test_modin = modin_df[modin_df.columns[0]].reset_index(drop=True)
series_test_pandas = pandas_df[pandas_df.columns[0]].reset_index(drop=True)
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Level test
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
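# Note: the helper above mirrors pandas' behavior exactly: each operation is first run on the pandas
# frame and, if it raises, the test only checks that Modin raises the same exception type; otherwise
# the two results must be equal. The multi-level index case with level=1 is expected to default to
# pandas and therefore to emit a UserWarning.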
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "add")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_div(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "div")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_divide(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "divide")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_floordiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "floordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mod(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mul(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_multiply(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "multiply")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pow(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# TODO: Revert to others once we have an efficient way of preprocessing for positive
# values
try:
pandas_df = pandas_df.abs()
except Exception:
pass
else:
modin_df = modin_df.abs()
self.inter_df_math_helper(modin_df, pandas_df, "pow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_sub(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "sub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_subtract(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "subtract")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_truediv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "truediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___div__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__div__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___add__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__add__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___radd__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__radd__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mul__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__mul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rmul__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rmul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___pow__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__pow__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rpow__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rpow__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___sub__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__sub__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___floordiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__floordiv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rfloordiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rfloordiv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___truediv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__truediv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rtruediv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rtruediv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mod__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__mod__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rmod__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rmod__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rdiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rdiv__")
# END test inter df math functions
# Test comparison of inter operation functions
def comparison_inter_ops_helper(self, modin_df, pandas_df, op):
try:
pandas_result = getattr(pandas_df, op)(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df)
else:
modin_result = getattr(modin_df, op)(modin_df)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4)
except TypeError:
with pytest.raises(TypeError):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4.0)
except TypeError:
with pytest.raises(TypeError):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)("a")
except TypeError:
with pytest.raises(TypeError):
repr(getattr(modin_df, op)("a"))
else:
modin_result = getattr(modin_df, op)("a")
df_equals(modin_result, pandas_result)
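        # A frame whose columns only partially overlap with modin_df: comparing
        # misaligned frames should behave the same way (result or error) in both libraries.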
frame_data = {
"{}_other".format(modin_df.columns[0]): [0, 2],
modin_df.columns[0]: [0, 19],
modin_df.columns[1]: [1, 1],
}
modin_df2 = pd.DataFrame(frame_data)
pandas_df2 = pandas.DataFrame(frame_data)
try:
pandas_result = getattr(pandas_df, op)(pandas_df2)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df2)
else:
modin_result = getattr(modin_df, op)(modin_df2)
df_equals(modin_result, pandas_result)
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_eq(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "eq")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ge(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "ge")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_gt(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "gt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_le(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "le")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_lt(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "lt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ne(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "ne")
# END test comparison of inter operation functions
# Test dataframe right operations
def inter_df_math_right_ops_helper(self, modin_df, pandas_df, op):
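        # Apply the reflected (r-prefixed) operator with int and float scalars and
        # check that Modin mirrors the pandas result or exception; the level= call
        # on a MultiIndex below is expected to default to pandas with a warning.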
try:
pandas_result = getattr(pandas_df, op)(4)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_radd(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "radd")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rdiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rdiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rfloordiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rfloordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rmod(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rmod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rmul(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rmul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rpow(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
        # TODO: Revert to running all datasets once we have an efficient way of
        # preprocessing the data to contain only positive values. Until then, skip
        # the large random dataset, which may contain negative integers.
if "100x100" not in request.node.name:
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rpow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rsub(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rsub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rtruediv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rtruediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rsub__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "__rsub__")
# END test dataframe right operations
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_abs(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.abs()
except Exception as e:
with pytest.raises(type(e)):
modin_df.abs()
else:
modin_result = modin_df.abs()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_prefix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
test_prefix = "TEST"
new_modin_df = modin_df.add_prefix(test_prefix)
new_pandas_df = pandas_df.add_prefix(test_prefix)
df_equals(new_modin_df.columns, new_pandas_df.columns)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap(self, request, data, testfunc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
x = 2
modin_df.applymap(x)
try:
pandas_result = pandas_df.applymap(testfunc)
except Exception as e:
with pytest.raises(type(e)):
modin_df.applymap(testfunc)
else:
modin_result = modin_df.applymap(testfunc)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap_numeric(self, request, data, testfunc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
try:
pandas_result = pandas_df.applymap(testfunc)
except Exception as e:
with pytest.raises(type(e)):
modin_df.applymap(testfunc)
else:
modin_result = modin_df.applymap(testfunc)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_suffix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
test_suffix = "TEST"
new_modin_df = modin_df.add_suffix(test_suffix)
new_pandas_df = pandas_df.add_suffix(test_suffix)
df_equals(new_modin_df.columns, new_pandas_df.columns)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_at(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# We skip nan datasets because nan != nan
if "nan" not in request.node.name:
key1 = modin_df.columns[0]
            # Scalar
assert modin_df.at[0, key1] == pandas_df.at[0, key1]
# Series
df_equals(modin_df.loc[0].at[key1], pandas_df.loc[0].at[key1])
# Write Item
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.at[1, key1] = modin_df.at[0, key1]
pandas_df_copy.at[1, key1] = pandas_df.at[0, key1]
df_equals(modin_df_copy, pandas_df_copy)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_axes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
for modin_axis, pd_axis in zip(modin_df.axes, pandas_df.axes):
assert np.array_equal(modin_axis, pd_axis)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_copy(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
        # pandas_df is unused, but building it here keeps the pytest.mark.parametrize
        # decorator free of confusing list-comprehension workarounds
new_modin_df = modin_df.copy()
assert new_modin_df is not modin_df
assert np.array_equal(
new_modin_df._query_compiler._modin_frame._partitions,
modin_df._query_compiler._modin_frame._partitions,
)
assert new_modin_df is not modin_df
df_equals(new_modin_df, modin_df)
# Shallow copy tests
modin_df = pd.DataFrame(data)
modin_df_cp = modin_df.copy(False)
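        # copy(False) is a shallow copy, so the mutation below should be visible
        # in both frames and they must stay equal.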
modin_df[modin_df.columns[0]] = 0
df_equals(modin_df, modin_df_cp)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dtypes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.dtypes, pandas_df.dtypes)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ftypes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.ftypes, pandas_df.ftypes)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("key", indices_values, ids=indices_keys)
def test_get(self, data, key):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.get(key), pandas_df.get(key))
df_equals(
modin_df.get(key, default="default"), pandas_df.get(key, default="default")
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_get_dtype_counts(self, data):
modin_result = pd.DataFrame(data).get_dtype_counts().sort_index()
pandas_result = pandas.DataFrame(data).get_dtype_counts().sort_index()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"dummy_na", bool_arg_values, ids=arg_keys("dummy_na", bool_arg_keys)
)
@pytest.mark.parametrize(
"drop_first", bool_arg_values, ids=arg_keys("drop_first", bool_arg_keys)
)
def test_get_dummies(self, request, data, dummy_na, drop_first):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas.get_dummies(
pandas_df, dummy_na=dummy_na, drop_first=drop_first
)
except Exception as e:
with pytest.raises(type(e)):
pd.get_dummies(modin_df, dummy_na=dummy_na, drop_first=drop_first)
else:
modin_result = pd.get_dummies(
modin_df, dummy_na=dummy_na, drop_first=drop_first
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_get_ftype_counts(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.get_ftype_counts(), pandas_df.get_ftype_counts())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_agg(self, data, axis, func):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_agg_numeric(self, request, data, axis, func):
if name_contains(request.node.name, numeric_agg_funcs) and name_contains(
request.node.name, numeric_dfs
):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_aggregate(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.aggregate(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.aggregate(func, axis)
else:
modin_result = modin_df.aggregate(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_aggregate_numeric(self, request, data, axis, func):
if name_contains(request.node.name, numeric_agg_funcs) and name_contains(
request.node.name, numeric_dfs
):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_aggregate_error_checking(self, data):
modin_df = pd.DataFrame(data)
assert modin_df.aggregate("ndim") == 2
with pytest.warns(UserWarning):
modin_df.aggregate(
{modin_df.columns[0]: "sum", modin_df.columns[1]: "mean"}
)
with pytest.warns(UserWarning):
modin_df.aggregate("cumproduct")
with pytest.raises(ValueError):
modin_df.aggregate("NOT_EXISTS")
def test_align(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).align(pd.DataFrame(data))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"bool_only", bool_arg_values, ids=arg_keys("bool_only", bool_arg_keys)
)
def test_all(self, data, axis, skipna, bool_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
        # Test when axis is None. This repeats the call above, but it is simpler
        # than adding None to the parametrize decorator.
try:
pandas_result = pandas_df.all(axis=None, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.all(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.all(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.all(
axis=axis, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.all(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.all(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
        # Test when axis is None. This repeats the call above, but it is simpler
        # than adding None to the parametrize decorator.
try:
pandas_result = pandas_df.T.all(
axis=None, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.all(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.all(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
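        # Build a three-level MultiIndex on the tested axis so that level= can be
        # exercised both by position and, when names are given, by name.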
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
for level in list(range(levels)) + (axis_names if axis_names else []):
try:
pandas_multi_level_result = pandas_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
else:
modin_multi_level_result = modin_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"bool_only", bool_arg_values, ids=arg_keys("bool_only", bool_arg_keys)
)
def test_any(self, data, axis, skipna, bool_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.any(axis=None, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.any(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.any(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.any(
axis=axis, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.any(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.any(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.any(
axis=None, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.any(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.any(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
for level in list(range(levels)) + (axis_names if axis_names else []):
try:
pandas_multi_level_result = pandas_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
else:
modin_multi_level_result = modin_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_append(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
data_to_append = {"append_a": 2, "append_b": 1000}
ignore_idx_values = [True, False]
for ignore in ignore_idx_values:
try:
pandas_result = pandas_df.append(data_to_append, ignore_index=ignore)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(data_to_append, ignore_index=ignore)
else:
modin_result = modin_df.append(data_to_append, ignore_index=ignore)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(pandas_df.iloc[-1])
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(modin_df.iloc[-1])
else:
modin_result = modin_df.append(modin_df.iloc[-1])
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(list(pandas_df.iloc[-1]))
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(list(modin_df.iloc[-1]))
else:
modin_result = modin_df.append(list(modin_df.iloc[-1]))
df_equals(modin_result, pandas_result)
verify_integrity_values = [True, False]
for verify_integrity in verify_integrity_values:
try:
pandas_result = pandas_df.append(
[pandas_df, pandas_df], verify_integrity=verify_integrity
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(
[modin_df, modin_df], verify_integrity=verify_integrity
)
else:
modin_result = modin_df.append(
[modin_df, modin_df], verify_integrity=verify_integrity
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(
pandas_df, verify_integrity=verify_integrity
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(modin_df, verify_integrity=verify_integrity)
else:
modin_result = modin_df.append(
modin_df, verify_integrity=verify_integrity
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_apply(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(TypeError):
modin_df.apply({"row": func}, axis=1)
try:
pandas_result = pandas_df.apply(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.apply(func, axis)
else:
modin_result = modin_df.apply(func, axis)
df_equals(modin_result, pandas_result)
def test_apply_metadata(self):
def add(a, b, c):
return a + b + c
data = {"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}
modin_df = pd.DataFrame(data)
modin_df["add"] = modin_df.apply(
lambda row: add(row["A"], row["B"], row["C"]), axis=1
)
pandas_df = pandas.DataFrame(data)
pandas_df["add"] = pandas_df.apply(
lambda row: add(row["A"], row["B"], row["C"]), axis=1
)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_apply_numeric(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
try:
pandas_result = pandas_df.apply(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.apply(func, axis)
else:
modin_result = modin_df.apply(func, axis)
df_equals(modin_result, pandas_result)
if "empty_data" not in request.node.name:
key = modin_df.columns[0]
modin_result = modin_df.apply(lambda df: df.drop(key), axis=1)
pandas_result = pandas_df.apply(lambda df: df.drop(key), axis=1)
df_equals(modin_result, pandas_result)
def test_as_blocks(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).as_blocks()
def test_as_matrix(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
mat = frame.as_matrix()
frame_columns = frame.columns
for i, row in enumerate(mat):
for j, value in enumerate(row):
col = frame_columns[j]
if np.isnan(value):
assert np.isnan(frame[col][i])
else:
assert value == frame[col][i]
# mixed type
mat = pd.DataFrame(test_data.mixed_frame).as_matrix(["foo", "A"])
assert mat[0, 0] == "bar"
df = pd.DataFrame({"real": [1, 2, 3], "complex": [1j, 2j, 3j]})
mat = df.as_matrix()
assert mat[0, 1] == 1j
# single block corner case
mat = pd.DataFrame(test_data.frame).as_matrix(["A", "B"])
expected = test_data.frame.reindex(columns=["A", "B"]).values
tm.assert_almost_equal(mat, expected)
def test_to_numpy(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
assert_array_equal(frame.values, test_data.frame.values)
def test_partition_to_numpy(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
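        # Every internal partition's to_numpy() should match the values of its
        # pandas representation.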
for (
partition
) in frame._query_compiler._modin_frame._partitions.flatten().tolist():
assert_array_equal(partition.to_pandas().values, partition.to_numpy())
def test_asfreq(self):
index = pd.date_range("1/1/2000", periods=4, freq="T")
series = pd.Series([0.0, None, 2.0, 3.0], index=index)
df = pd.DataFrame({"s": series})
with pytest.warns(UserWarning):
# We are only testing that this defaults to pandas, so we will just check for
# the warning
df.asfreq(freq="30S")
def test_asof(self):
df = pd.DataFrame(
{"a": [10, 20, 30, 40, 50], "b": [None, None, None, None, 500]},
index=pd.DatetimeIndex(
[
"2018-02-27 09:01:00",
"2018-02-27 09:02:00",
"2018-02-27 09:03:00",
"2018-02-27 09:04:00",
"2018-02-27 09:05:00",
]
),
)
with pytest.warns(UserWarning):
df.asof(pd.DatetimeIndex(["2018-02-27 09:03:30", "2018-02-27 09:04:30"]))
def test_assign(self):
data = test_data_values[0]
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.warns(UserWarning):
modin_result = modin_df.assign(new_column=pd.Series(modin_df.iloc[:, 0]))
pandas_result = pandas_df.assign(new_column=pd.Series(pandas_df.iloc[:, 0]))
df_equals(modin_result, pandas_result)
def test_astype(self):
td = TestData()
modin_df = pd.DataFrame(
td.frame.values, index=td.frame.index, columns=td.frame.columns
)
expected_df = pandas.DataFrame(
td.frame.values, index=td.frame.index, columns=td.frame.columns
)
modin_df_casted = modin_df.astype(np.int32)
expected_df_casted = expected_df.astype(np.int32)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype(np.float64)
expected_df_casted = expected_df.astype(np.float64)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype(str)
expected_df_casted = expected_df.astype(str)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype("category")
expected_df_casted = expected_df.astype("category")
df_equals(modin_df_casted, expected_df_casted)
dtype_dict = {"A": np.int32, "B": np.int64, "C": str}
modin_df_casted = modin_df.astype(dtype_dict)
expected_df_casted = expected_df.astype(dtype_dict)
df_equals(modin_df_casted, expected_df_casted)
# Ignore lint because this is testing bad input
bad_dtype_dict = {"B": np.int32, "B": np.int64, "B": str} # noqa F601
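        # Duplicate keys in a dict literal collapse to the last value, so this is
        # effectively {"B": str}; both libraries should handle it identically.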
modin_df_casted = modin_df.astype(bad_dtype_dict)
expected_df_casted = expected_df.astype(bad_dtype_dict)
df_equals(modin_df_casted, expected_df_casted)
with pytest.raises(KeyError):
modin_df.astype({"not_exists": np.uint8})
def test_astype_category(self):
modin_df = pd.DataFrame(
{"col1": ["A", "A", "B", "B", "A"], "col2": [1, 2, 3, 4, 5]}
)
pandas_df = pandas.DataFrame(
{"col1": ["A", "A", "B", "B", "A"], "col2": [1, 2, 3, 4, 5]}
)
modin_result = modin_df.astype({"col1": "category"})
pandas_result = pandas_df.astype({"col1": "category"})
df_equals(modin_result, pandas_result)
assert modin_result.dtypes.equals(pandas_result.dtypes)
modin_result = modin_df.astype("category")
pandas_result = pandas_df.astype("category")
df_equals(modin_result, pandas_result)
assert modin_result.dtypes.equals(pandas_result.dtypes)
def test_at_time(self):
i = pd.date_range("2018-04-09", periods=4, freq="12H")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.at_time("12:00")
def test_between_time(self):
i = pd.date_range("2018-04-09", periods=4, freq="12H")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.between_time("0:15", "0:45")
def test_bfill(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(modin_df.bfill(), test_data.tsframe.bfill())
def test_blocks(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).blocks
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_bool(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(ValueError):
modin_df.bool()
modin_df.__bool__()
single_bool_pandas_df = pandas.DataFrame([True])
single_bool_modin_df = pd.DataFrame([True])
assert single_bool_pandas_df.bool() == single_bool_modin_df.bool()
with pytest.raises(ValueError):
# __bool__ always raises this error for DataFrames
single_bool_modin_df.__bool__()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_boxplot(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
assert modin_df.boxplot() == to_pandas(modin_df).boxplot()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
lower, upper = np.sort(random_state.random_integers(RAND_LOW, RAND_HIGH, 2))
lower_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
upper_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test only upper scalar bound
modin_result = modin_df.clip(None, upper, axis=axis)
pandas_result = pandas_df.clip(None, upper, axis=axis)
df_equals(modin_result, pandas_result)
# test lower and upper scalar bound
modin_result = modin_df.clip(lower, upper, axis=axis)
pandas_result = pandas_df.clip(lower, upper, axis=axis)
df_equals(modin_result, pandas_result)
# test lower and upper list bound on each column
modin_result = modin_df.clip(lower_list, upper_list, axis=axis)
pandas_result = pandas_df.clip(lower_list, upper_list, axis=axis)
df_equals(modin_result, pandas_result)
# test only upper list bound on each column
modin_result = modin_df.clip(np.nan, upper_list, axis=axis)
pandas_result = pandas_df.clip(np.nan, upper_list, axis=axis)
df_equals(modin_result, pandas_result)
with pytest.raises(ValueError):
modin_df.clip(lower=[1, 2, 3], axis=None)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip_lower(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
lower = random_state.random_integers(RAND_LOW, RAND_HIGH, 1)[0]
lower_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test lower scalar bound
pandas_result = pandas_df.clip_lower(lower, axis=axis)
modin_result = modin_df.clip_lower(lower, axis=axis)
df_equals(modin_result, pandas_result)
# test lower list bound on each column
pandas_result = pandas_df.clip_lower(lower_list, axis=axis)
modin_result = modin_df.clip_lower(lower_list, axis=axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip_upper(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
upper = random_state.random_integers(RAND_LOW, RAND_HIGH, 1)[0]
upper_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test upper scalar bound
modin_result = modin_df.clip_upper(upper, axis=axis)
pandas_result = pandas_df.clip_upper(upper, axis=axis)
df_equals(modin_result, pandas_result)
# test upper list bound on each column
modin_result = modin_df.clip_upper(upper_list, axis=axis)
pandas_result = pandas_df.clip_upper(upper_list, axis=axis)
df_equals(modin_result, pandas_result)
def test_combine(self):
df1 = pd.DataFrame({"A": [0, 0], "B": [4, 4]})
df2 = pd.DataFrame({"A": [1, 1], "B": [3, 3]})
with pytest.warns(UserWarning):
df1.combine(df2, lambda s1, s2: s1 if s1.sum() < s2.sum() else s2)
def test_combine_first(self):
df1 = pd.DataFrame({"A": [None, 0], "B": [None, 4]})
df2 = pd.DataFrame({"A": [1, 1], "B": [3, 3]})
with pytest.warns(UserWarning):
df1.combine_first(df2)
def test_compound(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).compound()
def test_corr(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).corr()
def test_corrwith(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).corrwith(pd.DataFrame(data))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_count(self, request, data, axis, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.count(axis=axis, numeric_only=numeric_only)
pandas_result = pandas_df.count(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.count(axis=axis, numeric_only=numeric_only)
pandas_result = pandas_df.T.count(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
try: # test error
pandas_df_multi_level.count(
axis=1, numeric_only=numeric_only, level=0
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.count(
axis=1, numeric_only=numeric_only, level=0
)
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
try: # test error
pandas_df_multi_level.count(
axis=0, numeric_only=numeric_only, level=0
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.count(
axis=0, numeric_only=numeric_only, level=0
)
for level in list(range(levels)) + (axis_names if axis_names else []):
modin_multi_level_result = modin_df_multi_level.count(
axis=axis, numeric_only=numeric_only, level=level
)
pandas_multi_level_result = pandas_df_multi_level.count(
axis=axis, numeric_only=numeric_only, level=level
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
def test_cov(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).cov()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cummax(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cummax(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cummax(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cummax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cummax(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cummax(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cummax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cummin(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cummin(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cummin(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cummin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cummin(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cummin(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cummin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cumprod(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cumprod(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cumprod(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cumprod(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cumprod(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cumprod(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cumprod(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cumsum(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
        # pandas behaves inconsistently for datetime/timedelta data along axis 0.
        # Remove this special case once we can pull the error messages from the backend.
if name_contains(request.node.name, ["datetime_timedelta_data"]) and (
axis == 0 or axis == "rows"
):
with pytest.raises(TypeError):
modin_df.cumsum(axis=axis, skipna=skipna)
else:
try:
pandas_result = pandas_df.cumsum(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cumsum(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cumsum(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
if name_contains(request.node.name, ["datetime_timedelta_data"]) and (
axis == 0 or axis == "rows"
):
with pytest.raises(TypeError):
modin_df.T.cumsum(axis=axis, skipna=skipna)
else:
try:
pandas_result = pandas_df.T.cumsum(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cumsum(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cumsum(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_describe(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.describe(), pandas_df.describe())
percentiles = [0.10, 0.11, 0.44, 0.78, 0.99]
df_equals(
modin_df.describe(percentiles=percentiles),
pandas_df.describe(percentiles=percentiles),
)
try:
pandas_result = pandas_df.describe(exclude=[np.float64])
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(exclude=[np.float64])
else:
modin_result = modin_df.describe(exclude=[np.float64])
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.describe(exclude=np.float64)
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(exclude=np.float64)
else:
modin_result = modin_df.describe(exclude=np.float64)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
else:
modin_result = modin_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
df_equals(modin_result, pandas_result)
modin_result = modin_df.describe(include=str(modin_df.dtypes.values[0]))
pandas_result = pandas_df.describe(include=str(pandas_df.dtypes.values[0]))
df_equals(modin_result, pandas_result)
modin_result = modin_df.describe(include=[np.number])
pandas_result = pandas_df.describe(include=[np.number])
df_equals(modin_result, pandas_result)
df_equals(modin_df.describe(include="all"), pandas_df.describe(include="all"))
modin_df = pd.DataFrame(data).applymap(str)
pandas_df = pandas.DataFrame(data).applymap(str)
try:
df_equals(modin_df.describe(), pandas_df.describe())
except AssertionError:
            # We choose the highest count slightly differently than pandas, and there
            # is no guarantee which value comes first, so the `top` rows can differ.
            # If the full results don't match, at least make sure that the
            # `count`, `unique`, and `freq` rows agree.
df_equals(
modin_df.describe().loc[["count", "unique", "freq"]],
pandas_df.describe().loc[["count", "unique", "freq"]],
)
def test_describe_dtypes(self):
modin_df = pd.DataFrame(
{
"col1": list("abc"),
"col2": list("abc"),
"col3": list("abc"),
"col4": [1, 2, 3],
}
)
pandas_df = pandas.DataFrame(
{
"col1": list("abc"),
"col2": list("abc"),
"col3": list("abc"),
"col4": [1, 2, 3],
}
)
modin_result = modin_df.describe()
pandas_result = pandas_df.describe()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"periods", int_arg_values, ids=arg_keys("periods", int_arg_keys)
)
def test_diff(self, request, data, axis, periods):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.diff(axis=axis, periods=periods)
except Exception as e:
with pytest.raises(type(e)):
modin_df.diff(axis=axis, periods=periods)
else:
modin_result = modin_df.diff(axis=axis, periods=periods)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.diff(axis=axis, periods=periods)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.diff(axis=axis, periods=periods)
else:
modin_result = modin_df.T.diff(axis=axis, periods=periods)
df_equals(modin_result, pandas_result)
def test_drop(self):
frame_data = {"A": [1, 2, 3, 4], "B": [0, 1, 2, 3]}
simple = pandas.DataFrame(frame_data)
modin_simple = pd.DataFrame(frame_data)
df_equals(modin_simple.drop("A", axis=1), simple[["B"]])
df_equals(modin_simple.drop(["A", "B"], axis="columns"), simple[[]])
df_equals(modin_simple.drop([0, 1, 3], axis=0), simple.loc[[2], :])
df_equals(modin_simple.drop([0, 3], axis="index"), simple.loc[[1, 2], :])
pytest.raises(ValueError, modin_simple.drop, 5)
pytest.raises(ValueError, modin_simple.drop, "C", 1)
pytest.raises(ValueError, modin_simple.drop, [1, 5])
pytest.raises(ValueError, modin_simple.drop, ["A", "C"], 1)
# errors = 'ignore'
df_equals(modin_simple.drop(5, errors="ignore"), simple)
df_equals(modin_simple.drop([0, 5], errors="ignore"), simple.loc[[1, 2, 3], :])
df_equals(modin_simple.drop("C", axis=1, errors="ignore"), simple)
df_equals(modin_simple.drop(["A", "C"], axis=1, errors="ignore"), simple[["B"]])
# non-unique
nu_df = pandas.DataFrame(
zip(range(3), range(-3, 1), list("abc")), columns=["a", "a", "b"]
)
modin_nu_df = pd.DataFrame(nu_df)
df_equals(modin_nu_df.drop("a", axis=1), nu_df[["b"]])
df_equals(modin_nu_df.drop("b", axis="columns"), nu_df["a"])
df_equals(modin_nu_df.drop([]), nu_df)
nu_df = nu_df.set_index(pandas.Index(["X", "Y", "X"]))
nu_df.columns = list("abc")
modin_nu_df = pd.DataFrame(nu_df)
df_equals(modin_nu_df.drop("X", axis="rows"), nu_df.loc[["Y"], :])
df_equals(modin_nu_df.drop(["X", "Y"], axis=0), nu_df.loc[[], :])
        # Regression check: in-place drop with labels computed from the frame itself
        # (index cache issue)
frame_data = random_state.randn(10, 3)
df = pandas.DataFrame(frame_data, columns=list("abc"))
modin_df = pd.DataFrame(frame_data, columns=list("abc"))
expected = df[~(df.b > 0)]
modin_df.drop(labels=df[df.b > 0].index, inplace=True)
df_equals(modin_df, expected)
midx = pd.MultiIndex(
levels=[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
df = pd.DataFrame(
index=midx,
columns=["big", "small"],
data=[
[45, 30],
[200, 100],
[1.5, 1],
[30, 20],
[250, 150],
[1.5, 0.8],
[320, 250],
[1, 0.8],
[0.3, 0.2],
],
)
with pytest.warns(UserWarning):
df.drop(index="length", level=1)
def test_drop_api_equivalence(self):
# equivalence of the labels/axis and index/columns API's
frame_data = [[1, 2, 3], [3, 4, 5], [5, 6, 7]]
modin_df = pd.DataFrame(
frame_data, index=["a", "b", "c"], columns=["d", "e", "f"]
)
modin_df1 = modin_df.drop("a")
modin_df2 = modin_df.drop(index="a")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop("d", 1)
modin_df2 = modin_df.drop(columns="d")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(labels="e", axis=1)
modin_df2 = modin_df.drop(columns="e")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(["a"], axis=0)
modin_df2 = modin_df.drop(index=["a"])
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(["a"], axis=0).drop(["d"], axis=1)
modin_df2 = modin_df.drop(index=["a"], columns=["d"])
df_equals(modin_df1, modin_df2)
with pytest.raises(ValueError):
modin_df.drop(labels="a", index="b")
with pytest.raises(ValueError):
modin_df.drop(labels="a", columns="b")
with pytest.raises(ValueError):
modin_df.drop(axis=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_drop_transpose(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.T.drop(columns=[0, 1, 2])
pandas_result = pandas_df.T.drop(columns=[0, 1, 2])
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.drop(index=["col3", "col1"])
pandas_result = pandas_df.T.drop(index=["col3", "col1"])
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.drop(columns=[0, 1, 2], index=["col3", "col1"])
pandas_result = pandas_df.T.drop(columns=[0, 1, 2], index=["col3", "col1"])
df_equals(modin_result, pandas_result)
def test_droplevel(self):
df = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis(["a", "b"])
)
df.columns = pd.MultiIndex.from_tuples(
[("c", "e"), ("d", "f")], names=["level_1", "level_2"]
)
with pytest.warns(UserWarning):
df.droplevel("a")
with pytest.warns(UserWarning):
df.droplevel("level_2", axis=1)
@pytest.mark.parametrize(
"data", test_data_with_duplicates_values, ids=test_data_with_duplicates_keys
)
@pytest.mark.parametrize(
"keep", ["last", "first", False], ids=["last", "first", "False"]
)
@pytest.mark.parametrize(
"subset", [None, ["col1", "col3", "col7"]], ids=["None", "subset"]
)
def test_drop_duplicates(self, data, keep, subset):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(
modin_df.drop_duplicates(keep=keep, inplace=False, subset=subset),
pandas_df.drop_duplicates(keep=keep, inplace=False, subset=subset),
)
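        # With inplace=True both libraries return None, so this comparison mainly
        # confirms that neither call raises.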
modin_results = modin_df.drop_duplicates(keep=keep, inplace=True, subset=subset)
pandas_results = pandas_df.drop_duplicates(
keep=keep, inplace=True, subset=subset
)
df_equals(modin_results, pandas_results)
def test_drop_duplicates_with_missing_index_values(self):
data = {
"columns": ["value", "time", "id"],
"index": [
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
20,
21,
22,
23,
24,
25,
26,
27,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
],
"data": [
["3", 1279213398000.0, 88.0],
["3", 1279204682000.0, 88.0],
["0", 1245772835000.0, 448.0],
["0", 1270564258000.0, 32.0],
["0", 1267106669000.0, 118.0],
["7", 1300621123000.0, 5.0],
["0", 1251130752000.0, 957.0],
["0", 1311683506000.0, 62.0],
["9", 1283692698000.0, 89.0],
["9", 1270234253000.0, 64.0],
["0", 1285088818000.0, 50.0],
["0", 1218212725000.0, 695.0],
["2", 1383933968000.0, 348.0],
["0", 1368227625000.0, 257.0],
["1", 1454514093000.0, 446.0],
["1", 1428497427000.0, 134.0],
["1", 1459184936000.0, 568.0],
["1", 1502293302000.0, 599.0],
["1", 1491833358000.0, 829.0],
["1", 1485431534000.0, 806.0],
["8", 1351800505000.0, 101.0],
["0", 1357247721000.0, 916.0],
["0", 1335804423000.0, 370.0],
["24", 1327547726000.0, 720.0],
["0", 1332334140000.0, 415.0],
["0", 1309543100000.0, 30.0],
["18", 1309541141000.0, 30.0],
["0", 1298979435000.0, 48.0],
["14", 1276098160000.0, 59.0],
["0", 1233936302000.0, 109.0],
],
}
pandas_df = pandas.DataFrame(
data["data"], index=data["index"], columns=data["columns"]
)
modin_df = pd.DataFrame(
data["data"], index=data["index"], columns=data["columns"]
)
modin_result = modin_df.sort_values(["id", "time"]).drop_duplicates(["id"])
pandas_result = pandas_df.sort_values(["id", "time"]).drop_duplicates(["id"])
df_equals(modin_result, pandas_result)
def test_drop_duplicates_after_sort(self):
data = [
{"value": 1, "time": 2},
{"value": 1, "time": 1},
{"value": 2, "time": 1},
{"value": 2, "time": 2},
]
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.sort_values(["value", "time"]).drop_duplicates(
["value"]
)
pandas_result = pandas_df.sort_values(["value", "time"]).drop_duplicates(
["value"]
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("how", ["any", "all"], ids=["any", "all"])
def test_dropna(self, data, axis, how):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
modin_df.dropna(axis=axis, how="invalid")
with pytest.raises(TypeError):
modin_df.dropna(axis=axis, how=None, thresh=None)
with pytest.raises(KeyError):
modin_df.dropna(axis=axis, subset=["NotExists"], how=how)
modin_result = modin_df.dropna(axis=axis, how=how)
pandas_result = pandas_df.dropna(axis=axis, how=how)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_inplace(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_result = pandas_df.dropna()
modin_df.dropna(inplace=True)
df_equals(modin_df, pandas_result)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_df.dropna(thresh=2, inplace=True)
modin_df.dropna(thresh=2, inplace=True)
df_equals(modin_df, pandas_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_df.dropna(axis=1, how="any", inplace=True)
modin_df.dropna(axis=1, how="any", inplace=True)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_multiple_axes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(
modin_df.dropna(how="all", axis=[0, 1]),
pandas_df.dropna(how="all", axis=[0, 1]),
)
df_equals(
modin_df.dropna(how="all", axis=(0, 1)),
pandas_df.dropna(how="all", axis=(0, 1)),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_multiple_axes_inplace(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.dropna(how="all", axis=[0, 1], inplace=True)
pandas_df_copy.dropna(how="all", axis=[0, 1], inplace=True)
df_equals(modin_df_copy, pandas_df_copy)
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.dropna(how="all", axis=(0, 1), inplace=True)
pandas_df_copy.dropna(how="all", axis=(0, 1), inplace=True)
df_equals(modin_df_copy, pandas_df_copy)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_subset(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if "empty_data" not in request.node.name:
column_subset = modin_df.columns[0:2]
df_equals(
modin_df.dropna(how="all", subset=column_subset),
pandas_df.dropna(how="all", subset=column_subset),
)
df_equals(
modin_df.dropna(how="any", subset=column_subset),
pandas_df.dropna(how="any", subset=column_subset),
)
row_subset = modin_df.index[0:2]
df_equals(
modin_df.dropna(how="all", axis=1, subset=row_subset),
pandas_df.dropna(how="all", axis=1, subset=row_subset),
)
df_equals(
modin_df.dropna(how="any", axis=1, subset=row_subset),
pandas_df.dropna(how="any", axis=1, subset=row_subset),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_subset_error(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
        # pandas_df is unused, but building it here keeps the pytest.mark.parametrize
        # decorator free of confusing list-comprehension workarounds
with pytest.raises(KeyError):
modin_df.dropna(subset=list("EF"))
if len(modin_df.columns) < 5:
with pytest.raises(KeyError):
modin_df.dropna(axis=1, subset=[4, 5])
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dot(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
col_len = len(modin_df.columns)
# Test list input
arr = np.arange(col_len)
modin_result = modin_df.dot(arr)
pandas_result = pandas_df.dot(arr)
df_equals(modin_result, pandas_result)
# Test bad dimensions
with pytest.raises(ValueError):
modin_result = modin_df.dot(np.arange(col_len + 10))
# Test series input
modin_series = pd.Series(np.arange(col_len), index=modin_df.columns)
pandas_series = pandas.Series(np.arange(col_len), index=modin_df.columns)
modin_result = modin_df.dot(modin_series)
pandas_result = pandas_df.dot(pandas_series)
df_equals(modin_result, pandas_result)
# Test when input series index doesn't line up with columns
with pytest.raises(ValueError):
modin_result = modin_df.dot(pd.Series(np.arange(col_len)))
with pytest.warns(UserWarning):
modin_df.dot(modin_df.T)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"keep", ["last", "first", False], ids=["last", "first", "False"]
)
def test_duplicated(self, data, keep):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_result = pandas_df.duplicated(keep=keep)
modin_result = modin_df.duplicated(keep=keep)
df_equals(modin_result, pandas_result)
import random
subset = random.sample(
list(pandas_df.columns), random.randint(1, len(pandas_df.columns))
)
pandas_result = pandas_df.duplicated(keep=keep, subset=subset)
modin_result = modin_df.duplicated(keep=keep, subset=subset)
df_equals(modin_result, pandas_result)
def test_empty_df(self):
df = pd.DataFrame(index=["a", "b"])
df_is_empty(df)
tm.assert_index_equal(df.index, pd.Index(["a", "b"]))
assert len(df.columns) == 0
df = pd.DataFrame(columns=["a", "b"])
df_is_empty(df)
assert len(df.index) == 0
tm.assert_index_equal(df.columns, pd.Index(["a", "b"]))
df = pd.DataFrame()
df_is_empty(df)
assert len(df.index) == 0
assert len(df.columns) == 0
def test_equals(self):
frame_data = {"col1": [2.9, 3, 3, 3], "col2": [2, 3, 4, 1]}
modin_df1 = pd.DataFrame(frame_data)
modin_df2 = pd.DataFrame(frame_data)
assert modin_df1.equals(modin_df2)
df_equals(modin_df1, modin_df2)
df_equals(modin_df1, pd.DataFrame(modin_df1))
frame_data = {"col1": [2.9, 3, 3, 3], "col2": [2, 3, 5, 1]}
modin_df3 = pd.DataFrame(frame_data, index=list("abcd"))
assert not modin_df1.equals(modin_df3)
with pytest.raises(AssertionError):
df_equals(modin_df3, modin_df1)
with pytest.raises(AssertionError):
df_equals(modin_df3, modin_df2)
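        # equals should also accept the frame materialized back to plain pandas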
assert modin_df1.equals(modin_df2._query_compiler.to_pandas())
def test_eval_df_use_case(self):
frame_data = {"a": random_state.randn(10), "b": random_state.randn(10)}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
# test eval for series results
tmp_pandas = df.eval("arctan2(sin(a), b)", engine="python", parser="pandas")
tmp_modin = modin_df.eval(
"arctan2(sin(a), b)", engine="python", parser="pandas"
)
assert isinstance(tmp_modin, pd.Series)
df_equals(tmp_modin, tmp_pandas)
# Test not inplace assignments
tmp_pandas = df.eval("e = arctan2(sin(a), b)", engine="python", parser="pandas")
tmp_modin = modin_df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas"
)
df_equals(tmp_modin, tmp_pandas)
# Test inplace assignments
df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas", inplace=True
)
modin_df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas", inplace=True
)
# TODO: Use a series equality validator.
df_equals(modin_df, df)
def test_eval_df_arithmetic_subexpression(self):
frame_data = {"a": random_state.randn(10), "b": random_state.randn(10)}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df.eval("not_e = sin(a + b)", engine="python", parser="pandas", inplace=True)
modin_df.eval(
"not_e = sin(a + b)", engine="python", parser="pandas", inplace=True
)
# TODO: Use a series equality validator.
df_equals(modin_df, df)
def test_ewm(self):
df = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]})
with pytest.warns(UserWarning):
df.ewm(com=0.5).mean()
def test_expanding(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).expanding()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_explode(self, data):
modin_df = pd.DataFrame(data)
with pytest.warns(UserWarning):
modin_df.explode(modin_df.columns[0])
def test_ffill(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(modin_df.ffill(), test_data.tsframe.ffill())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"method",
["backfill", "bfill", "pad", "ffill", None],
ids=["backfill", "bfill", "pad", "ffill", "None"],
)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("limit", int_arg_values, ids=int_arg_keys)
def test_fillna(self, data, method, axis, limit):
# We are not testing when limit is not positive until pandas-27042 gets fixed.
# We are not testing when axis is over rows until pandas-17399 gets fixed.
if limit > 0 and axis != 1 and axis != "columns":
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.fillna(
0, method=method, axis=axis, limit=limit
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.fillna(0, method=method, axis=axis, limit=limit)
else:
modin_result = modin_df.fillna(0, method=method, axis=axis, limit=limit)
df_equals(modin_result, pandas_result)
def test_fillna_sanity(self):
test_data = TestData()
tf = test_data.tsframe
tf.loc[tf.index[:5], "A"] = np.nan
tf.loc[tf.index[-5:], "A"] = np.nan
zero_filled = test_data.tsframe.fillna(0)
modin_df = pd.DataFrame(test_data.tsframe).fillna(0)
df_equals(modin_df, zero_filled)
padded = test_data.tsframe.fillna(method="pad")
modin_df = pd.DataFrame(test_data.tsframe).fillna(method="pad")
df_equals(modin_df, padded)
# mixed type
mf = test_data.mixed_frame
mf.loc[mf.index[5:20], "foo"] = np.nan
mf.loc[mf.index[-10:], "A"] = np.nan
result = test_data.mixed_frame.fillna(value=0)
modin_df = pd.DataFrame(test_data.mixed_frame).fillna(value=0)
df_equals(modin_df, result)
result = test_data.mixed_frame.fillna(method="pad")
modin_df = pd.DataFrame(test_data.mixed_frame).fillna(method="pad")
df_equals(modin_df, result)
pytest.raises(ValueError, test_data.tsframe.fillna)
pytest.raises(ValueError, pd.DataFrame(test_data.tsframe).fillna)
with pytest.raises(ValueError):
pd.DataFrame(test_data.tsframe).fillna(5, method="ffill")
# mixed numeric (but no float16)
mf = test_data.mixed_float.reindex(columns=["A", "B", "D"])
mf.loc[mf.index[-10:], "A"] = np.nan
result = mf.fillna(value=0)
modin_df = pd.DataFrame(mf).fillna(value=0)
df_equals(modin_df, result)
result = mf.fillna(method="pad")
modin_df = pd.DataFrame(mf).fillna(method="pad")
df_equals(modin_df, result)
# TODO: Use this when Arrow issue resolves:
# (https://issues.apache.org/jira/browse/ARROW-2122)
# empty frame
# df = DataFrame(columns=['x'])
# for m in ['pad', 'backfill']:
# df.x.fillna(method=m, inplace=True)
# df.x.fillna(method=m)
# with different dtype
frame_data = [
["a", "a", np.nan, "a"],
["b", "b", np.nan, "b"],
["c", "c", np.nan, "c"],
]
df = pandas.DataFrame(frame_data)
result = df.fillna({2: "foo"})
modin_df = pd.DataFrame(frame_data).fillna({2: "foo"})
df_equals(modin_df, result)
modin_df = pd.DataFrame(df)
df.fillna({2: "foo"}, inplace=True)
modin_df.fillna({2: "foo"}, inplace=True)
df_equals(modin_df, result)
frame_data = {
"Date": [pandas.NaT, pandas.Timestamp("2014-1-1")],
"Date2": [pandas.Timestamp("2013-1-1"), pandas.NaT],
}
df = pandas.DataFrame(frame_data)
result = df.fillna(value={"Date": df["Date2"]})
modin_df = pd.DataFrame(frame_data).fillna(value={"Date": df["Date2"]})
df_equals(modin_df, result)
# TODO: Use this when Arrow issue resolves:
# (https://issues.apache.org/jira/browse/ARROW-2122)
# with timezone
"""
frame_data = {'A': [pandas.Timestamp('2012-11-11 00:00:00+01:00'),
pandas.NaT]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df_equals(modin_df.fillna(method='pad'), df.fillna(method='pad'))
frame_data = {'A': [pandas.NaT,
pandas.Timestamp('2012-11-11 00:00:00+01:00')]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data).fillna(method='bfill')
df_equals(modin_df, df.fillna(method='bfill'))
"""
def test_fillna_downcast(self):
# infer int64 from float64
frame_data = {"a": [1.0, np.nan]}
df = pandas.DataFrame(frame_data)
result = df.fillna(0, downcast="infer")
modin_df = pd.DataFrame(frame_data).fillna(0, downcast="infer")
df_equals(modin_df, result)
# infer int64 from float64 when fillna value is a dict
df = pandas.DataFrame(frame_data)
result = df.fillna({"a": 0}, downcast="infer")
modin_df = pd.DataFrame(frame_data).fillna({"a": 0}, downcast="infer")
df_equals(modin_df, result)
def test_ffill2(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(
modin_df.fillna(method="ffill"), test_data.tsframe.fillna(method="ffill")
)
def test_bfill2(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(
modin_df.fillna(method="bfill"), test_data.tsframe.fillna(method="bfill")
)
def test_fillna_inplace(self):
frame_data = random_state.randn(10, 4)
df = pandas.DataFrame(frame_data)
df[1][:4] = np.nan
df[3][-4:] = np.nan
modin_df = pd.DataFrame(df)
df.fillna(value=0, inplace=True)
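        # modin_df was built before the in-place fill, so this comparison is expected to fail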
try:
df_equals(modin_df, df)
except AssertionError:
pass
else:
assert False
modin_df.fillna(value=0, inplace=True)
df_equals(modin_df, df)
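        # fillna with inplace=True returns None, so the assignment below binds None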
modin_df = pd.DataFrame(df).fillna(value={0: 0}, inplace=True)
assert modin_df is None
df[1][:4] = np.nan
df[3][-4:] = np.nan
modin_df = pd.DataFrame(df)
df.fillna(method="ffill", inplace=True)
try:
df_equals(modin_df, df)
except AssertionError:
pass
else:
assert False
modin_df.fillna(method="ffill", inplace=True)
df_equals(modin_df, df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_frame_fillna_limit(self, data):
pandas_df = pandas.DataFrame(data)
index = pandas_df.index
result = pandas_df[:2].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="pad", limit=2), result.fillna(method="pad", limit=2)
)
result = pandas_df[-2:].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="backfill", limit=2),
result.fillna(method="backfill", limit=2),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_frame_pad_backfill_limit(self, data):
pandas_df = pandas.DataFrame(data)
index = pandas_df.index
result = pandas_df[:2].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="pad", limit=2), result.fillna(method="pad", limit=2)
)
result = pandas_df[-2:].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="backfill", limit=2),
result.fillna(method="backfill", limit=2),
)
def test_fillna_dtype_conversion(self):
# make sure that fillna on an empty frame works
df = pandas.DataFrame(index=range(3), columns=["A", "B"], dtype="float64")
modin_df = pd.DataFrame(index=range(3), columns=["A", "B"], dtype="float64")
df_equals(modin_df.fillna("nan"), df.fillna("nan"))
frame_data = {"A": [1, np.nan], "B": [1.0, 2.0]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
for v in ["", 1, np.nan, 1.0]:
df_equals(modin_df.fillna(v), df.fillna(v))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_skip_certain_blocks(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# don't try to fill boolean, int blocks
df_equals(modin_df.fillna(np.nan), pandas_df.fillna(np.nan))
def test_fillna_dict_series(self):
frame_data = {
"a": [np.nan, 1, 2, np.nan, np.nan],
"b": [1, 2, 3, np.nan, np.nan],
"c": [np.nan, 1, 2, 3, 4],
}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df_equals(modin_df.fillna({"a": 0, "b": 5}), df.fillna({"a": 0, "b": 5}))
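        # keys absent from the frame (like "d") are ignored by fillna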
df_equals(
modin_df.fillna({"a": 0, "b": 5, "d": 7}),
df.fillna({"a": 0, "b": 5, "d": 7}),
)
# Series treated same as dict
df_equals(modin_df.fillna(modin_df.max()), df.fillna(df.max()))
def test_fillna_dataframe(self):
frame_data = {
"a": [np.nan, 1, 2, np.nan, np.nan],
"b": [1, 2, 3, np.nan, np.nan],
"c": [np.nan, 1, 2, 3, 4],
}
df = pandas.DataFrame(frame_data, index=list("VWXYZ"))
modin_df = pd.DataFrame(frame_data, index=list("VWXYZ"))
# df2 may have different index and columns
df2 = pandas.DataFrame(
{
"a": [np.nan, 10, 20, 30, 40],
"b": [50, 60, 70, 80, 90],
"foo": ["bar"] * 5,
},
index=list("VWXuZ"),
)
modin_df2 = pd.DataFrame(df2)
# only those columns and indices which are shared get filled
df_equals(modin_df.fillna(modin_df2), df.fillna(df2))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_columns(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(
modin_df.fillna(method="ffill", axis=1),
pandas_df.fillna(method="ffill", axis=1),
)
df_equals(
modin_df.fillna(method="ffill", axis=1),
pandas_df.fillna(method="ffill", axis=1),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_invalid_method(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with tm.assert_raises_regex(ValueError, "ffil"):
modin_df.fillna(method="ffil")
def test_fillna_invalid_value(self):
test_data = TestData()
modin_df = pd.DataFrame(test_data.frame)
# list
pytest.raises(TypeError, modin_df.fillna, [1, 2])
# tuple
pytest.raises(TypeError, modin_df.fillna, (1, 2))
# frame with series
pytest.raises(TypeError, modin_df.iloc[:, 0].fillna, modin_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_col_reordering(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.fillna(method="ffill"), pandas_df.fillna(method="ffill"))
"""
TODO: Use this when Arrow issue resolves:
(https://issues.apache.org/jira/browse/ARROW-2122)
def test_fillna_datetime_columns(self):
frame_data = {'A': [-1, -2, np.nan],
'B': date_range('20130101', periods=3),
'C': ['foo', 'bar', None],
'D': ['foo2', 'bar2', None]}
df = pandas.DataFrame(frame_data, index=date_range('20130110', periods=3))
modin_df = pd.DataFrame(frame_data, index=date_range('20130110', periods=3))
df_equals(modin_df.fillna('?'), df.fillna('?'))
frame_data = {'A': [-1, -2, np.nan],
'B': [pandas.Timestamp('2013-01-01'),
pandas.Timestamp('2013-01-02'), pandas.NaT],
'C': ['foo', 'bar', None],
'D': ['foo2', 'bar2', None]}
df = pandas.DataFrame(frame_data, index=date_range('20130110', periods=3))
modin_df = pd.DataFrame(frame_data, index=date_range('20130110', periods=3))
df_equals(modin_df.fillna('?'), df.fillna('?'))
"""
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_filter(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
by = {"items": ["col1", "col5"], "regex": "4$|3$", "like": "col"}
df_equals(
modin_df.filter(items=by["items"]), pandas_df.filter(items=by["items"])
)
df_equals(
modin_df.filter(regex=by["regex"], axis=0),
pandas_df.filter(regex=by["regex"], axis=0),
)
df_equals(
modin_df.filter(regex=by["regex"], axis=1),
pandas_df.filter(regex=by["regex"], axis=1),
)
df_equals(modin_df.filter(like=by["like"]), pandas_df.filter(like=by["like"]))
with pytest.raises(TypeError):
modin_df.filter(items=by["items"], regex=by["regex"])
with pytest.raises(TypeError):
modin_df.filter()
def test_first(self):
i = pd.date_range("2018-04-09", periods=4, freq="2D")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.first("3D")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_first_valid_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.first_valid_index() == (pandas_df.first_valid_index())
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_from_dict(self, data):
modin_df = pd.DataFrame(data) # noqa F841
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
pd.DataFrame.from_dict(None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_from_items(self, data):
modin_df = pd.DataFrame(data) # noqa F841
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
pd.DataFrame.from_items(None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_from_records(self, data):
modin_df = pd.DataFrame(data) # noqa F841
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
pd.DataFrame.from_records(None)
def test_get_value(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).get_value(0, "col1")
def test_get_values(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).get_values()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("n", int_arg_values, ids=arg_keys("n", int_arg_keys))
def test_head(self, data, n):
# Test normal dataframe head
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.head(n), pandas_df.head(n))
df_equals(modin_df.head(len(modin_df) + 1), pandas_df.head(len(pandas_df) + 1))
# Test head when we call it from a QueryCompilerView
modin_result = modin_df.loc[:, ["col1", "col3", "col3"]].head(n)
pandas_result = pandas_df.loc[:, ["col1", "col3", "col3"]].head(n)
df_equals(modin_result, pandas_result)
def test_hist(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).hist(None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iat(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
modin_df.iat()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_idxmax(self, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_result = pandas_df.idxmax(axis=axis, skipna=skipna)
modin_result = modin_df.idxmax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
pandas_result = pandas_df.T.idxmax(axis=axis, skipna=skipna)
modin_result = modin_df.T.idxmax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_idxmin(self, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.idxmin(axis=axis, skipna=skipna)
pandas_result = pandas_df.idxmin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.idxmin(axis=axis, skipna=skipna)
pandas_result = pandas_df.T.idxmin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
def test_infer_objects(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).infer_objects()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iloc(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if not name_contains(request.node.name, ["empty_data"]):
            # Scalar
np.testing.assert_equal(modin_df.iloc[0, 1], pandas_df.iloc[0, 1])
# Series
df_equals(modin_df.iloc[0], pandas_df.iloc[0])
df_equals(modin_df.iloc[1:, 0], pandas_df.iloc[1:, 0])
df_equals(modin_df.iloc[1:2, 0], pandas_df.iloc[1:2, 0])
# DataFrame
df_equals(modin_df.iloc[[1, 2]], pandas_df.iloc[[1, 2]])
# See issue #80
# df_equals(modin_df.iloc[[1, 2], [1, 0]], pandas_df.iloc[[1, 2], [1, 0]])
df_equals(modin_df.iloc[1:2, 0:2], pandas_df.iloc[1:2, 0:2])
# Issue #43
modin_df.iloc[0:3, :]
# Write Item
modin_df.iloc[[1, 2]] = 42
pandas_df.iloc[[1, 2]] = 42
df_equals(modin_df, pandas_df)
else:
with pytest.raises(IndexError):
modin_df.iloc[0, 1]
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.index, pandas_df.index)
modin_df_cp = modin_df.copy()
pandas_df_cp = pandas_df.copy()
modin_df_cp.index = [str(i) for i in modin_df_cp.index]
pandas_df_cp.index = [str(i) for i in pandas_df_cp.index]
df_equals(modin_df_cp.index, pandas_df_cp.index)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_indexing_duplicate_axis(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
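        # force duplicate index labels: groups of three consecutive rows share a label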
modin_df.index = pandas_df.index = [i // 3 for i in range(len(modin_df))]
assert any(modin_df.index.duplicated())
assert any(pandas_df.index.duplicated())
df_equals(modin_df.iloc[0], pandas_df.iloc[0])
df_equals(modin_df.loc[0], pandas_df.loc[0])
df_equals(modin_df.iloc[0, 0:4], pandas_df.iloc[0, 0:4])
df_equals(
modin_df.loc[0, modin_df.columns[0:4]],
pandas_df.loc[0, pandas_df.columns[0:4]],
)
def test_info(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).info(memory_usage="deep")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("loc", int_arg_values, ids=arg_keys("loc", int_arg_keys))
def test_insert(self, data, loc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df = modin_df.copy()
pandas_df = pandas_df.copy()
column = "New Column"
value = modin_df.iloc[:, 0]
try:
pandas_df.insert(loc, column, value)
except Exception as e:
with pytest.raises(type(e)):
modin_df.insert(loc, column, value)
else:
modin_df.insert(loc, column, value)
df_equals(modin_df, pandas_df)
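        # inserting a multi-column DataFrame under a single label should raise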
with pytest.raises(ValueError):
modin_df.insert(0, "Bad Column", modin_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.insert(0, "Duplicate", modin_df[modin_df.columns[0]])
pandas_df.insert(0, "Duplicate", pandas_df[pandas_df.columns[0]])
df_equals(modin_df, pandas_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.insert(0, "Scalar", 100)
pandas_df.insert(0, "Scalar", 100)
df_equals(modin_df, pandas_df)
with pytest.raises(ValueError):
modin_df.insert(0, "Too Short", list(modin_df[modin_df.columns[0]])[:-1])
with pytest.raises(ValueError):
modin_df.insert(0, modin_df.columns[0], modin_df[modin_df.columns[0]])
with pytest.raises(IndexError):
modin_df.insert(len(modin_df.columns) + 100, "Bad Loc", 100)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
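        # insert works in place and returns None, so both results below are None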
modin_result = pd.DataFrame(columns=list("ab")).insert(
0, modin_df.columns[0], modin_df[modin_df.columns[0]]
)
pandas_result = pandas.DataFrame(columns=list("ab")).insert(
0, pandas_df.columns[0], pandas_df[pandas_df.columns[0]]
)
df_equals(modin_result, pandas_result)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = pd.DataFrame(index=modin_df.index).insert(
0, modin_df.columns[0], modin_df[modin_df.columns[0]]
)
pandas_result = pandas.DataFrame(index=pandas_df.index).insert(
0, pandas_df.columns[0], pandas_df[pandas_df.columns[0]]
)
df_equals(modin_result, pandas_result)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.insert(
0, "DataFrame insert", modin_df[[modin_df.columns[0]]]
)
pandas_result = pandas_df.insert(
0, "DataFrame insert", pandas_df[[pandas_df.columns[0]]]
)
df_equals(modin_result, pandas_result)
def test_interpolate(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).interpolate()
def test_is_copy(self):
data = test_data_values[0]
with pytest.warns(FutureWarning):
assert pd.DataFrame(data).is_copy == pandas.DataFrame(data).is_copy
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_items(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_items = modin_df.items()
pandas_items = pandas_df.items()
for modin_item, pandas_item in zip(modin_items, pandas_items):
modin_index, modin_series = modin_item
pandas_index, pandas_series = pandas_item
df_equals(pandas_series, modin_series)
assert pandas_index == modin_index
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iteritems(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_items = modin_df.iteritems()
pandas_items = pandas_df.iteritems()
for modin_item, pandas_item in zip(modin_items, pandas_items):
modin_index, modin_series = modin_item
pandas_index, pandas_series = pandas_item
df_equals(pandas_series, modin_series)
assert pandas_index == modin_index
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iterrows(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_iterrows = modin_df.iterrows()
pandas_iterrows = pandas_df.iterrows()
for modin_row, pandas_row in zip(modin_iterrows, pandas_iterrows):
modin_index, modin_series = modin_row
pandas_index, pandas_series = pandas_row
df_equals(pandas_series, modin_series)
assert pandas_index == modin_index
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_itertuples(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# test default
modin_it_default = modin_df.itertuples()
pandas_it_default = pandas_df.itertuples()
for modin_row, pandas_row in zip(modin_it_default, pandas_it_default):
np.testing.assert_equal(modin_row, pandas_row)
# test all combinations of custom params
indices = [True, False]
names = [None, "NotPandas", "Pandas"]
for index in indices:
for name in names:
modin_it_custom = modin_df.itertuples(index=index, name=name)
pandas_it_custom = pandas_df.itertuples(index=index, name=name)
for modin_row, pandas_row in zip(modin_it_custom, pandas_it_custom):
np.testing.assert_equal(modin_row, pandas_row)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
modin_df.ix()
def test_join(self):
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 0, 1],
"col4": [2, 4, 5, 6],
}
modin_df = pd.DataFrame(frame_data)
pandas_df = pandas.DataFrame(frame_data)
frame_data2 = {"col5": [0], "col6": [1]}
modin_df2 = pd.DataFrame(frame_data2)
pandas_df2 = pandas.DataFrame(frame_data2)
join_types = ["left", "right", "outer", "inner"]
for how in join_types:
modin_join = modin_df.join(modin_df2, how=how)
pandas_join = pandas_df.join(pandas_df2, how=how)
df_equals(modin_join, pandas_join)
frame_data3 = {"col7": [1, 2, 3, 5, 6, 7, 8]}
modin_df3 = pd.DataFrame(frame_data3)
pandas_df3 = pandas.DataFrame(frame_data3)
join_types = ["left", "outer", "inner"]
for how in join_types:
modin_join = modin_df.join([modin_df2, modin_df3], how=how)
pandas_join = pandas_df.join([pandas_df2, pandas_df3], how=how)
df_equals(modin_join, pandas_join)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_keys(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.keys(), pandas_df.keys())
def test_kurt(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).kurt()
def test_kurtosis(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).kurtosis()
def test_last(self):
i = pd.date_range("2018-04-09", periods=4, freq="2D")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.last("3D")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_last_valid_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.last_valid_index() == (pandas_df.last_valid_index())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_loc(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# We skip nan datasets because nan != nan
if "nan" not in request.node.name:
key1 = modin_df.columns[0]
key2 = modin_df.columns[1]
            # Scalar
assert modin_df.loc[0, key1] == pandas_df.loc[0, key1]
# Series
df_equals(modin_df.loc[0], pandas_df.loc[0])
df_equals(modin_df.loc[1:, key1], pandas_df.loc[1:, key1])
df_equals(modin_df.loc[1:2, key1], pandas_df.loc[1:2, key1])
# DataFrame
df_equals(modin_df.loc[[1, 2]], pandas_df.loc[[1, 2]])
# List-like of booleans
indices = [
True if i % 3 == 0 else False for i in range(len(modin_df.index))
]
columns = [
True if i % 5 == 0 else False for i in range(len(modin_df.columns))
]
modin_result = modin_df.loc[indices, columns]
pandas_result = pandas_df.loc[indices, columns]
df_equals(modin_result, pandas_result)
# See issue #80
# df_equals(modin_df.loc[[1, 2], ['col1']], pandas_df.loc[[1, 2], ['col1']])
df_equals(modin_df.loc[1:2, key1:key2], pandas_df.loc[1:2, key1:key2])
# From issue #421
df_equals(modin_df.loc[:, [key2, key1]], pandas_df.loc[:, [key2, key1]])
df_equals(modin_df.loc[[2, 1], :], pandas_df.loc[[2, 1], :])
# Write Item
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.loc[[1, 2]] = 42
pandas_df_copy.loc[[1, 2]] = 42
df_equals(modin_df_copy, pandas_df_copy)
def test_loc_multi_index(self):
modin_df = pd.read_csv(
"modin/pandas/test/data/blah.csv", header=[0, 1, 2, 3], index_col=0
)
pandas_df = pandas.read_csv(
"modin/pandas/test/data/blah.csv", header=[0, 1, 2, 3], index_col=0
)
df_equals(modin_df.loc[1], pandas_df.loc[1])
df_equals(modin_df.loc[1, "Presidents"], pandas_df.loc[1, "Presidents"])
df_equals(
modin_df.loc[1, ("Presidents", "Pure mentions")],
pandas_df.loc[1, ("Presidents", "Pure mentions")],
)
assert (
modin_df.loc[1, ("Presidents", "Pure mentions", "IND", "all")]
== pandas_df.loc[1, ("Presidents", "Pure mentions", "IND", "all")]
)
df_equals(
modin_df.loc[(1, 2), "Presidents"], pandas_df.loc[(1, 2), "Presidents"]
)
tuples = [
("bar", "one"),
("bar", "two"),
("bar", "three"),
("bar", "four"),
("baz", "one"),
("baz", "two"),
("baz", "three"),
("baz", "four"),
("foo", "one"),
("foo", "two"),
("foo", "three"),
("foo", "four"),
("qux", "one"),
("qux", "two"),
("qux", "three"),
("qux", "four"),
]
modin_index = pd.MultiIndex.from_tuples(tuples, names=["first", "second"])
pandas_index = pandas.MultiIndex.from_tuples(tuples, names=["first", "second"])
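        # wider frame over a two-level MultiIndex to exercise partial and full label lookups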
frame_data = np.random.randint(0, 100, size=(16, 100))
modin_df = pd.DataFrame(
frame_data,
index=modin_index,
columns=["col{}".format(i) for i in range(100)],
)
pandas_df = pandas.DataFrame(
frame_data,
index=pandas_index,
columns=["col{}".format(i) for i in range(100)],
)
df_equals(modin_df.loc["bar", "col1"], pandas_df.loc["bar", "col1"])
assert (
modin_df.loc[("bar", "one"), "col1"]
== pandas_df.loc[("bar", "one"), "col1"]
)
df_equals(
modin_df.loc["bar", ("col1", "col2")],
pandas_df.loc["bar", ("col1", "col2")],
)
def test_lookup(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).lookup([0, 1], ["col1", "col2"])
def test_mad(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).mad()
def test_mask(self):
df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"])
m = df % 3 == 0
with pytest.warns(UserWarning):
try:
df.mask(~m, -df)
except ValueError:
pass
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_max(self, request, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.max(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.max(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.max(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.max(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.max(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.max(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_mean(self, request, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.mean(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.mean(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.mean(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.mean(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.mean(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.mean(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_median(self, request, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.median(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.median(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.median(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.median(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.median(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.median(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
class TestDFPartTwo:
def test_melt(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).melt()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"index", bool_arg_values, ids=arg_keys("index", bool_arg_keys)
)
def test_memory_usage(self, data, index):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
modin_result = modin_df.memory_usage(index=index)
pandas_result = pandas_df.memory_usage(index=index)
df_equals(modin_result, pandas_result)
def test_merge(self):
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 0, 1],
"col4": [2, 4, 5, 6],
}
modin_df = pd.DataFrame(frame_data)
pandas_df = pandas.DataFrame(frame_data)
frame_data2 = {"col1": [0, 1, 2], "col2": [1, 5, 6]}
modin_df2 = pd.DataFrame(frame_data2)
pandas_df2 = pandas.DataFrame(frame_data2)
join_types = ["outer", "inner"]
for how in join_types:
# Defaults
modin_result = modin_df.merge(modin_df2, how=how)
pandas_result = pandas_df.merge(pandas_df2, how=how)
df_equals(modin_result, pandas_result)
# left_on and right_index
modin_result = modin_df.merge(
modin_df2, how=how, left_on="col1", right_index=True
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_on="col1", right_index=True
)
df_equals(modin_result, pandas_result)
# left_index and right_on
modin_result = modin_df.merge(
modin_df2, how=how, left_index=True, right_on="col1"
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_index=True, right_on="col1"
)
df_equals(modin_result, pandas_result)
# left_on and right_on col1
modin_result = modin_df.merge(
modin_df2, how=how, left_on="col1", right_on="col1"
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_on="col1", right_on="col1"
)
df_equals(modin_result, pandas_result)
# left_on and right_on col2
modin_result = modin_df.merge(
modin_df2, how=how, left_on="col2", right_on="col2"
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_on="col2", right_on="col2"
)
df_equals(modin_result, pandas_result)
# left_index and right_index
modin_result = modin_df.merge(
modin_df2, how=how, left_index=True, right_index=True
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_index=True, right_index=True
)
df_equals(modin_result, pandas_result)
# Named Series promoted to DF
s = pd.Series(frame_data2.get("col1"))
with pytest.raises(ValueError):
modin_df.merge(s)
s = pd.Series(frame_data2.get("col1"), name="col1")
df_equals(modin_df.merge(s), modin_df.merge(modin_df2[["col1"]]))
with pytest.raises(ValueError):
modin_df.merge("Non-valid type")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_min(self, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.min(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.min(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.min(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.min(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.min(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.min(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_mode(self, request, data, axis, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.mode(axis=axis, numeric_only=numeric_only)
except Exception:
with pytest.raises(TypeError):
modin_df.mode(axis=axis, numeric_only=numeric_only)
else:
modin_result = modin_df.mode(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ndim(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.ndim == pandas_df.ndim
def test_nlargest(self):
df = pd.DataFrame(
{
"population": [
59000000,
65000000,
434000,
434000,
434000,
337000,
11300,
11300,
11300,
],
"GDP": [1937894, 2583560, 12011, 4520, 12128, 17036, 182, 38, 311],
"alpha-2": ["IT", "FR", "MT", "MV", "BN", "IS", "NR", "TV", "AI"],
},
index=[
"Italy",
"France",
"Malta",
"Maldives",
"Brunei",
"Iceland",
"Nauru",
"Tuvalu",
"Anguilla",
],
)
with pytest.warns(UserWarning):
df.nlargest(3, "population")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_notna(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.notna(), pandas_df.notna())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_notnull(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.notnull(), pandas_df.notnull())
def test_nsmallest(self):
df = pd.DataFrame(
{
"population": [
59000000,
65000000,
434000,
434000,
434000,
337000,
11300,
11300,
11300,
],
"GDP": [1937894, 2583560, 12011, 4520, 12128, 17036, 182, 38, 311],
"alpha-2": ["IT", "FR", "MT", "MV", "BN", "IS", "NR", "TV", "AI"],
},
index=[
"Italy",
"France",
"Malta",
"Maldives",
"Brunei",
"Iceland",
"Nauru",
"Tuvalu",
"Anguilla",
],
)
with pytest.warns(UserWarning):
df.nsmallest(3, "population")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"dropna", bool_arg_values, ids=arg_keys("dropna", bool_arg_keys)
)
def test_nunique(self, data, axis, dropna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.nunique(axis=axis, dropna=dropna)
pandas_result = pandas_df.nunique(axis=axis, dropna=dropna)
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.nunique(axis=axis, dropna=dropna)
pandas_result = pandas_df.T.nunique(axis=axis, dropna=dropna)
df_equals(modin_result, pandas_result)
def test_pct_change(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).pct_change()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pipe(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
n = len(modin_df.index)
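        # the modulo keeps the labels used below within range for the smaller test frames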
a, b, c = 2 % n, 0, 3 % n
col = modin_df.columns[3 % len(modin_df.columns)]
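        # pipe helpers: h drops one column, g doubles the frame arg1 times via append, f drops two rows by label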
def h(x):
return x.drop(columns=[col])
def g(x, arg1=0):
for _ in range(arg1):
x = x.append(x)
return x
def f(x, arg2=0, arg3=0):
return x.drop([arg2, arg3])
df_equals(
f(g(h(modin_df), arg1=a), arg2=b, arg3=c),
(modin_df.pipe(h).pipe(g, arg1=a).pipe(f, arg2=b, arg3=c)),
)
df_equals(
(modin_df.pipe(h).pipe(g, arg1=a).pipe(f, arg2=b, arg3=c)),
(pandas_df.pipe(h).pipe(g, arg1=a).pipe(f, arg2=b, arg3=c)),
)
def test_pivot(self):
df = pd.DataFrame(
{
"foo": ["one", "one", "one", "two", "two", "two"],
"bar": ["A", "B", "C", "A", "B", "C"],
"baz": [1, 2, 3, 4, 5, 6],
"zoo": ["x", "y", "z", "q", "w", "t"],
}
)
with pytest.warns(UserWarning):
df.pivot(index="foo", columns="bar", values="baz")
def test_pivot_table(self):
df = pd.DataFrame(
{
"A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
"B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
"C": [
"small",
"large",
"large",
"small",
"small",
"large",
"small",
"small",
"large",
],
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
}
)
with pytest.warns(UserWarning):
df.pivot_table(values="D", index=["A", "B"], columns=["C"], aggfunc=np.sum)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_plot(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
# We have to test this way because equality in plots means same object.
zipped_plot_lines = zip(modin_df.plot().lines, pandas_df.plot().lines)
for l, r in zipped_plot_lines:
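                # lines drawn from data containing NaNs may come back as masked arrays, so compare the underlying data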
if isinstance(l.get_xdata(), np.ma.core.MaskedArray) and isinstance(
r.get_xdata(), np.ma.core.MaskedArray
):
assert all((l.get_xdata() == r.get_xdata()).data)
else:
assert np.array_equal(l.get_xdata(), r.get_xdata())
if isinstance(l.get_ydata(), np.ma.core.MaskedArray) and isinstance(
r.get_ydata(), np.ma.core.MaskedArray
):
assert all((l.get_ydata() == r.get_ydata()).data)
else:
                    assert np.array_equal(l.get_ydata(), r.get_ydata())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pop(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if "empty_data" not in request.node.name:
key = modin_df.columns[0]
temp_modin_df = modin_df.copy()
temp_pandas_df = pandas_df.copy()
modin_popped = temp_modin_df.pop(key)
pandas_popped = temp_pandas_df.pop(key)
df_equals(modin_popped, pandas_popped)
df_equals(temp_modin_df, temp_pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
@pytest.mark.parametrize(
"min_count", int_arg_values, ids=arg_keys("min_count", int_arg_keys)
)
def test_prod(self, request, data, axis, skipna, numeric_only, min_count):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.prod(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
except Exception:
with pytest.raises(TypeError):
modin_df.prod(
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
else:
modin_result = modin_df.prod(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.prod(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.prod(
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
else:
modin_result = modin_df.T.prod(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
@pytest.mark.parametrize(
"min_count", int_arg_values, ids=arg_keys("min_count", int_arg_keys)
)
def test_product(self, request, data, axis, skipna, numeric_only, min_count):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.product(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
except Exception:
with pytest.raises(TypeError):
modin_df.product(
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
else:
modin_result = modin_df.product(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("q", quantiles_values, ids=quantiles_keys)
def test_quantile(self, request, data, q):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if not name_contains(request.node.name, no_numeric_dfs):
df_equals(modin_df.quantile(q), pandas_df.quantile(q))
df_equals(modin_df.quantile(q, axis=1), pandas_df.quantile(q, axis=1))
try:
pandas_result = pandas_df.quantile(q, axis=1, numeric_only=False)
except Exception as e:
with pytest.raises(type(e)):
modin_df.quantile(q, axis=1, numeric_only=False)
else:
modin_result = modin_df.quantile(q, axis=1, numeric_only=False)
df_equals(modin_result, pandas_result)
else:
with pytest.raises(ValueError):
modin_df.quantile(q)
if not name_contains(request.node.name, no_numeric_dfs):
df_equals(modin_df.T.quantile(q), pandas_df.T.quantile(q))
df_equals(modin_df.T.quantile(q, axis=1), pandas_df.T.quantile(q, axis=1))
try:
pandas_result = pandas_df.T.quantile(q, axis=1, numeric_only=False)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.quantile(q, axis=1, numeric_only=False)
else:
modin_result = modin_df.T.quantile(q, axis=1, numeric_only=False)
df_equals(modin_result, pandas_result)
else:
with pytest.raises(ValueError):
modin_df.T.quantile(q)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("funcs", query_func_values, ids=query_func_keys)
def test_query(self, data, funcs):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
modin_df.query("")
with pytest.raises(NotImplementedError):
x = 2 # noqa F841
modin_df.query("col1 < @x")
try:
pandas_result = pandas_df.query(funcs)
except Exception as e:
with pytest.raises(type(e)):
modin_df.query(funcs)
else:
modin_result = modin_df.query(funcs)
df_equals(modin_result, pandas_result)
def test_query_after_insert(self):
modin_df = pd.DataFrame({"x": [-1, 0, 1, None], "y": [1, 2, None, 3]})
modin_df["z"] = modin_df.eval("x / y")
modin_df = modin_df.query("z >= 0")
modin_result = modin_df.reset_index(drop=True)
modin_result.columns = ["a", "b", "c"]
        pandas_df = pandas.DataFrame({"x": [-1, 0, 1, None], "y": [1, 2, None, 3]})
pandas_df["z"] = pandas_df.eval("x / y")
pandas_df = pandas_df.query("z >= 0")
pandas_result = pandas_df.reset_index(drop=True)
pandas_result.columns = ["a", "b", "c"]
df_equals(modin_result, pandas_result)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
@pytest.mark.parametrize(
"na_option", ["keep", "top", "bottom"], ids=["keep", "top", "bottom"]
)
def test_rank(self, data, axis, numeric_only, na_option):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.rank(
axis=axis, numeric_only=numeric_only, na_option=na_option
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.rank(axis=axis, numeric_only=numeric_only, na_option=na_option)
else:
modin_result = modin_df.rank(
axis=axis, numeric_only=numeric_only, na_option=na_option
)
df_equals(modin_result, pandas_result)
def test_reindex(self):
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 10, 11],
"col4": [12, 13, 14, 15],
"col5": [0, 0, 0, 0],
}
pandas_df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df_equals(modin_df.reindex([0, 3, 2, 1]), pandas_df.reindex([0, 3, 2, 1]))
df_equals(modin_df.reindex([0, 6, 2]), pandas_df.reindex([0, 6, 2]))
df_equals(
modin_df.reindex(["col1", "col3", "col4", "col2"], axis=1),
pandas_df.reindex(["col1", "col3", "col4", "col2"], axis=1),
)
df_equals(
modin_df.reindex(["col1", "col7", "col4", "col8"], axis=1),
pandas_df.reindex(["col1", "col7", "col4", "col8"], axis=1),
)
df_equals(
modin_df.reindex(index=[0, 1, 5], columns=["col1", "col7", "col4", "col8"]),
pandas_df.reindex(
index=[0, 1, 5], columns=["col1", "col7", "col4", "col8"]
),
)
df_equals(
modin_df.T.reindex(["col1", "col7", "col4", "col8"], axis=0),
pandas_df.T.reindex(["col1", "col7", "col4", "col8"], axis=0),
)
def test_reindex_like(self):
df1 = pd.DataFrame(
[
[24.3, 75.7, "high"],
[31, 87.8, "high"],
[22, 71.6, "medium"],
[35, 95, "medium"],
],
columns=["temp_celsius", "temp_fahrenheit", "windspeed"],
index=pd.date_range(start="2014-02-12", end="2014-02-15", freq="D"),
)
df2 = pd.DataFrame(
[[28, "low"], [30, "low"], [35.1, "medium"]],
columns=["temp_celsius", "windspeed"],
index=pd.DatetimeIndex(["2014-02-12", "2014-02-13", "2014-02-15"]),
)
with pytest.warns(UserWarning):
df2.reindex_like(df1)
def test_rename_sanity(self):
test_data = TestData()
mapping = {"A": "a", "B": "b", "C": "c", "D": "d"}
modin_df = pd.DataFrame(test_data.frame)
df_equals(
modin_df.rename(columns=mapping), test_data.frame.rename(columns=mapping)
)
renamed2 = test_data.frame.rename(columns=str.lower)
df_equals(modin_df.rename(columns=str.lower), renamed2)
modin_df = pd.DataFrame(renamed2)
df_equals(
modin_df.rename(columns=str.upper), renamed2.rename(columns=str.upper)
)
# index
data = {"A": {"foo": 0, "bar": 1}}
        # gets sorted alphabetically
df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
tm.assert_index_equal(
modin_df.rename(index={"foo": "bar", "bar": "foo"}).index,
df.rename(index={"foo": "bar", "bar": "foo"}).index,
)
tm.assert_index_equal(
modin_df.rename(index=str.upper).index, df.rename(index=str.upper).index
)
# have to pass something
with pytest.raises(TypeError):
modin_df.rename()
# partial columns
renamed = test_data.frame.rename(columns={"C": "foo", "D": "bar"})
modin_df = pd.DataFrame(test_data.frame)
tm.assert_index_equal(
modin_df.rename(columns={"C": "foo", "D": "bar"}).index,
test_data.frame.rename(columns={"C": "foo", "D": "bar"}).index,
)
# TODO: Uncomment when transpose works
# other axis
# renamed = test_data.frame.T.rename(index={'C': 'foo', 'D': 'bar'})
# tm.assert_index_equal(
# test_data.frame.T.rename(index={'C': 'foo', 'D': 'bar'}).index,
# modin_df.T.rename(index={'C': 'foo', 'D': 'bar'}).index)
# index with name
index = pandas.Index(["foo", "bar"], name="name")
renamer = pandas.DataFrame(data, index=index)
modin_df = pd.DataFrame(data, index=index)
renamed = renamer.rename(index={"foo": "bar", "bar": "foo"})
modin_renamed = modin_df.rename(index={"foo": "bar", "bar": "foo"})
tm.assert_index_equal(renamed.index, modin_renamed.index)
assert renamed.index.name == modin_renamed.index.name
def test_rename_multiindex(self):
tuples_index = [("foo1", "bar1"), ("foo2", "bar2")]
tuples_columns = [("fizz1", "buzz1"), ("fizz2", "buzz2")]
index = pandas.MultiIndex.from_tuples(tuples_index, names=["foo", "bar"])
columns = pandas.MultiIndex.from_tuples(tuples_columns, names=["fizz", "buzz"])
frame_data = [(0, 0), (1, 1)]
df = pandas.DataFrame(frame_data, index=index, columns=columns)
modin_df = pd.DataFrame(frame_data, index=index, columns=columns)
#
        # without specifying level -> across all levels
renamed = df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
modin_renamed = modin_df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
tm.assert_index_equal(renamed.index, modin_renamed.index)
renamed = df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
assert renamed.index.names == modin_renamed.index.names
assert renamed.columns.names == modin_renamed.columns.names
#
# with specifying a level
# dict
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=0)
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=0
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="fizz")
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="fizz"
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=1)
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=1
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="buzz")
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="buzz"
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
# function
func = str.upper
renamed = df.rename(columns=func, level=0)
modin_renamed = modin_df.rename(columns=func, level=0)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns=func, level="fizz")
modin_renamed = modin_df.rename(columns=func, level="fizz")
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns=func, level=1)
modin_renamed = modin_df.rename(columns=func, level=1)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns=func, level="buzz")
modin_renamed = modin_df.rename(columns=func, level="buzz")
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
# index
renamed = df.rename(index={"foo1": "foo3", "bar2": "bar3"}, level=0)
modin_renamed = modin_df.rename(index={"foo1": "foo3", "bar2": "bar3"}, level=0)
tm.assert_index_equal(modin_renamed.index, renamed.index)
@pytest.mark.skip(reason="Pandas does not pass this test")
def test_rename_nocopy(self):
test_data = TestData().frame
modin_df = pd.DataFrame(test_data)
modin_renamed = modin_df.rename(columns={"C": "foo"}, copy=False)
modin_renamed["foo"] = 1
assert (modin_df["C"] == 1).all()
def test_rename_inplace(self):
test_data = TestData().frame
modin_df = pd.DataFrame(test_data)
df_equals(
modin_df.rename(columns={"C": "foo"}),
test_data.rename(columns={"C": "foo"}),
)
frame = test_data.copy()
modin_frame = modin_df.copy()
frame.rename(columns={"C": "foo"}, inplace=True)
modin_frame.rename(columns={"C": "foo"}, inplace=True)
df_equals(modin_frame, frame)
def test_rename_bug(self):
# rename set ref_locs, and set_index was not resetting
frame_data = {0: ["foo", "bar"], 1: ["bah", "bas"], 2: [1, 2]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df = df.rename(columns={0: "a"})
df = df.rename(columns={1: "b"})
# TODO: Uncomment when set_index is implemented
# df = df.set_index(['a', 'b'])
# df.columns = ['2001-01-01']
modin_df = modin_df.rename(columns={0: "a"})
modin_df = modin_df.rename(columns={1: "b"})
# TODO: Uncomment when set_index is implemented
# modin_df = modin_df.set_index(['a', 'b'])
# modin_df.columns = ['2001-01-01']
df_equals(modin_df, df)
def test_rename_axis(self):
data = {"num_legs": [4, 4, 2], "num_arms": [0, 0, 2]}
index = ["dog", "cat", "monkey"]
modin_df = pd.DataFrame(data, index)
pandas_df = pandas.DataFrame(data, index)
df_equals(modin_df.rename_axis("animal"), pandas_df.rename_axis("animal"))
df_equals(
modin_df.rename_axis("limbs", axis="columns"),
pandas_df.rename_axis("limbs", axis="columns"),
)
modin_df.rename_axis("limbs", axis="columns", inplace=True)
pandas_df.rename_axis("limbs", axis="columns", inplace=True)
df_equals(modin_df, pandas_df)
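        # switch both frames to a MultiIndex to exercise renaming individual index levels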
new_index = pd.MultiIndex.from_product(
[["mammal"], ["dog", "cat", "monkey"]], names=["type", "name"]
)
modin_df.index = new_index
pandas_df.index = new_index
df_equals(
modin_df.rename_axis(index={"type": "class"}),
pandas_df.rename_axis(index={"type": "class"}),
)
df_equals(
modin_df.rename_axis(columns=str.upper),
pandas_df.rename_axis(columns=str.upper),
)
df_equals(
modin_df.rename_axis(
columns=[str.upper(o) for o in modin_df.columns.names]
),
pandas_df.rename_axis(
columns=[str.upper(o) for o in pandas_df.columns.names]
),
)
with pytest.raises(ValueError):
df_equals(
modin_df.rename_axis(str.upper, axis=1),
pandas_df.rename_axis(str.upper, axis=1),
)
def test_rename_axis_inplace(self):
test_frame = TestData().frame
modin_df = pd.DataFrame(test_frame)
result = test_frame.copy()
modin_result = modin_df.copy()
no_return = result.rename_axis("foo", inplace=True)
modin_no_return = modin_result.rename_axis("foo", inplace=True)
assert no_return is modin_no_return
df_equals(modin_result, result)
result = test_frame.copy()
modin_result = modin_df.copy()
no_return = result.rename_axis("bar", axis=1, inplace=True)
modin_no_return = modin_result.rename_axis("bar", axis=1, inplace=True)
assert no_return is modin_no_return
df_equals(modin_result, result)
def test_reorder_levels(self):
df = pd.DataFrame(
index=pd.MultiIndex.from_tuples(
[
(num, letter, color)
for num in range(1, 3)
for letter in ["a", "b", "c"]
for color in ["Red", "Green"]
],
names=["Number", "Letter", "Color"],
)
)
df["Value"] = np.random.randint(1, 100, len(df))
with pytest.warns(UserWarning):
df.reorder_levels(["Letter", "Color", "Number"])
def test_replace(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).replace()
def test_resample(self):
d = dict(
{
"price": [10, 11, 9, 13, 14, 18, 17, 19],
"volume": [50, 60, 40, 100, 50, 100, 40, 50],
}
)
df = pd.DataFrame(d)
df["week_starting"] = pd.date_range("01/01/2018", periods=8, freq="W")
with pytest.warns(UserWarning):
df.resample("M", on="week_starting")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_reset_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.reset_index(inplace=False)
pandas_result = pandas_df.reset_index(inplace=False)
df_equals(modin_result, pandas_result)
modin_df_cp = modin_df.copy()
pd_df_cp = pandas_df.copy()
modin_df_cp.reset_index(inplace=True)
pd_df_cp.reset_index(inplace=True)
df_equals(modin_df_cp, pd_df_cp)
def test_rolling(self):
df = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]})
with pytest.warns(UserWarning):
df.rolling(2, win_type="triang")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_round(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.round(), pandas_df.round())
df_equals(modin_df.round(1), pandas_df.round(1))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_sample(self, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
modin_df.sample(n=3, frac=0.4, axis=axis)
with pytest.raises(KeyError):
modin_df.sample(frac=0.5, weights="CoLuMn_No_ExIsT", axis=0)
with pytest.raises(ValueError):
modin_df.sample(frac=0.5, weights=modin_df.columns[0], axis=1)
with pytest.raises(ValueError):
modin_df.sample(
frac=0.5, weights=[0.5 for _ in range(len(modin_df.index[:-1]))], axis=0
)
with pytest.raises(ValueError):
modin_df.sample(
frac=0.5,
weights=[0.5 for _ in range(len(modin_df.columns[:-1]))],
axis=1,
)
with pytest.raises(ValueError):
modin_df.sample(n=-3, axis=axis)
with pytest.raises(ValueError):
modin_df.sample(frac=0.2, weights=pandas.Series(), axis=axis)
if isinstance(axis, str):
num_axis = pandas.DataFrame()._get_axis_number(axis)
else:
num_axis = axis
# weights that sum to 1
sums = sum(i % 2 for i in range(len(modin_df.axes[num_axis])))
weights = [i % 2 / sums for i in range(len(modin_df.axes[num_axis]))]
modin_result = modin_df.sample(
frac=0.5, random_state=42, weights=weights, axis=axis
)
pandas_result = pandas_df.sample(
frac=0.5, random_state=42, weights=weights, axis=axis
)
df_equals(modin_result, pandas_result)
# weights that don't sum to 1
weights = [i % 2 for i in range(len(modin_df.axes[num_axis]))]
modin_result = modin_df.sample(
frac=0.5, random_state=42, weights=weights, axis=axis
)
pandas_result = pandas_df.sample(
frac=0.5, random_state=42, weights=weights, axis=axis
)
df_equals(modin_result, pandas_result)
modin_result = modin_df.sample(n=0, axis=axis)
pandas_result = pandas_df.sample(n=0, axis=axis)
df_equals(modin_result, pandas_result)
modin_result = modin_df.sample(frac=0.5, random_state=42, axis=axis)
pandas_result = pandas_df.sample(frac=0.5, random_state=42, axis=axis)
df_equals(modin_result, pandas_result)
modin_result = modin_df.sample(n=2, random_state=42, axis=axis)
pandas_result = pandas_df.sample(n=2, random_state=42, axis=axis)
df_equals(modin_result, pandas_result)
def test_select_dtypes(self):
frame_data = {
"test1": list("abc"),
"test2": np.arange(3, 6).astype("u1"),
"test3": np.arange(8.0, 11.0, dtype="float64"),
"test4": [True, False, True],
"test5": pandas.date_range("now", periods=3).values,
"test6": list(range(5, 8)),
}
df = pandas.DataFrame(frame_data)
rd = pd.DataFrame(frame_data)
include = np.float, "integer"
exclude = (np.bool_,)
r = rd.select_dtypes(include=include, exclude=exclude)
e = df[["test2", "test3", "test6"]]
df_equals(r, e)
r = rd.select_dtypes(include=np.bool_)
e = df[["test4"]]
df_equals(r, e)
r = rd.select_dtypes(exclude=np.bool_)
e = df[["test1", "test2", "test3", "test5", "test6"]]
df_equals(r, e)
try:
pd.DataFrame().select_dtypes()
assert False
except ValueError:
assert True
def test_sem(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).sem()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_set_axis(self, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
x = pandas.DataFrame()._get_axis_number(axis)
index = modin_df.columns if x else modin_df.index
labels = ["{0}_{1}".format(index[i], i) for i in range(modin_df.shape[x])]
modin_result = modin_df.set_axis(labels, axis=axis, inplace=False)
pandas_result = pandas_df.set_axis(labels, axis=axis, inplace=False)
df_equals(modin_result, pandas_result)
with pytest.warns(FutureWarning):
modin_df.set_axis(axis, labels, inplace=False)
modin_df_copy = modin_df.copy()
modin_df.set_axis(labels, axis=axis, inplace=True)
# Check that the copy and original are different
try:
df_equals(modin_df, modin_df_copy)
except AssertionError:
assert True
else:
assert False
pandas_df.set_axis(labels, axis=axis, inplace=True)
df_equals(modin_df, pandas_df)
with pytest.warns(FutureWarning):
modin_df.set_axis(labels, axis=axis, inplace=None)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"drop", bool_arg_values, ids=arg_keys("drop", bool_arg_keys)
)
@pytest.mark.parametrize(
"append", bool_arg_values, ids=arg_keys("append", bool_arg_keys)
)
def test_set_index(self, request, data, drop, append):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if "empty_data" not in request.node.name:
key = modin_df.columns[0]
modin_result = modin_df.set_index(
key, drop=drop, append=append, inplace=False
)
pandas_result = pandas_df.set_index(
key, drop=drop, append=append, inplace=False
)
df_equals(modin_result, pandas_result)
modin_df_copy = modin_df.copy()
modin_df.set_index(key, drop=drop, append=append, inplace=True)
# Check that the copy and original are different
try:
df_equals(modin_df, modin_df_copy)
except AssertionError:
assert True
else:
assert False
pandas_df.set_index(key, drop=drop, append=append, inplace=True)
df_equals(modin_df, pandas_df)
def test_set_value(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).set_value(0, 0, 0)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_shape(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.shape == pandas_df.shape
def test_shift(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).shift()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_size(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.size == pandas_df.size
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_skew(self, request, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.skew(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.skew(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.skew(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.skew(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.skew(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.skew(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
def test_slice_shift(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).slice_shift()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"ascending", bool_arg_values, ids=arg_keys("ascending", bool_arg_keys)
)
@pytest.mark.parametrize("na_position", ["first", "last"], ids=["first", "last"])
@pytest.mark.parametrize(
"sort_remaining", bool_arg_values, ids=arg_keys("sort_remaining", bool_arg_keys)
)
def test_sort_index(self, data, axis, ascending, na_position, sort_remaining):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# Change index value so sorting will actually make a difference
if axis == "rows" or axis == 0:
length = len(modin_df.index)
modin_df.index = [(i - length / 2) % length for i in range(length)]
pandas_df.index = [(i - length / 2) % length for i in range(length)]
# Add NaNs to sorted index
if axis == "rows" or axis == 0:
length = len(modin_df.index)
modin_df.index = [
np.nan if i % 2 == 0 else modin_df.index[i] for i in range(length)
]
pandas_df.index = [
np.nan if i % 2 == 0 else pandas_df.index[i] for i in range(length)
]
else:
length = len(modin_df.columns)
modin_df.columns = [
np.nan if i % 2 == 0 else modin_df.columns[i] for i in range(length)
]
pandas_df.columns = [
np.nan if i % 2 == 0 else pandas_df.columns[i] for i in range(length)
]
modin_result = modin_df.sort_index(
axis=axis, ascending=ascending, na_position=na_position, inplace=False
)
pandas_result = pandas_df.sort_index(
axis=axis, ascending=ascending, na_position=na_position, inplace=False
)
df_equals(modin_result, pandas_result)
modin_df_cp = modin_df.copy()
pandas_df_cp = pandas_df.copy()
modin_df_cp.sort_index(
axis=axis, ascending=ascending, na_position=na_position, inplace=True
)
pandas_df_cp.sort_index(
axis=axis, ascending=ascending, na_position=na_position, inplace=True
)
df_equals(modin_df_cp, pandas_df_cp)
# MultiIndex
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.index = pd.MultiIndex.from_tuples(
[(i // 10, i // 5, i) for i in range(len(modin_df))]
)
pandas_df.index = pandas.MultiIndex.from_tuples(
[(i // 10, i // 5, i) for i in range(len(pandas_df))]
)
with pytest.warns(UserWarning):
df_equals(modin_df.sort_index(level=0), pandas_df.sort_index(level=0))
with pytest.warns(UserWarning):
df_equals(modin_df.sort_index(axis=0), pandas_df.sort_index(axis=0))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"ascending", bool_arg_values, ids=arg_keys("ascending", bool_arg_keys)
)
@pytest.mark.parametrize("na_position", ["first", "last"], ids=["first", "last"])
def test_sort_values(self, request, data, axis, ascending, na_position):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if "empty_data" not in request.node.name and (
(axis == 0 or axis == "over rows")
or name_contains(request.node.name, numeric_dfs)
):
index = (
modin_df.index if axis == 1 or axis == "columns" else modin_df.columns
)
key = index[0]
modin_result = modin_df.sort_values(
key,
axis=axis,
ascending=ascending,
na_position=na_position,
inplace=False,
)
pandas_result = pandas_df.sort_values(
key,
axis=axis,
ascending=ascending,
na_position=na_position,
inplace=False,
)
df_equals(modin_result, pandas_result)
modin_df_cp = modin_df.copy()
pandas_df_cp = pandas_df.copy()
modin_df_cp.sort_values(
key,
axis=axis,
ascending=ascending,
na_position=na_position,
inplace=True,
)
pandas_df_cp.sort_values(
key,
axis=axis,
ascending=ascending,
na_position=na_position,
inplace=True,
)
df_equals(modin_df_cp, pandas_df_cp)
keys = [key, index[-1]]
modin_result = modin_df.sort_values(
keys,
axis=axis,
ascending=ascending,
na_position=na_position,
inplace=False,
)
pandas_result = pandas_df.sort_values(
keys,
axis=axis,
ascending=ascending,
na_position=na_position,
inplace=False,
)
df_equals(modin_result, pandas_result)
modin_df_cp = modin_df.copy()
pandas_df_cp = pandas_df.copy()
modin_df_cp.sort_values(
keys,
axis=axis,
ascending=ascending,
na_position=na_position,
inplace=True,
)
pandas_df_cp.sort_values(
keys,
axis=axis,
ascending=ascending,
na_position=na_position,
inplace=True,
)
df_equals(modin_df_cp, pandas_df_cp)
def test_squeeze(self):
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 10, 11],
"col4": [12, 13, 14, 15],
"col5": [0, 0, 0, 0],
}
frame_data_2 = {"col1": [0, 1, 2, 3]}
frame_data_3 = {
"col1": [0],
"col2": [4],
"col3": [8],
"col4": [12],
"col5": [0],
}
frame_data_4 = {"col1": [2]}
frame_data_5 = {"col1": ["string"]}
# Different data for different cases
pandas_df = pandas.DataFrame(frame_data).squeeze()
ray_df = pd.DataFrame(frame_data).squeeze()
df_equals(ray_df, pandas_df)
pandas_df_2 = pandas.DataFrame(frame_data_2).squeeze()
ray_df_2 = pd.DataFrame(frame_data_2).squeeze()
df_equals(ray_df_2, pandas_df_2)
pandas_df_3 = pandas.DataFrame(frame_data_3).squeeze()
ray_df_3 = pd.DataFrame(frame_data_3).squeeze()
df_equals(ray_df_3, pandas_df_3)
pandas_df_4 = pandas.DataFrame(frame_data_4).squeeze()
ray_df_4 = pd.DataFrame(frame_data_4).squeeze()
df_equals(ray_df_4, pandas_df_4)
pandas_df_5 = pandas.DataFrame(frame_data_5).squeeze()
ray_df_5 = pd.DataFrame(frame_data_5).squeeze()
df_equals(ray_df_5, pandas_df_5)
data = [
[
pd.Timestamp("2019-01-02"),
pd.Timestamp("2019-01-03"),
pd.Timestamp("2019-01-04"),
pd.Timestamp("2019-01-05"),
],
[1, 1, 1, 2],
]
df = pd.DataFrame(data, index=["date", "value"]).T
pf = | pandas.DataFrame(data, index=["date", "value"]) | pandas.DataFrame |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 31 19:06:02 2018
@author: Jessica
"""
from __future__ import division, print_function
from sklearn.datasets import fetch_mldata
from sklearn.ensemble import RandomForestClassifier
from sklearn.decomposition import PCA
from sklearn.metrics import f1_score
import matplotlib
import matplotlib.pyplot as plt
import csv
import time
import numpy as np
import pandas as pd
mnist = fetch_mldata('MNIST original', data_home='./')
mnist.data.shape
mnist
X, y = mnist['data'], mnist['target']
X.shape
y.shape
# Display 36,000th image
some_digit = X[36000]
some_digit_image = some_digit.reshape(28, 28)
plt.imshow(some_digit_image, cmap = matplotlib.cm.binary,
interpolation = 'nearest')
plt.axis('off')
plt.show()
# Split the data to 60,000 images as training data and 10,000 as test data
X_train, X_test, y_train, y_test = X[:60000], X[60000:], y[:60000], y[60000:]
# --- Random Forest Classifier ---
rfClf = RandomForestClassifier(n_estimators=100, max_leaf_nodes=10, n_jobs=-1)
# Record the time it takes to fit the model
# set random number seed
np.random.seed(seed = 9999)
replications = 10 # repeat the trial ten times
x_time = [] # empty list for storing test results
n = 0 # initialize count
print('--- Time to Fit Random Forest Classifier ---')
while (n < replications):
start_time = time.clock()
    # fit the random forest classifier on the MNIST training data (this is the timed operation)
rfClf.fit(X_train, y_train)
end_time = time.clock()
runtime = end_time - start_time # seconds of wall-clock time
x_time.append(runtime * 1000) # report in milliseconds
print("replication", n + 1, ":", x_time[n], "milliseconds\n")
n = n + 1
# write results to external file
with open('rf_fit.csv', 'wt') as f:
writer = csv.writer(f, quoting = csv.QUOTE_NONNUMERIC, dialect = 'excel')
    writer.writerow(['x_time'])  # wrap the header in a list so it lands in a single cell
for i in range(replications):
writer.writerow(([x_time[i],]))
# preliminary analysis for this cell of the design
print( | pd.DataFrame(x_time) | pandas.DataFrame |
from constants_and_util import *
from scipy.stats import norm, pearsonr, spearmanr
import pandas as pd
import copy
import numpy as np
import random
import matplotlib.pyplot as plt
import statsmodels.api as sm
from sklearn.linear_model import Lasso
from sklearn.ensemble import RandomForestRegressor
from scipy.stats import ttest_ind, rankdata
import non_image_data_processing
import patsy
import os
import math
import sklearn
import json
import seaborn as sns
from scipy.stats import scoreatpercentile
import statsmodels
from sklearn.kernel_ridge import KernelRidge
import scipy
from scipy.stats import scoreatpercentile, linregress, ttest_rel
from statsmodels.iolib.summary2 import summary_col
"""
Code to perform analyses on the fitted models. We note two potentially confusing naming conventions in the analysis code.
First, much of the code was written during preliminary analyses looking just at SES; later, we broadened the analysis to look at pain gaps by sex, race, etc.
Hence, many of the variable names/comments contain "ses", but in general, these refer to all three binary variables we consider in the final paper (capturing education and race, not just income).
Second, while the paper refers to "training", "development", and "validation" sets, those correspond in the code to the "train", "val", and "test" sets, respectively.
"""
def make_simple_histogram_of_pain(y, binary_vector_to_use, positive_class_label, negative_class_label, plot_filename):
"""
Make a simple histogram of pain versus binary class (eg, pain for black vs non-black patients).
Checked.
"""
sns.set_style()
bins = np.arange(0, 101, 10)
plt.figure(figsize=[4, 4])
hist_weights = np.ones((binary_vector_to_use == False).sum())/float((binary_vector_to_use == False).sum()) # https://stackoverflow.com/a/16399202/9477154
plt.hist(y[binary_vector_to_use == False], weights=hist_weights, alpha=1, bins=bins, label=negative_class_label, orientation='horizontal')
hist_weights = np.ones((binary_vector_to_use == True).sum())/float((binary_vector_to_use == True).sum())
plt.hist(y[binary_vector_to_use == True], weights=hist_weights, alpha=.7, bins=bins, label=positive_class_label, orientation='horizontal')
plt.ylim([0, 100])
plt.yticks([0, 20, 40, 60, 80, 100], fontsize=12)
plt.xlabel("")
plt.legend(loc=4, fontsize=12)
plt.xticks([])
plt.savefig(plot_filename)
def compare_to_mri_features(datasets, y, yhat, all_ses_vars, ids, df_for_filtering_out_special_values, also_include_xray_features, use_random_forest, mri_features):
"""
Show that yhat still outperforms a predictor which uses MRI features.
df_for_filtering_out_special_values: this is a dataframe for MRI features only which only has rows if there are no 0.5/-0.5 values.
Just a sanity check (those values are rare) because I'm not sure whether binarizing really makes sense for those values.
"""
datasets = copy.deepcopy(datasets)
idxs_with_mris = {}
dfs_to_use_in_regression = {}
for dataset in ['train', 'val', 'test']:
idxs_with_mris[dataset] = np.isnan(datasets[dataset].non_image_data[mri_features].values).sum(axis=1) == 0
if df_for_filtering_out_special_values is not None:
dfs_to_use_in_regression[dataset] = pd.merge(datasets[dataset].non_image_data,
df_for_filtering_out_special_values,
how='left',
on=['id', 'side', 'visit'],
validate='one_to_one')
no_special_values = ~pd.isnull(dfs_to_use_in_regression[dataset]['no_special_values']).values
idxs_with_mris[dataset] = (idxs_with_mris[dataset]) & (no_special_values)
else:
dfs_to_use_in_regression[dataset] = datasets[dataset].non_image_data.copy()
if also_include_xray_features:
mri_features_to_use = ['C(%s)' % a for a in mri_features + CLINICAL_CONTROL_COLUMNS]
else:
mri_features_to_use = ['C(%s)' % a for a in mri_features]
print("\n\n\n\n********Predicting pain from MRI features; including Xray clinical features=%s; using random forest %s; filtering out special values %s" %
(also_include_xray_features, use_random_forest, df_for_filtering_out_special_values is not None))
yhat_from_mri = compare_to_clinical_performance(
train_df=dfs_to_use_in_regression['train'].loc[idxs_with_mris['train']],
val_df=dfs_to_use_in_regression['val'].loc[idxs_with_mris['val']],
test_df=dfs_to_use_in_regression['test'].loc[idxs_with_mris['test']],
y_col='koos_pain_subscore',
features_to_use=mri_features_to_use,
binary_prediction=False,
use_nonlinear_model=use_random_forest,
do_ols_sanity_check=True)
print("Compare to yhat performance")
yhat_performance = assess_performance(y=y[idxs_with_mris['test']],
yhat=yhat[idxs_with_mris['test']],
binary_prediction=False)
for k in yhat_performance:
print('%s: %2.3f' % (k, yhat_performance[k]))
mri_ses_vars = {}
for k in all_ses_vars:
mri_ses_vars[k] = all_ses_vars[k][idxs_with_mris['test']]
print(quantify_pain_gap_reduction_vs_rival(yhat=yhat[idxs_with_mris['test']],
y=y[idxs_with_mris['test']],
rival_severity_measure=yhat_from_mri,
all_ses_vars=mri_ses_vars,
ids=ids[idxs_with_mris['test']]))
def sig_star(p):
assert p >= 0 and p <= 1
if p < .001:
return '***'
elif p < .01:
return '**'
elif p < .05:
return '*'
return ''
def get_pvalue_on_binary_vector_mean_diff(yhat_vector, klg_vector, ids):
"""
Assess whether yhat_vector and KLG_vector are assigning different fractions of people to surgery.
Basically does a paired t-test on the binary vector accounting for clustering.
Used for surgery analysis.
"""
assert len(yhat_vector) == len(klg_vector) == len(ids)
check_is_array(yhat_vector)
check_is_array(klg_vector)
diff_df = pd.DataFrame({'diff':1.*yhat_vector - 1.*klg_vector, 'id':ids})
clustered_diff_model = sm.OLS.from_formula('diff ~ 1', data=diff_df).fit(cov_type='cluster', cov_kwds={'groups':diff_df['id']})
assert np.allclose(clustered_diff_model.params['Intercept'], yhat_vector.mean() - klg_vector.mean())
return clustered_diff_model.pvalues['Intercept']
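# A minimal, hedged usage sketch (all values invented): how the clustered mean-difference test
# above is meant to be called, with two knees per person so observations are clustered by id.
def _demo_clustered_mean_diff():
    toy_yhat_surgery = np.array([True, True, False, True, False, False])
    toy_klg_surgery = np.array([False, True, False, False, False, False])
    toy_ids = np.array([1, 1, 2, 2, 3, 3])
    return get_pvalue_on_binary_vector_mean_diff(toy_yhat_surgery, toy_klg_surgery, toy_ids)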
def get_ci_on_binary_vector(vector, ids):
"""
Compute standard error on a binary vector's mean, accounting for clustering.
Used for surgery analysis.
"""
assert len(vector) == len(ids)
check_is_array(vector)
check_is_array(ids)
df = pd.DataFrame({'val':1.*vector, 'id':ids})
cluster_model = sm.OLS.from_formula('val ~ 1', data=df).fit(cov_type='cluster', cov_kwds={'groups':df['id']})
assert np.allclose(cluster_model.params['Intercept'], vector.mean())
return '(%2.5f, %2.5f)' % (cluster_model.conf_int().loc['Intercept', 0], cluster_model.conf_int().loc['Intercept', 1])
def do_surgery_analysis_ziad_style(yhat, y, klg, all_ses_vars, baseline_idxs, have_actually_had_surgery, df_to_use, ids):
"""
Hopefully the final surgery analysis. Does a couple things:
1. Uses a criterion for allocating surgery based on the prior literature: KLG >= 3 and high pain, as defined using pain threshold from prior literature.
- to compare to yhat we do it two ways: discretize yhat, use discretized_yhat >= 3
- and, to make sure we allocate the same number of surgeries, take all high pain people and just count down by yhat until we have the same number that KLG allocates.
2. Then examines:
- What fraction of people are eligible for surgery both overall and in our racial/SES groups?
- What fraction of people are in a lot of pain but aren't eligible for surgery both overall and in our racial/SES groups?
- Is painkiller use correlated with yhat among those who don't receive surgery?
As a robustness check, all these analyses are repeated on both baseline + overall dataset, both excluding and including those who have already had surgery.
"""
check_is_array(yhat)
check_is_array(y)
check_is_array(klg)
check_is_array(ids)
check_is_array(baseline_idxs)
check_is_array(have_actually_had_surgery)
pd.set_option('precision', 6)
pd.set_option('display.width', 1000)
df_to_use = df_to_use.copy()
in_high_pain = binarize_koos(y) == True
discretized_yhat = discretize_yhat_like_kl_grade(yhat_arr=yhat, kl_grade_arr=klg, y_col='koos_pain_subscore')
klg_cutoff = 3
fit_surgery_criteria_under_klg = (in_high_pain == True) & (klg >= klg_cutoff)
fit_surgery_criteria_under_discretized_yhat = (in_high_pain == True) & (discretized_yhat >= klg_cutoff)
for just_use_baseline in [True, False]:
for exclude_those_who_have_surgery in [True, False]:
idxs_to_use = np.ones(baseline_idxs.shape) == 1
if just_use_baseline:
idxs_to_use = idxs_to_use & (baseline_idxs == 1)
if exclude_those_who_have_surgery:
idxs_to_use = idxs_to_use & (have_actually_had_surgery == 0)
print("\n\n\n\n****Just use baseline: %s; exclude those who have had surgery: %s; analyzing %i knees" %
(just_use_baseline, exclude_those_who_have_surgery, idxs_to_use.sum()))
n_surgeries_under_klg = int(fit_surgery_criteria_under_klg[idxs_to_use].sum())
# Alternate yhat criterion: assign exactly same number of people surgery under yhat as under KLG.
# Do this by taking people with the lowest yhat values subject to being in high pain.
# Compute this independently for each group specified by idxs_to_use.
lowest_yhat_idxs = np.argsort(yhat)
yhat_match_n_surgeries = np.array([False for a in range(len(fit_surgery_criteria_under_discretized_yhat))])
for idx in lowest_yhat_idxs:
if yhat_match_n_surgeries.sum() < n_surgeries_under_klg:
if (in_high_pain[idx] == 1) & (idxs_to_use[idx] == 1):
yhat_match_n_surgeries[idx] = True
assert yhat[yhat_match_n_surgeries == True].mean() < yhat[yhat_match_n_surgeries == False].mean()
assert np.allclose(yhat_match_n_surgeries[idxs_to_use].mean(), fit_surgery_criteria_under_klg[idxs_to_use].mean())
fracs_eligible_for_surgery = []
fracs_eligible_for_surgery.append({'group':'Overall',
'klg':fit_surgery_criteria_under_klg[idxs_to_use].mean(),
'klg_ci':get_ci_on_binary_vector(fit_surgery_criteria_under_klg[idxs_to_use], ids[idxs_to_use]),
'yhat':fit_surgery_criteria_under_discretized_yhat[idxs_to_use].mean(),
'yhat_ci':get_ci_on_binary_vector(fit_surgery_criteria_under_discretized_yhat[idxs_to_use], ids[idxs_to_use]),
'yhat_match_surgeries':yhat_match_n_surgeries[idxs_to_use].mean(),
'yhat_klg_p':get_pvalue_on_binary_vector_mean_diff(yhat_vector=fit_surgery_criteria_under_discretized_yhat[idxs_to_use],
klg_vector=fit_surgery_criteria_under_klg[idxs_to_use],
ids=ids[idxs_to_use])})
for ses_var in all_ses_vars:
fracs_eligible_for_surgery.append({'group':ses_var,
'yhat':fit_surgery_criteria_under_discretized_yhat[(all_ses_vars[ses_var] == True) & idxs_to_use].mean(),
'yhat_ci':get_ci_on_binary_vector(fit_surgery_criteria_under_discretized_yhat[(all_ses_vars[ses_var] == True) & idxs_to_use],
ids[(all_ses_vars[ses_var] == True) & idxs_to_use]),
'klg':fit_surgery_criteria_under_klg[(all_ses_vars[ses_var] == True) & idxs_to_use].mean(),
'klg_ci':get_ci_on_binary_vector(fit_surgery_criteria_under_klg[(all_ses_vars[ses_var] == True) & idxs_to_use],
ids[(all_ses_vars[ses_var] == True) & idxs_to_use]),
'yhat_match_surgeries':yhat_match_n_surgeries[(all_ses_vars[ses_var] == True) & idxs_to_use].mean(),
'yhat_klg_p':get_pvalue_on_binary_vector_mean_diff(yhat_vector=fit_surgery_criteria_under_discretized_yhat[(all_ses_vars[ses_var] == True) & idxs_to_use], klg_vector=fit_surgery_criteria_under_klg[(all_ses_vars[ses_var] == True) & idxs_to_use],
ids=ids[(all_ses_vars[ses_var] == True) & idxs_to_use])})
fracs_eligible_for_surgery = pd.DataFrame(fracs_eligible_for_surgery)
fracs_eligible_for_surgery['yhat/klg'] = fracs_eligible_for_surgery['yhat'] / fracs_eligible_for_surgery['klg']
fracs_eligible_for_surgery['yhat_match_surgeries/klg'] = fracs_eligible_for_surgery['yhat_match_surgeries'] / fracs_eligible_for_surgery['klg']
print("Fraction eligible for surgery")
print(fracs_eligible_for_surgery[['group', 'klg', 'klg_ci', 'yhat', 'yhat_ci', 'yhat/klg', 'yhat_klg_p']])
assert (fracs_eligible_for_surgery['yhat/klg'] > 1).all()
assert (fracs_eligible_for_surgery['yhat_match_surgeries/klg'] >= 1).all()
for check in ['klg', 'yhat']:
# check CIs.
assert np.allclose(
fracs_eligible_for_surgery[check].values - fracs_eligible_for_surgery['%s_ci' % check].map(lambda x:float(x.split()[0].replace(',', '').replace('(', ''))),
fracs_eligible_for_surgery['%s_ci' % check].map(lambda x:float(x.split()[1].replace(',', '').replace(')', ''))) - fracs_eligible_for_surgery[check].values,
atol=1e-5)
# for each population we calculate both under the current regime and under our counterfactual surgery assignment: the rate of people who do not receive surgery and are in pain.
do_not_receive_surgery_and_are_in_pain = []
print("Do not receive surgery and are in pain")
do_not_receive_surgery_and_are_in_pain.append({'group':'Overall',
'klg':((fit_surgery_criteria_under_klg == 0) & in_high_pain)[idxs_to_use].mean(),
'klg_ci':get_ci_on_binary_vector(((fit_surgery_criteria_under_klg == 0) & in_high_pain)[idxs_to_use], ids[idxs_to_use]),
'yhat':((fit_surgery_criteria_under_discretized_yhat == 0) & in_high_pain)[idxs_to_use].mean(),
'yhat_ci':get_ci_on_binary_vector(((fit_surgery_criteria_under_discretized_yhat == 0) & in_high_pain)[idxs_to_use], ids[idxs_to_use]),
'yhat_match_surgeries':((yhat_match_n_surgeries == 0) & in_high_pain)[idxs_to_use].mean(),
'yhat_klg_p':get_pvalue_on_binary_vector_mean_diff(yhat_vector=((fit_surgery_criteria_under_discretized_yhat == 0) & in_high_pain)[idxs_to_use],
klg_vector=((fit_surgery_criteria_under_klg == 0) & in_high_pain)[idxs_to_use],
ids=ids[idxs_to_use])})
for ses_var in all_ses_vars:
do_not_receive_surgery_and_are_in_pain.append({'group':ses_var,
'klg':((fit_surgery_criteria_under_klg == 0) & in_high_pain)[(all_ses_vars[ses_var] == True) & idxs_to_use].mean(),
'klg_ci':get_ci_on_binary_vector(((fit_surgery_criteria_under_klg == 0) & in_high_pain)[idxs_to_use & (all_ses_vars[ses_var] == True)], ids[idxs_to_use & (all_ses_vars[ses_var] == True)]),
'yhat':((fit_surgery_criteria_under_discretized_yhat == 0) & in_high_pain)[(all_ses_vars[ses_var] == True) & idxs_to_use].mean(),
'yhat_ci':get_ci_on_binary_vector(((fit_surgery_criteria_under_discretized_yhat == 0) & in_high_pain)[idxs_to_use & (all_ses_vars[ses_var] == True)], ids[idxs_to_use & (all_ses_vars[ses_var] == True)]),
'yhat_match_surgeries':((yhat_match_n_surgeries == 0) & in_high_pain)[(all_ses_vars[ses_var] == True) & idxs_to_use].mean(),
'yhat_klg_p':get_pvalue_on_binary_vector_mean_diff(yhat_vector=((fit_surgery_criteria_under_discretized_yhat == 0) & in_high_pain)[(all_ses_vars[ses_var] == True) & idxs_to_use],
klg_vector=((fit_surgery_criteria_under_klg == 0) & in_high_pain)[(all_ses_vars[ses_var] == True) & idxs_to_use],
ids=ids[(all_ses_vars[ses_var] == True) & idxs_to_use])})
do_not_receive_surgery_and_are_in_pain = pd.DataFrame(do_not_receive_surgery_and_are_in_pain)
do_not_receive_surgery_and_are_in_pain['yhat/klg'] = do_not_receive_surgery_and_are_in_pain['yhat'] / do_not_receive_surgery_and_are_in_pain['klg']
do_not_receive_surgery_and_are_in_pain['yhat_match_surgeries/klg'] = do_not_receive_surgery_and_are_in_pain['yhat_match_surgeries'] / do_not_receive_surgery_and_are_in_pain['klg']
print(do_not_receive_surgery_and_are_in_pain[['group', 'klg', 'klg_ci', 'yhat', 'yhat_ci', 'yhat/klg', 'yhat_klg_p']])
assert (do_not_receive_surgery_and_are_in_pain['yhat/klg'] < 1).all()
assert (do_not_receive_surgery_and_are_in_pain['yhat_match_surgeries/klg'] <= 1).all()
for check in ['klg', 'yhat']:
# check CIs.
assert np.allclose(
do_not_receive_surgery_and_are_in_pain[check].values - do_not_receive_surgery_and_are_in_pain['%s_ci' % check].map(lambda x:float(x.split()[0].replace(',', '').replace('(', ''))),
do_not_receive_surgery_and_are_in_pain['%s_ci' % check].map(lambda x:float(x.split()[1].replace(',', '').replace(')', ''))) - do_not_receive_surgery_and_are_in_pain[check].values,
atol=1e-5)
            # show, in the non-surgical population, the correlation between opioid use and y-hat
predict_medication_results = []
medications = ['rxactm', 'rxanalg', 'rxasprn', 'rxnarc', 'rxnsaid', 'rxothan']
for surgery_criterion in ['yhat', 'yhat_match_surgeries', 'klg']:
if surgery_criterion == 'yhat':
non_surgical_population = (fit_surgery_criteria_under_discretized_yhat == False) & idxs_to_use
elif surgery_criterion == 'klg':
non_surgical_population = (fit_surgery_criteria_under_klg == False) & idxs_to_use
elif surgery_criterion == 'yhat_match_surgeries':
non_surgical_population = (yhat_match_n_surgeries == False) & idxs_to_use
for m in medications:
df_for_regression = pd.DataFrame({'medication':df_to_use.loc[non_surgical_population, m].values,
'yhat':yhat[non_surgical_population],
'id':df_to_use.loc[non_surgical_population, 'id'].values})
df_for_regression = df_for_regression.dropna()
predict_on_medication_in_nonsurgical_population = sm.Logit.from_formula('medication ~ yhat', data=df_for_regression).fit(cov_type='cluster', cov_kwds={'groups':df_for_regression['id']})
predict_medication_results.append({'medication':MEDICATION_CODES[('v00' + m).upper()],
'beta_yhat':predict_on_medication_in_nonsurgical_population.params['yhat'],
'DV mean':df_for_regression['medication'].mean(),
'p_yhat':predict_on_medication_in_nonsurgical_population.pvalues['yhat'],
'surgery_criterion':surgery_criterion,
'n':predict_on_medication_in_nonsurgical_population.nobs})
predict_medication_results = pd.DataFrame(predict_medication_results)[['surgery_criterion', 'medication', 'beta_yhat', 'p_yhat', 'DV mean', 'n']]
predict_medication_results['sig'] = predict_medication_results['p_yhat'].map(sig_star)
assert (predict_medication_results['sig'].map(lambda x:'*' in x) & (predict_medication_results['beta_yhat'] > 0)).sum() == 0 # make sure no significant associations in the wrong direction.
print(predict_medication_results.sort_values(by='medication'))
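# A hedged, simplified restatement (illustrative only) of the "yhat_match_surgeries" rule used
# above: among high-pain knees, select the n lowest-yhat knees, where n is the number of
# surgeries the KLG criterion would allocate. Inputs below are assumed numpy arrays.
def _demo_match_surgery_counts(yhat, in_high_pain, n_surgeries_under_klg):
    chosen = np.zeros(len(yhat), dtype=bool)
    for idx in np.argsort(yhat):  # lowest yhat (worst predicted pain) first
        if chosen.sum() >= n_surgeries_under_klg:
            break
        if in_high_pain[idx]:
            chosen[idx] = True
    return chosen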
def extract_all_ses_vars(df):
"""
Small helper method: return a dictionary of variables coded in the proper direction.
"""
for k in ['binarized_income_at_least_50k', 'binarized_education_graduated_college', 'race_black']:
assert df[k].map(lambda x:x in [0, 1]).all()
assert df[k].map(lambda x:x in [True, False]).all()
income_at_least_50k = df['binarized_income_at_least_50k'].values == 1
graduated_college = df['binarized_education_graduated_college'].values == 1
race_black = df['race_black'].values == 1
all_ses_vars = {'did_not_graduate_college':~(graduated_college == 1),
'income_less_than_50k':~(income_at_least_50k == 1),
'race_black':race_black == 1}
return all_ses_vars, income_at_least_50k, graduated_college, race_black
def assess_treatment_gaps_controlling_for_klg(klg, all_ses_vars, baseline_idxs, df_to_use):
"""
Regression:
treatment ~ SES + controls, where controls \in [KLG, none].
"""
check_is_array(klg)
check_is_array(baseline_idxs)
pd.set_option('max_rows', 500)
get_OR_and_CI = lambda m:'%2.2f (%2.2f, %2.2f)' % (np.exp(m.params['ses']), np.exp(m.conf_int().loc['ses', 0]), np.exp(m.conf_int().loc['ses', 1]))
treatment_gaps_regression_results = []
for treatment in ['knee_surgery', 'rxnarc', 'rxactm', 'rxanalg', 'rxasprn', 'rxnsaid', 'rxothan']:
for just_use_baseline in [True, False]:
idxs_to_use = np.ones(baseline_idxs.shape) == 1
if just_use_baseline:
idxs_to_use = idxs_to_use & (baseline_idxs == 1)
for control_for_klg in [True, False]:
for ses_var_name in all_ses_vars:
regression_df = pd.DataFrame({'ses':all_ses_vars[ses_var_name][idxs_to_use] * 1.,
'klg':klg[idxs_to_use],
'treatment':df_to_use.loc[idxs_to_use, treatment].values,
'id':df_to_use.loc[idxs_to_use, 'id'].values,
'visit':df_to_use.loc[idxs_to_use, 'visit'].values}).dropna()
if control_for_klg:
formula = 'treatment ~ ses + C(klg)'
else:
formula = 'treatment ~ ses'
regression_model = sm.Logit.from_formula(formula, data=regression_df).fit(cov_type='cluster', cov_kwds={'groups':regression_df['id'].values})
treatment_gaps_regression_results.append({'n_obs':regression_model.nobs,
'just_baseline':just_use_baseline,
'klg_control':control_for_klg,
'treatment':MEDICATION_CODES[('v00' + treatment).upper()] if treatment != 'knee_surgery' else 'knee_surgery',
'ses_var':ses_var_name,
'ses_OR':get_OR_and_CI(regression_model),
'DV mean':'%2.3f' % regression_df['treatment'].mean() ,
'sig':sig_star(regression_model.pvalues['ses'])})
treatment_gaps_regression_results = pd.DataFrame(treatment_gaps_regression_results)[['just_baseline',
'klg_control',
'treatment',
'ses_var',
'ses_OR',
'sig',
'DV mean',
'n_obs']]
print(treatment_gaps_regression_results)
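# A minimal, hedged sketch of the specification used above (treatment ~ ses + C(klg), logistic,
# cluster-robust SEs by person). The toy data are randomly generated and purely illustrative.
def _demo_treatment_gap_regression():
    rng = np.random.RandomState(0)
    n = 200
    toy = pd.DataFrame({'ses': rng.binomial(1, 0.5, n).astype(float),
                        'klg': rng.randint(0, 5, n),
                        'id': np.repeat(np.arange(n // 2), 2)})
    toy['treatment'] = rng.binomial(1, 0.2 + 0.1 * toy['ses'].values + 0.05 * toy['klg'].values)
    model = sm.Logit.from_formula('treatment ~ ses + C(klg)', data=toy).fit(
        cov_type='cluster', cov_kwds={'groups': toy['id'].values}, disp=0)
    return np.exp(model.params['ses'])  # odds ratio on the SES indicator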
def study_effect_of_surgery(df_to_use, surgery_col_to_analyze):
"""
The goal here was to show that people are in less pain after surgery, which is true for arthroplasty (not arthroscopy).
"""
pd.set_option('display.width', 500)
df_to_use = df_to_use.copy()
df_to_use['high_pain'] = binarize_koos(df_to_use['koos_pain_subscore'])
print("Prior to dropping people with missing %s data, %i rows" % (surgery_col_to_analyze, len(df_to_use)))
df_to_use = df_to_use.dropna(subset=[surgery_col_to_analyze])
print("After dropping people with missing %s data, %i rows" % (surgery_col_to_analyze, len(df_to_use)))
df_to_use['id_plus_side'] = df_to_use['id'].astype(str) + '*' + df_to_use['side'].astype(str)
medications = ['rxactm', 'rxanalg', 'rxasprn', 'rxnarc', 'rxnsaid', 'rxothan']
outcomes = ['koos_pain_subscore', 'high_pain'] + medications + ['all_pain_medications_combined']
df_to_use['all_pain_medications_combined'] = False
for k in medications:
df_to_use['all_pain_medications_combined'] = (df_to_use['all_pain_medications_combined'] | (df_to_use[k] == 1))
grouped_d = df_to_use.groupby('id_plus_side')
outcomes_to_changes = {}
for outcome in outcomes:
outcomes_to_changes[outcome] = []
outcomes_to_changes['pre_surgery_klg'] = []
outcomes_to_changes['pre_surgery_discretized_yhat'] = []
for group_id, small_d in grouped_d:
small_d = small_d.copy().sort_values(by='visit')
if small_d[surgery_col_to_analyze].sum() == 0:
continue
if small_d[surgery_col_to_analyze].iloc[0] == 1:
continue
small_d.index = range(len(small_d))
before_surgery = small_d[surgery_col_to_analyze] == 0
after_surgery = small_d[surgery_col_to_analyze] == 1
assert before_surgery.sum() > 0
assert after_surgery.sum() > 0
outcomes_to_changes['pre_surgery_klg'].append(small_d.loc[before_surgery, 'xrkl'].dropna().mean())
if 'discretized_yhat' in small_d.columns:
outcomes_to_changes['pre_surgery_discretized_yhat'].append(small_d.loc[before_surgery, 'discretized_yhat'].dropna().mean())
else:
outcomes_to_changes['pre_surgery_discretized_yhat'].append(np.nan)
for outcome in outcomes:
if pd.isnull(small_d[outcome]).mean() > 0:
continue
before_surgery_mean = small_d.loc[before_surgery, outcome].mean()
after_surgery_mean = small_d.loc[after_surgery, outcome].mean()
outcomes_to_changes[outcome].append({'before_surgery':before_surgery_mean, 'after_surgery':after_surgery_mean})
assert sorted(small_d[surgery_col_to_analyze].values) == list(small_d[surgery_col_to_analyze].values)
outcomes_to_changes['pre_surgery_klg'] = np.array(outcomes_to_changes['pre_surgery_klg'])
outcomes_to_changes['pre_surgery_discretized_yhat'] = np.array(outcomes_to_changes['pre_surgery_discretized_yhat'])
if np.isnan(outcomes_to_changes['pre_surgery_discretized_yhat']).mean() < 1:
assert (np.isnan(outcomes_to_changes['pre_surgery_klg']) == np.isnan(outcomes_to_changes['pre_surgery_discretized_yhat'])).all()
for k in ['pre_surgery_klg', 'pre_surgery_discretized_yhat']:
not_nan = ~np.isnan(outcomes_to_changes[k])
print('Mean of %s prior to surgery in people who had surgery: %2.5f; median %2.5f' % (k,
outcomes_to_changes[k][not_nan].mean(),
np.median(outcomes_to_changes[k][not_nan])))
results_df = []
for outcome in outcomes:
pre_surgery_values = np.array([a['before_surgery'] for a in outcomes_to_changes[outcome]])
post_surgery_values = np.array([a['after_surgery'] for a in outcomes_to_changes[outcome]])
t, p = ttest_rel(pre_surgery_values, post_surgery_values)
pretty_outcome_name = MEDICATION_CODES['V00' + outcome.upper()] if 'V00' + outcome.upper() in MEDICATION_CODES else outcome
results_df.append({'outcome':pretty_outcome_name,
'n':len(post_surgery_values),
'pre_surgery_larger':(pre_surgery_values > post_surgery_values).sum(),
'post_surgery_larger':(pre_surgery_values < post_surgery_values).sum(),
'no_change':(pre_surgery_values == post_surgery_values).sum(),
'pre_surgery_mean':pre_surgery_values.mean(),
'post_surgery_mean':post_surgery_values.mean(),
'p':p})
if np.isnan(outcomes_to_changes['pre_surgery_discretized_yhat']).mean() < 1:
        # check whether yhat predicts surgical outcomes -- but this turns out to be pretty impossible due to the small size of the test set.
for outcome in outcomes:
print(outcome)
pre_surgery_values = np.array([a['before_surgery'] for a in outcomes_to_changes[outcome]])
post_surgery_values = np.array([a['after_surgery'] for a in outcomes_to_changes[outcome]])
for k in ['pre_surgery_klg', 'pre_surgery_discretized_yhat']:
not_nan = ~np.isnan(outcomes_to_changes[k])
r, p = pearsonr(outcomes_to_changes[k][not_nan], post_surgery_values[not_nan] - pre_surgery_values[not_nan])
print("Correlation between %s and post-surgery change: %2.3f, p=%2.3e; n=%i" % (k, r, p, not_nan.sum()))
return | pd.DataFrame(results_df) | pandas.DataFrame |
from sklearn.metrics.pairwise import euclidean_distances
from human_ISH_config import *
import pandas as pd
import numpy as np
import scipy
from sklearn import metrics
import json
import os
#print (pd.show_versions())
def create_diagonal_mask(low_to_high_map, target_value=1):
"""
Create a block diagonal mask matrix from the input mapping.
The input pandas data frame has only two columns, the first is the
low level id (image, sample, or probe_id) and the second is the
high level mapping (gene, region, donor). The target_value argument can
be set to np.nan.
The output will be a matrix sized the number of low level ID's squared.
The column and row order will have to be rearranged to match your distance matrix.
"""
low_to_high_map.drop_duplicates()
grouped = low_to_high_map.groupby(low_to_high_map.columns[1])
ordered_low_level_names = list()
group_matrices = []
for name, group in grouped:
group_size = group.shape[0]
# build up row/col names, order doesn't matter within a group = they are all equal
ordered_low_level_names = ordered_low_level_names + group.iloc[:, 0].tolist()
# set the diagonal matrix to be the target value
single_group_matrix = np.full(shape=(group_size, group_size), fill_value=target_value)
group_matrices.append(single_group_matrix)
# add the individual matrices along the diagonal
relationship_matrix = scipy.linalg.block_diag(*group_matrices)
# convert to pandas dataframe and set names
relationship_df = pd.DataFrame(relationship_matrix, columns=ordered_low_level_names, index=ordered_low_level_names)
return relationship_df
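# A minimal, hedged usage sketch (not part of the original module): a tiny invented mapping of
# image ids to genes, to illustrate the block-diagonal structure create_diagonal_mask returns
# (target_value where the high-level label matches, 0 elsewhere).
def _demo_create_diagonal_mask():
    toy_map = pd.DataFrame({'image_id': ['img1', 'img2', 'img3', 'img4'],
                            'gene': ['GFAP', 'GFAP', 'RELN', 'RELN']})
    mask = create_diagonal_mask(toy_map, target_value=1)
    # Expected layout (rows/columns grouped by gene):
    #        img1  img2  img3  img4
    # img1    1     1     0     0
    # img2    1     1     0     0
    # img3    0     0     1     1
    # img4    0     0     1     1
    return mask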
def get_general_distance_and_relationship_matrix(path_to_embeddings,image_level_embed_file_name, study=None):
"""
This function uses the image_level_embeddings to create a distance matrix. It also creates a relationship matrix
for images that we have an embedding for.
It calculates the euclidean distance for every possible pair of images and also gives the relationship
(same gene/different gene) for every possible pair of images.
:param path_to_embeddings: path to the image_level_embeddings
:return: 2 pandas Data frames: distance matrix and the relationship matrix.
"""
if study == None:
images_info = pd.read_csv(os.path.join(DATA_DIR,STUDY,"human_ISH_info.csv"))
else:
images_info = pd.read_csv(os.path.join(DATA_DIR, study, "human_ISH_info.csv"))
dist_matrix_df = build_distance_matrix(os.path.join(path_to_embeddings, image_level_embed_file_name))
dist_matrix_rows = list(dist_matrix_df.index) # list of image IDs
dist_matrix_columns = list(dist_matrix_df) # list of image IDs
# --- sanity check -------------
if dist_matrix_rows != dist_matrix_columns:
print ("Something is wrong, the number and order of image IDs in distance matrix's rows and columns should the same.")
return None
# ------------------------------
genes = images_info[images_info['image_id'].isin(dist_matrix_rows)]['gene_symbol']
low_to_high_map = pd.DataFrame(list(zip(dist_matrix_rows, genes))) # create a 2-column df of image IDs and genes
relationship_df = create_diagonal_mask(low_to_high_map, target_value=1)
# --- check to see if rows and columns of dist matrix match the relationship matrix. ---------------------
# if they don't re-arrange them in the relationship matrix to match the dist matrix
dist_matrix_df, relationship_df = match_matrices(dist_matrix_df, relationship_df)
# ---------------------------------------------------------------------------------------------------------
return dist_matrix_df,relationship_df
def match_matrices(first_matrix_df, second_matrix_df):
"""
Checks to see if the two matrices match.
Matching means the number and order of rows and columns are the same (based on titles)
If they do not match, the function re-arranges the order of rows and columns of the second matrix to make it similar
to the first matrix.
    The function does not modify the values inside the matrices. It just re-arranges the order of columns and rows.
:param first_matrix_df: pandas Dataframe.
:param second_matrix_df: pandas Dataframe.
:return: 2 pandas Dataframes.
"""
first_matrix_array = first_matrix_df.to_numpy()
second_matrix_array = second_matrix_df.to_numpy()
first_matrix_rows = list(first_matrix_df.index)
first_matrix_columns = list(first_matrix_df)
second_matrix_rows = list(second_matrix_df.index)
second_matrix_columns = list(second_matrix_df)
if first_matrix_rows == second_matrix_rows and first_matrix_columns == second_matrix_columns:
print("They match!")
else:
print("They don't match. Re-arranging ...")
desired_permutation = []
for item in second_matrix_columns:
ind = first_matrix_columns.index(item) # get the correct order of image IDs from distance matrix columns
desired_permutation.append(ind)
idx = np.empty_like(desired_permutation)
idx[desired_permutation] = np.arange(len(desired_permutation))
second_matrix_array[:] = second_matrix_array[:, idx]
second_matrix_array[:] = second_matrix_array[idx, :]
second_matrix_df = pd.DataFrame(second_matrix_array, columns=first_matrix_columns, index=first_matrix_rows)
return first_matrix_df, second_matrix_df
def apply_mask(mask_matrix_df, original_matrix_df):
"""
Changes elements of the original array based on the mask array.
The original array and the mask array should have the same shape.
:param mask_matrix_df: pandas Data frame. Boolean mask. It has to be the same shape as the target array.
:param original_matrix_df: pandas Data frame. Original matrix that we want to apply the mask to.
:return: pandas data frame. This is the original array after masking. It is converted to pandas df.
"""
print("Applying the mask ...")
original_matrix_columns = list(original_matrix_df)
original_matrix_rows = list(original_matrix_df.index)
mask_array = mask_matrix_df.to_numpy()
original_array = original_matrix_df.to_numpy().astype(float)
# Note: np.nan cannot be inserted into an array of type int. The array needs to be float.
np.putmask(original_array, mask_array, np.nan)
after_masking_df = pd.DataFrame(original_array, columns=original_matrix_columns, index=original_matrix_rows)
return after_masking_df
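# A minimal, hedged usage sketch (invented 2x2 matrices): apply_mask sets every cell whose mask
# entry is truthy to NaN and leaves the remaining cells untouched.
def _demo_apply_mask():
    original = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]], columns=['a', 'b'], index=['a', 'b'])
    mask = pd.DataFrame([[0, 1], [1, 0]], columns=['a', 'b'], index=['a', 'b'])
    masked = apply_mask(mask, original)  # -> [[1.0, NaN], [NaN, 4.0]]
    return masked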
def AUC(dist_matrix_df, label_matrix_df, title, image_level_embed_file_name):
"""
Calculates the AUC using the positive and negative pairs.
It gets the actual labels of the pairs from the label matrix and the predicted labels based on the distance matrix.
It will ignore np.nan values in the two matrices.
:param dist_matrix_df: pandas data frame that has the euclidean distance between pairs of images. Some cells might be Nan.
:param label_matrix_df: pandas data frame that has the actual labels of the pairs of images. Some cells might be Nan.
These two matrices should completely match. Meaning they should have the same number of rows and columns, the order of rows and
columns should be the same, also any cell that is Nan in one matrix should also be Nan in the other one.
:return: float. The AUC value.
"""
print ("Calculating AUC ...")
dist_matrix_columns = list(dist_matrix_df)
dist_matrix_rows = list(dist_matrix_df.index)
label_matrix_columns = list(label_matrix_df)
label_matrix_rows = list(label_matrix_df.index)
if dist_matrix_columns == label_matrix_columns and dist_matrix_rows == label_matrix_rows:
print ("The two matrix match. Will continue to calculate AUC ...")
dist_matrix_array = dist_matrix_df.to_numpy()
label_matrix_array = label_matrix_df.to_numpy()
top_tri_ind_list = np.triu_indices(len(dist_matrix_columns),1)
top_tri_dist_matrix = dist_matrix_array[top_tri_ind_list]
top_tri_dist_matrix = top_tri_dist_matrix[~np.isnan(top_tri_dist_matrix)] # remove NaNs
print ("top triangle of dist matrix without NaNs:", len(top_tri_dist_matrix))
top_tri_ind_list = np.triu_indices(len(label_matrix_array), 1)
top_tri_label_matrix = label_matrix_array[top_tri_ind_list]
top_tri_label_matrix = top_tri_label_matrix [~np.isnan(top_tri_label_matrix)] # remove NaNs
print ("top triangle of label matrix without NaNs:",len(top_tri_label_matrix))
top_tri_label_matrix = top_tri_label_matrix.astype(int) # convert values to int so they are binary {0,1}
# #positive label is 0 because distance is closer for positive pairs.
fpr, tpr, thresholds = metrics.roc_curve(top_tri_label_matrix, top_tri_dist_matrix, pos_label=0)
auc_val = metrics.auc(fpr, tpr)
"""
if 'training_validation' in image_level_embed_file_name:
set_type = 'Training and Validation'
elif 'training' in image_level_embed_file_name:
set_type = 'Training'
elif 'validation' in image_level_embed_file_name:
set_type = 'Validation'
else:
set_type = None
#plot_curve(fpr, tpr, title, ['fpr', 'tpr'], set_type) #to generate tpr over fpr graphs
l = len(top_tri_dist_matrix)
l_sub = len(top_tri_dist_matrix) // 10
res = np.random.choice(l, l_sub)
top_tri_dist_matrix = [top_tri_dist_matrix[i] for i in res]
top_tri_label_matrix = [top_tri_label_matrix[i] for i in res]
#plot_curve(top_tri_label_matrix, top_tri_dist_matrix, title, ['label', 'distance'], set_type) # to generate distance over actual label graphs
"""
"""
# This piece of code will use the whole dist and label matrix and not just the top triangle.
# This is less efficient because we know that the matrices are symmetric.
dist_matrix_flatten = dist_matrix_array.flatten()
dist_matrix_flatten = dist_matrix_flatten[~np.isnan(dist_matrix_flatten)] # remove NaNs
label_matrix_flatten = label_matrix_array.flatten()
label_matrix_flatten = label_matrix_flatten[~np.isnan(label_matrix_flatten)] # remove NaNs
label_matrix_flatten = label_matrix_flatten.astype(int) # convert values to int so they are binary {0,1}
# #positive label is 0 because distance is closer for positive pairs.
fpr, tpr, thresholds = metrics.roc_curve(label_matrix_flatten, dist_matrix_flatten, pos_label=0)
auc_val = metrics.auc(fpr, tpr)
return auc_val
"""
return auc_val
else:
print ("The two matrices do not match.")
return None
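# A hedged toy illustration (invented numbers) of the labelling convention used in AUC above:
# pair labels are 1 for same-gene pairs, the score is a distance, and pos_label=0 means a larger
# distance should predict a different-gene pair.
def _demo_distance_auc():
    toy_labels = np.array([1, 1, 0, 0])              # 1 = same gene
    toy_distances = np.array([0.2, 0.4, 0.9, 1.1])   # same-gene pairs are closer
    fpr, tpr, _ = metrics.roc_curve(toy_labels, toy_distances, pos_label=0)
    return metrics.auc(fpr, tpr)  # 1.0 for this perfectly separated toy example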
def first_hit_percentage(dist_matrix_df, study=None):
"""
This function finds the image ID of the closest image to every image and then checks to see what percentage of
these pairs are positive, meaning they have the same gene.
It finds the closest image by calling the 'find_closest_image()' function and passing the distance matrix to it.
The distance matrix may have np.nan values in some cells. Those cells will be ignored when looking for the closest image.
Which cells in the distance matrix might be np.nan? That depends on the criteria of the evaluation which determines the
universe of the genes to be considered.
:param dist_matrix_df: data frame that has the euclidean distance between every possible pair of images in the dataset.
    The euclidean distances are calculated from the embedding vectors of each image.
:param study: the study that the embeddings belong to (i.e autism, schizophrenia)
# -----------------------
One step of the project is to pass disease images through a model that has been trained on healthy cortex images.
Because these models are trained on cortex, their corresponding files and information are in the cortex folder.
The disease embeddings that are generated by these models will also be stored in the cortex folder.
Therefore, the STUDY argument in the human_ISH_config.py is set to "cortex".
However, the main files of the disease dataset are in its own directory. That is why we have this argument 'study'.
Example: I have a model that has been trained on cortex images at some time stamp.
The info of that model such as its check points and its generated cortex embeddings are in:
DATA_DIR/cortex/segmentation_embeddings/timestamp
Also, when I pass the SZ and autism images through this model, I will store their embeddings in the same directory:
DATA_DIR/cortex/segmentation_embeddings/timestamp
    However, the info.csv file which has the information of schizophrenia images and needs to be used in evaluation is in:
DATA_DIR/schizophrenia
To summarize: in this scenario, the disease embeddings that we want to evaluate are in:
    DATA_DIR/STUDY/segmentation_embeddings/timestamp
but the disease info file is in:
DATA_DIR/study/
# -----------------------
:return: float. The percentage of images for which the closest image has the same gene.
"""
print ("Calculating first hit match percentage ...")
if study == None:
images_info = pd.read_csv(os.path.join(DATA_DIR,STUDY, "human_ISH_info.csv"))
else:
images_info = pd.read_csv(os.path.join(DATA_DIR, study, "human_ISH_info.csv"))
min_indexes_df = find_closest_image(dist_matrix_df) # min_indexes_df has two columns: an image ID and the ID of the closest image to that image
total_count = len(min_indexes_df) #total number of rows (== number of images)
image_gene_mapping = images_info[['image_id', 'gene_symbol']]
min_indexes_df = pd.merge(min_indexes_df, image_gene_mapping, left_on='id1', right_on='image_id')
min_indexes_df = pd.merge(min_indexes_df, image_gene_mapping, left_on='id2', right_on='image_id')
same_gene = min_indexes_df.query('gene_symbol_x == gene_symbol_y') # definition of positive
match_count = len(same_gene)
proportion = (match_count / total_count) * 100.0
return proportion
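# A hedged sketch only: find_closest_image is defined elsewhere in this project, so the helper
# below is an assumed, simplified reading of the same idea. It expects self-distances to already
# be inf and image_to_gene to be a dict mapping image_id -> gene_symbol.
def _demo_first_hit_percentage(dist_matrix_df, image_to_gene):
    closest = dist_matrix_df.idxmin(axis=1)  # nearest other image for every row
    matches = [image_to_gene[img] == image_to_gene[closest[img]] for img in dist_matrix_df.index]
    return 100.0 * np.mean(matches)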
def first_hit_match_percentage_and_AUC_results(path_to_embeddings ,image_level_embed_file_name, study = None):
"""
:param path_to_embeddings:
:param image_level_embed_file_name:
:param study: the study that the embeddings belong to (i.e autism, schizophrenia)
# -----------------------
One step of the project is to pass disease images through a model that has been trained on healthy cortex images.
Because these models are trained on cortex, their corresponding files and information are in the cortex folder.
The disease embeddings that are generated by these models will also be stored in the cortex folder.
Therefore, the STUDY argument in the human_ISH_config.py is set to "cortex".
However, the main files of the disease dataset are in its own directory. That is why we have this argument 'study'.
Example: I have a model that has been trained on cortex images at some time stamp.
The info of that model such as its check points and its generated cortex embeddings are in:
DATA_DIR/cortex/segmentation_embeddings/timestamp
Also, when I pass the SZ and autism images through this model, I will store their embeddings in the same directory:
DATA_DIR/cortex/segmentation_embeddings/timestamp
    However, the info.csv file which has the information of schizophrenia images and needs to be used in evaluation is in:
DATA_DIR/schizophrenia
To summarize: in this scenario, the disease embeddings that we want to evaluate are in:
    DATA_DIR/STUDY/segmentation_embeddings/timestamp
but the disease info file is in:
DATA_DIR/study/
# -----------------------
:return:
"""
general_distance_matrix , general_relationship_matrix = get_general_distance_and_relationship_matrix(path_to_embeddings, image_level_embed_file_name, study)
# ---- General ----------------------------------------------------------------------------------
# General means only look at gene. Is it the same gene or different gene. Do not check donor_id.
print ("---------------------------------- General ---------------------------------- ")
general_first_hit_percentage = first_hit_percentage(general_distance_matrix, study)
general_AUC = AUC(general_distance_matrix, general_relationship_matrix, "General", image_level_embed_file_name)
general_res = [general_first_hit_percentage, general_AUC]
# ---- Among Other Donors ------------------------------------------------------------------------
print ("---------------------------------- Other Donors ----------------------------- ")
    if study is None:
images_info = pd.read_csv(os.path.join( DATA_DIR,STUDY,"human_ISH_info.csv"))
else:
images_info = pd.read_csv(os.path.join(DATA_DIR, study, "human_ISH_info.csv"))
dist_matrix_rows = list(general_distance_matrix.index)
donors = images_info[images_info['image_id'].isin(dist_matrix_rows)]['donor_id']
    low_to_high_map = pd.DataFrame(list(zip(dist_matrix_rows, donors)))  # create a 2-column df of image IDs and donor IDs
mask_df = create_diagonal_mask(low_to_high_map, target_value=1) # the pairs that have the same donor will have label 1
general_relationship_matrix, arranged_mask_df = match_matrices(general_relationship_matrix, mask_df)
# after applying the mask, any cell that corresponds to a pair with the same donor will be Nan.
# therefore, we are limiting our universe of pairs to those that have different donors.
distance_matrix_after_masking = apply_mask(arranged_mask_df, general_distance_matrix)
relationship_matrix_after_masking = apply_mask(arranged_mask_df, general_relationship_matrix)
among_other_donors_first_hit_percentage = first_hit_percentage(distance_matrix_after_masking, study)
among_other_donors_AUC = AUC(distance_matrix_after_masking, relationship_matrix_after_masking, "Other Donors", image_level_embed_file_name)
among_other_donors_res = [among_other_donors_first_hit_percentage, among_other_donors_AUC]
# ---- Within Donor ----------------------------------------------------------------------------
    # so far, in mask_df, the pairs that have the same donor have label 1, and when we use this as a mask,
    # those pairs are set to NaN. But we need the opposite of that here:
    # pairs with a different donor should be 1, so that when we apply the mask, those pairs are set to NaN.
    # The idea is to convert every 0 into 1 and every 1 into 0.
print("---------------------------------- Within Donor ------------------------------ ")
inverted_mask_df = np.logical_not(mask_df).astype(int)
general_relationship_matrix, arranged_inverted_mask_df = match_matrices(general_relationship_matrix, inverted_mask_df)
    # after applying this inverted mask, any cell that corresponds to a pair with a different donor will be NaN.
    # therefore, we are limiting our universe of pairs to those that share the same donor.
distance_matrix_after_masking = apply_mask(arranged_inverted_mask_df, general_distance_matrix)
relationship_matrix_after_masking = apply_mask(arranged_inverted_mask_df, general_relationship_matrix)
    within_donor_first_hit_percentage = first_hit_percentage(distance_matrix_after_masking, study)
    within_donor_AUC = AUC(distance_matrix_after_masking, relationship_matrix_after_masking, "Within Donor", image_level_embed_file_name)
    within_donor_res = [within_donor_first_hit_percentage, within_donor_AUC]
return [general_res, among_other_donors_res, within_donor_res]
def build_distance_matrix(path_to_embeddings):
"""
    Distances are computed between all possible pairs of embedding vectors; the distance from an item to itself
    (the diagonal) is 0 here and is only masked out later in find_closest_image.
    :param path_to_embeddings: String. Path to the csv file that contains the embedding vectors (the first column is the image id)
:return: pandas DataFrame. A distance matrix that has the euclidean distance between all the possible pairs of embedding vectors
"""
embed_df = pd.read_csv(path_to_embeddings)
print ("length is: ", len(embed_df))
columns = list(embed_df)
distances = euclidean_distances(embed_df.iloc[:, 1:], embed_df.iloc[:, 1:])
embed_df = embed_df.set_index([columns[0]])
# format distance matrix
distances_df = pd.DataFrame(distances)
distances_df.columns = list(embed_df.index)
distances_df.index = list(embed_df.index)
print ("finished building the distance matrix ...")
print ("///////////////////")
print (len(distances_df))
return distances_df
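# Minimal, self-contained sketch of the distance-matrix construction used above, on a
# tiny made-up embedding table (the IDs and vectors are illustrative only).
def _toy_distance_matrix_example():
    import pandas as pd
    from sklearn.metrics.pairwise import euclidean_distances
    embed_df = pd.DataFrame({
        "image_id": ["img_a", "img_b", "img_c"],
        "dim_0": [0.0, 1.0, 0.0],
        "dim_1": [0.0, 0.0, 2.0],
    })
    distances = euclidean_distances(embed_df.iloc[:, 1:], embed_df.iloc[:, 1:])
    distances_df = pd.DataFrame(distances,
                                index=embed_df["image_id"],
                                columns=embed_df["image_id"])
    print(distances_df)  # 3x3 symmetric matrix with zeros on the diagonal
    return distances_df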
def find_closest_image(distances_df):
"""
:param distances_df: pandas DataFrame. A distance matrix that has the euclidean distance between all the possible
pairs of embedding vectors
:return: pandas DataFrame. Has 2 columns. The first column is an image_id, the second column is the image_id of
the corresponding closest image.
"""
# find the closest image in each row
default_value_for_diagonal = distances_df.iloc[0,0]
# set the distance between each image to itself as inf to make sure it doesn't get picked as closest
distances_df.values[[np.arange(distances_df.shape[0])] * 2] = float("inf")
min_indexes = distances_df.idxmin(axis=1, skipna=True)
min_indexes_df = pd.DataFrame(min_indexes).reset_index()
min_indexes_df.columns = ["id1", "id2"]
#min_indexes_df = min_indexes_df.applymap(str)
# set the distance between each image to itself back to the default
distances_df.values[[np.arange(distances_df.shape[0])] * 2] = float(default_value_for_diagonal)
print("finished finding the closest image ...")
return min_indexes_df
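# Small sketch of the idxmin-based nearest-neighbour lookup above, on a toy 3x3
# distance matrix (the distances are made up for illustration).
def _toy_closest_image_example():
    import numpy as np
    import pandas as pd
    ids = ["img_a", "img_b", "img_c"]
    arr = np.array([[0.0, 1.0, 2.0],
                    [1.0, 0.0, 3.0],
                    [2.0, 3.0, 0.0]])
    np.fill_diagonal(arr, np.inf)  # an image must never pick itself as the closest image
    distances_df = pd.DataFrame(arr, index=ids, columns=ids)
    min_indexes_df = pd.DataFrame(distances_df.idxmin(axis=1)).reset_index()
    min_indexes_df.columns = ["id1", "id2"]
    print(min_indexes_df)  # img_a -> img_b, img_b -> img_a, img_c -> img_a
    return min_indexes_df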
def not_the_same_gene(min_indexes_df, level):
"""
This function returns the proportion of images for which the closest image (based on the distance matrix) has a different gene.
This is a helper function to better understand the metrics and results.
Ideally, the closest image to an image should have the same gene. (same gene ==> same pattern ==> less distance)
:param min_indexes_df: a dataframe with two columns: image id, and image id of the closest image
    :param level: the integration level at which we are comparing the embeddings (e.g. 'image').
:return: float.
"""
if level == 'image':
total_count = len(min_indexes_df)
print ("total number of images: ", total_count)
info_csv_path = os.path.join(DATA_DIR, STUDY, "human_ISH_info.csv")
info_csv = pd.read_csv(info_csv_path, index_col=None)
gene_donor_mapping = info_csv[['gene_symbol', 'donor_id', 'image_id']]
gene_donor_mapping['image_id']=gene_donor_mapping['image_id'].astype(str)
min_indexes_df = pd.merge(min_indexes_df, gene_donor_mapping, left_on='id1', right_on='image_id')
min_indexes_df = pd.merge(min_indexes_df, gene_donor_mapping, left_on='id2', right_on='image_id')
not_the_same_image = min_indexes_df.query('image_id_x != image_id_y')
not_the_same_gene = not_the_same_image.query('gene_symbol_x != gene_symbol_y')
print(not_the_same_gene)
match_count = len(not_the_same_gene)
print("number of matches with not the same gene is: ", match_count)
proportion = (match_count / total_count) * 100.0
print ("proportion is: ", proportion)
return proportion
def get_creation_time(ts):
"""
This function gets the creation time of the embedding csv file.
It is designed to be used for embeddings that are generated by the triplet model.
I am using the embedding file in the experiment_files folder. The reason is that in linux, there is no simple way
    of getting the creation time of a file. Instead, we can get the last time it was modified.
Every embedding file generated by the triplet model is saved in EXPERIMENT_ROOT. From there, it is also copied inside
EMBEDDING_DEST.
After copying, I use the copied version in EMBEDDING_DEST so the initial one in EXPERIMENT_ROOT is probably never
    accessed or modified, and its last-modified time will be almost the same as its creation time.
:param ts: folder name.
:return: creation time stamp.
"""
path_to_embed_file = os.path.join(DATA_DIR, STUDY, "experiment_files", "experiment_"+ ts, "triplet_training_validation_embeddings.h5")
if os.path.exists(path_to_embed_file):
stat = os.stat(path_to_embed_file)
try:
return stat.st_birthtime
except AttributeError:
# We're probably on Linux. No easy way to get creation dates here,
# so we'll settle for when its content was last modified.
return stat.st_mtime
else:
print ("here, path is: ", path_to_embed_file)
return None
def disease_embed_evaluate(study, ts_list):
for ts in ts_list:
if ts == "random" or "resnet" in ts:
path_to_embeddings = os.path.join(DATA_DIR, study, "segmentation_embeddings", ts)
eval_path = os.path.join(DATA_DIR, study, "segmentation_embeddings", ts)
base_case = True
else:
path_to_embeddings = os.path.join(EMBEDDING_DEST, ts)
eval_path = os.path.join(EMBEDDING_DEST, ts)
base_case = False
image_level_files_list = []
print ("ts is: ", ts)
print ("path to embeddings is: ", path_to_embeddings)
contents = os.listdir(path_to_embeddings)
print (contents)
for item in contents:
if base_case:
if item.endswith("embeddings_image_level.csv"):
image_level_files_list.append(item)
else:
if item.endswith("embeddings_image_level.csv") and study in item:
#if item.endswith("five.csv"):
image_level_files_list.append(item)
print (image_level_files_list)
for item in image_level_files_list:
# for every image level embedding file, call another function to calculate first hit match percentage and AUC
image_level_embed_file_name = item
results = first_hit_match_percentage_and_AUC_results(path_to_embeddings, image_level_embed_file_name, study)
# list of columns to have in the evaluation table.
columns = ["ts", "dataset","number of embeddings", "general_first_hit_percentage", "general_AUC",
"among_other_donors_first_hit_percentage","among_other_donors_AUC",
"within_donor_first_hit_percentage", "within_donor_AUC"]
df = pd.read_csv(os.path.join(path_to_embeddings, image_level_embed_file_name))
number_of_embeddings = len(df)
eval_results_df = pd.DataFrame(columns=columns)
general_res = results[0]
among_other_donors_res = results[1]
within_donor_res = results[2]
eval_results_df.loc[0] = [ts, study, number_of_embeddings, general_res[0], general_res[1],
among_other_donors_res[0],
among_other_donors_res[1],
within_donor_res[0], within_donor_res[1]]
eval_result_file_name = item.split(".")[0] + "_evaluation_result_top_tri.csv"
eval_results_df.to_csv(os.path.join(eval_path, eval_result_file_name), index=None)
def evaluate(ts, not_found_list):
"""
The function evaluates the embeddings. It gets a folder name as input. If embeddings are generated by the triplet model,
the input folder name is a time stamp.
The function then reads all the image level embedding csv files within that folder. Normally, there should be 3 files:
One for training set embeddings, one for validation set embeddings, and one for training+validation.
:param ts: Folder name
    :return: not_found_list, updated with any embedding paths that could not be found. For every image level embedding file inside this folder, the function also creates an evaluation csv file.
"""
path_to_embeddings = os.path.join(EMBEDDING_DEST, ts)
image_level_files_list = []
if not os.path.exists(path_to_embeddings):
print ("Could not find ", path_to_embeddings)
not_found_list.append(path_to_embeddings)
pass
else:
contents = os.listdir(path_to_embeddings)
for item in contents:
if item.endswith("embeddings_image_level.csv") and 'autism' not in item and 'schizophrenia' not in item:
image_level_files_list.append(item)
for item in image_level_files_list:
# for every image level embedding file, call another function to calculate first hit match percentage and AUC
image_level_embed_file_name = item
results = first_hit_match_percentage_and_AUC_results(path_to_embeddings,image_level_embed_file_name)
# from args.json :
args_names, args_val = get_arguments_from_json(ts)
# list of columns to have in the evaluation table.
columns = ["ts", "number of embeddings", "duration", "general_first_hit_percentage", "general_AUC", "among_other_donors_first_hit_percentage",
"among_other_donors_AUC", "within_donor_first_hit_percentage", "within_donor_AUC"]
# ------ number of embeddings and duration ----
df = pd.read_csv(os.path.join(path_to_embeddings, image_level_embed_file_name))
number_of_embeddings = len(df)
# duration means the amount of time between when the folder was created and when the embeddings were generated.
# it is the amount of time that it took the model to generate these embeddings.
# this argument is valid for embeddings that were generated by the triplet model.
print ("/////////////////////////////////")
print (args_names)
if args_names != None and "finish_time" in args_names:
idx = args_names.index("finish_time")
creation_time = int(args_val[idx])
duration = creation_time - int(ts)
else:
creation_time = get_creation_time(ts)
if creation_time != None:
creation_time = int(creation_time)
duration = creation_time - int(ts)
else:
duration = -1
# ---------------------------------------------
if args_names != None and args_val != None:
columns = columns[0:3] + args_names + columns[3:]
eval_results_df = pd.DataFrame(columns=columns)
general_res = results[0]
among_other_donors_res = results[1]
within_donor_res = results[2]
if args_names != None and args_val != None:
eval_results_df.loc[0] = [ts, number_of_embeddings, duration] + args_val + [general_res[0], general_res[1], among_other_donors_res[0],
among_other_donors_res[1],
within_donor_res[0], within_donor_res[1]]
else:
eval_results_df.loc[0] = [ts, number_of_embeddings, duration, general_res[0], general_res[1], among_other_donors_res[0],
among_other_donors_res[1],
within_donor_res[0], within_donor_res[1]]
eval_result_file_name = item.split(".")[0] + "_evaluation_result_top_tri.csv"
eval_path = os.path.join(EMBEDDING_DEST, ts)
eval_results_df.to_csv(os.path.join(eval_path, eval_result_file_name), index=None)
#----------
return not_found_list
def get_json_argument_list():
"""
Returns a list of arguments from json files that we are interested in and we want to keep as columns in the evaluation tables.
:return: list of arguments
"""
list_of_arguments_to_get = ["finish_time", "segmentation_training_samples", "patch_count_per_image", "learning_rate", "batch_k",
"batch_p", "flip_augment", "standardize", "margin", "metric"]
return list_of_arguments_to_get
def get_arguments_from_json(ts):
"""
Embeddings that are generated with the triplet model are saved in folders that have time stamp as name.
There is an args.json file in each folder that has the values for the arguments.
The function checks to see if there is an args.json file within that folder. If yes, it looks for arguments
from a list of arguments and returns the argument value. If that argument does not exist in the json file, it is returned as -1.
:param ts: The embedding folder's name which is usually a time stamp.
:return: two lists. The first list is a list of arguments, the second list is those arguments' values.
"""
list_of_arguments_to_get = get_json_argument_list()
args_val_list = []
path_to_embeddings = os.path.join(EMBEDDING_DEST, ts)
args_file = os.path.join(path_to_embeddings, "args.json")
if not os.path.exists(args_file):
print ("There is no args.json file in ", path_to_embeddings)
return None, None
if os.path.isfile(args_file):
with open(args_file, 'r+') as f:
args_resumed = json.load(f)
for arg in list_of_arguments_to_get:
if arg in args_resumed:
args_val_list.append(args_resumed[arg])
else:
args_val_list.append(-1)
f.close()
return list_of_arguments_to_get, args_val_list
def concat_disease_evaluation_results(study, list_of_folders):
"""
The function uses a list of folders, goes through each folder and reads its evaluation csv files.
:param study: the target study
:param list_of_folders: list of folders that contain the evaluation result of the embeddings
:return:
"""
eval_df_list = []
for item in list_of_folders:
if item == "random" or "resnet" in item:
path_to_eval_folder = os.path.join(DATA_DIR, study, "segmentation_embeddings", item)
base_case = True
else:
path_to_eval_folder = os.path.join(EMBEDDING_DEST, item)
base_case = False
files = os.listdir(path_to_eval_folder)
for f in files:
# for each evaluation result csv file, see whether it is from training set, or validation set, or training+validation
if base_case == True:
if f.endswith("image_level_evaluation_result_top_tri.csv"):
df = pd.read_csv(os.path.join(path_to_eval_folder, f))
eval_df_list.append(df)
else:
if f.endswith("image_level_evaluation_result_top_tri.csv") and study in f:
df = pd.read_csv(os.path.join(path_to_eval_folder, f))
eval_df_list.append(df)
columns = list(eval_df_list[0])
concatenated_df = pd.concat(eval_df_list, sort=False)
concatenated_df.to_csv(os.path.join(EMBEDDING_DEST, study+ "_all_evaluation_result_top_tri.csv"),index=None)
def concat_all_evaluation_results(list_of_folders):
"""
The function uses a list of folders, goes through each folder and reads its evaluation csv files.
Normally, there should be 3 files in each folder: one for training set evaluation, one for validation set evaluation,
and one for training+validation set evaluation.
The function first concatenates each set's results from all the folders (concatenates vertically) into csv files.
So there will be 3 csv files, one for training, one for validation, and one for training+validation.
    Number of rows == number of folders.
The function then concatenates those 3 csv files horizontally into a final general csv file.
:param list_of_folders: list of folders that contain the evaluation result of the embeddings
:return: None. The function generates 4 csv files.
training_all_evaluation_result_top_tri.csv
validation_all_evaluation_result_top_tri.csv
training_and_validation_all_evaluation_result_top_tri.csv
all_evaluation_result_top_tri.csv
"""
train_eval_df_list = []
val_eval_df_list = []
train_val_eval_df_list = []
for item in list_of_folders:
path_to_eval_folder = os.path.join(EMBEDDING_DEST, item)
files = os.listdir(path_to_eval_folder)
for f in files:
# for each evaluation result csv file, see whether it is from training set, or validation set, or training+validation
if f.endswith("image_level_evaluation_result_top_tri.csv"):
if "random" in f:
if "random_training_validation" in f:
df = pd.read_csv(os.path.join(path_to_eval_folder, f))
train_val_eval_df_list.append(df)
elif "random_training" in f:
df = pd.read_csv(os.path.join(path_to_eval_folder, f))
train_eval_df_list.append(df)
elif "random_validation" in f:
df = pd.read_csv(os.path.join(path_to_eval_folder, f))
val_eval_df_list.append(df)
else:
if "triplet" in f:
df = pd.read_csv(os.path.join(path_to_eval_folder, f))
train_val_eval_df_list.append(df)
elif "training" in f:
df = pd.read_csv(os.path.join(path_to_eval_folder, f))
train_eval_df_list.append(df)
elif "validation" in f:
df = pd.read_csv(os.path.join(path_to_eval_folder, f))
val_eval_df_list.append(df)
# add 'training_' or 'validation_' to the column names of evaluation results coming from training and validation sets.
# This is to be able to distinguish them in the final general csv file.
columns = list(train_val_eval_df_list[0])
train_columns = ["training_"+item for item in columns[1:]]
train_columns = [columns[0]] + train_columns
train_columns_dict ={}
val_columns = ["validation_"+item for item in columns[1:]]
val_columns = [columns[0]] + val_columns
val_columns_dict ={}
#train_and_val_columns = ["train_and_validation_"+item for item in columns[1:]]
#train_and_val_columns = [columns[0]] + train_and_val_columns
#train_and_val_columns_dict ={}
for i in range(len(columns)):
train_columns_dict[columns[i]] = train_columns[i]
val_columns_dict[columns[i]] = val_columns[i]
#train_and_val_columns_dict[columns[i]] = train_and_val_columns[i]
concatenated_training_df = pd.concat(train_eval_df_list, sort=False)
concatenated_training_df = concatenated_training_df.rename(columns=train_columns_dict)
concatenated_validation_df = pd.concat(val_eval_df_list, sort=False)
concatenated_validation_df = concatenated_validation_df.rename(columns=val_columns_dict)
concatenated_train_and_validation_df = | pd.concat(train_val_eval_df_list, sort=False) | pandas.concat |
# code stolen from https://github.com/xhochy/nyc-taxi-fare-prediction-deployment-example
# download the data from https://www1.nyc.gov/site/tlc/about/tlc-trip-record-data.page
# use the pandas code snippet from here
# https://github.com/xhochy/nyc-taxi-fare-prediction-deployment-example/blob/main/training/Train.ipynb
# to convert the CSV to parquet
import lightgbm
import numpy as np
import pandas as pd
from sklearn.compose import make_column_transformer
from sklearn.preprocessing import FunctionTransformer
def haversine_distance(lat1, lng1, lat2, lng2):
lat1, lng1, lat2, lng2 = (np.radians(x) for x in (lat1, lng1, lat2, lng2))
d = (
np.sin(lat2 / 2 - lat1 / 2) ** 2
+ np.cos(lat1) * np.cos(lat2) * np.sin(lng2 / 2 - lng1 / 2) ** 2
)
return 2 * 6371 * np.arcsin(np.sqrt(d)) # 6,371 km is the earth radius
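# Hedged sanity check for the haversine helper above. The two coordinate pairs are
# illustrative (roughly Times Square and JFK airport), not values from the dataset.
def _haversine_sanity_check():
    d = haversine_distance(40.7580, -73.9855, 40.6413, -73.7781)
    print("distance: {:.1f} km".format(d))  # roughly 20 km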
def haversine_distance_from_df(df):
return pd.DataFrame(
{
"haversine_distance": haversine_distance(
df["pickup_latitude"],
df["pickup_longitude"],
df["dropoff_latitude"],
df["dropoff_longitude"],
)
}
)
def split_pickup_datetime(df):
return pd.DataFrame(
{
"pickup_dayofweek": df["tpep_pickup_datetime"].dt.dayofweek,
"pickup_hour": df["tpep_pickup_datetime"].dt.hour,
"pickup_minute": df["tpep_pickup_datetime"].dt.minute,
}
)
def feature_enginering():
return make_column_transformer(
(FunctionTransformer(), ["passenger_count"]),
(
FunctionTransformer(func=split_pickup_datetime),
["tpep_pickup_datetime"],
),
(
FunctionTransformer(
func=haversine_distance_from_df,
),
[
"pickup_latitude",
"pickup_longitude",
"dropoff_latitude",
"dropoff_longitude",
],
),
)
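# Hedged sketch of how the transformer above could feed a LightGBM regressor in a
# scikit-learn pipeline. The function name and hyperparameters are placeholders,
# not values taken from the original project.
def build_model_pipeline():
    from sklearn.pipeline import make_pipeline

    return make_pipeline(
        feature_enginering(),
        lightgbm.LGBMRegressor(n_estimators=100, learning_rate=0.1),
    )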
if __name__ == "__main__":
used_columns = [
"fare_amount",
"pickup_latitude",
"pickup_longitude",
"dropoff_latitude",
"dropoff_longitude",
"tpep_pickup_datetime",
"passenger_count",
]
df = | pd.read_parquet("data/yellow_tripdata_2016-01.parquet", columns=used_columns) | pandas.read_parquet |
import os
import pandas as pd
import numpy as np
def read_config(filename):
"""
Read and parse configuration file containing stored user variables.
These variables are then passed to the analysis notebooks
and input to pipeline functions.
"""
f = open(filename)
config_dict = {}
for lines in f:
items = lines.split("\t", 1)
        config_dict[items[0]] = eval(items[1])  # values are parsed with eval(), so they must be valid Python literals (e.g. quoted strings, numbers)
return config_dict
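# Illustrative sketch of the tab-separated layout read_config() expects, plus a small
# round trip through it. The keys and values below are made up for demonstration;
# real config files for this pipeline may use different variables.
def _read_config_example(tmp_path="example_config.tsv"):
    with open(tmp_path, "w") as fh:
        fh.write('local_dir\t"/tmp/example"\n')
        fh.write('dataset_name\t"example_dataset"\n')
        fh.write("num_simulated\t25\n")
    params = read_config(tmp_path)
    # eval() turns the quoted strings into str objects and 25 into an int
    print(params["dataset_name"], params["num_simulated"])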
def setup_dir(config_file):
"""
Create directories to store files created by VAE training and
simulation analysis
Arguments
----------
config_file: str
File containing user defined parameters
"""
base_dir = os.path.abspath(os.path.join(os.getcwd(), "../"))
# Read in config variables
params = read_config(config_file)
# Load parameters
local_dir = params["local_dir"]
dataset_name = params["dataset_name"]
train_architecture = params["NN_architecture"]
# Create VAE directories
output_dirs = [
os.path.join(base_dir, dataset_name, "models"),
os.path.join(base_dir, dataset_name, "logs"),
]
# Check if the following directories exist
# and if not to create them
for each_dir in output_dirs:
# Check if analysis output directory exist otherwise create
if not os.path.exists(each_dir):
print("creating new directory: {}".format(each_dir))
os.makedirs(each_dir, exist_ok=True)
# Check if NN architecture directory exist otherwise create
NN_dir = os.path.join(each_dir, train_architecture)
if not os.path.exists(NN_dir):
print("creating new directory: {}".format(NN_dir))
os.makedirs(NN_dir, exist_ok=True)
# Check if analysis output directory exist otherwise create
results_dir = os.path.join(base_dir, dataset_name, "results")
if not os.path.exists(results_dir):
print("creating new directory: {}".format(results_dir))
os.makedirs(results_dir, exist_ok=True)
# Check if 'saved_variables' directory exist otherwise create
var_dir = os.path.join(results_dir, "saved_variables")
if not os.path.exists(var_dir):
print("creating new directory: {}".format(var_dir))
os.makedirs(var_dir, exist_ok=True)
# Create local directories to store intermediate files
output_dirs = [
os.path.join(local_dir, "experiment_simulated"),
os.path.join(local_dir, "partition_simulated"),
]
# Check if analysis output directory exist otherwise create
for each_dir in output_dirs:
if not os.path.exists(each_dir):
print("creating new directory: {}".format(each_dir))
os.makedirs(each_dir, exist_ok=True)
def create_experiment_id_file(metadata_file, input_data_file, output_file, config_file):
"""
Create file with experiment ids that are associated with expression data
Arguments
----------
metadata_file: str
File containing metadata annotations per sample
input_data_file: str
File containing normalized expression data
output_file: str
File containing experiment ids with expression data and sample annotations
config_file: str
File containing user defined parameters
"""
# Read in metadata
metadata = | pd.read_csv(metadata_file, header=0, sep="\t", index_col=0) | pandas.read_csv |
import csv
import numpy as np
from matplotlib import pyplot as plt
import scipy.stats as stats
import pandas as pd
def read_data(datafile):
data = pd.read_csv(datafile)
return np.array(data['x']), np.array(data['y'])
def plotdata(data, color):
for x in data:
plt.plot(x[0], x[1], color)
def plotGaussian(mu, var, sigma, label, subplot):
x = np.linspace(mu - 3*sigma, mu + 3*sigma, 100)
ax[subplot].plot(x, stats.norm.pdf(x, mu, sigma), label=label)
ax[subplot].axvline(mu, ls='--', lw=0.5, c='gray')
def calcGaussian(array):
mu = np.mean(array, axis=0)
var = np.var(array, axis=0)
sigma = np.sqrt(var)
return mu, var, sigma
fig, ax = plt.subplots(2, 1)
dataFiles = ['data_38400.csv', 'data_57600.csv', 'data_115200.csv']
pandas = []
for file in dataFiles:
label = file[5:-4]
x_values, y_values = read_data(file)
mu_x, var_x, sigma_x = calcGaussian(x_values)
plotGaussian(mu_x, var_x, sigma_x, label, 0)
mu_y, var_y, sigma_y = calcGaussian(y_values)
plotGaussian(mu_y, var_y, sigma_y, label, 1)
pandas.append([label, mu_x, var_x, sigma_x, mu_y, var_y, sigma_y])
df = | pd.DataFrame(pandas, columns=['Baudrate', 'mu_x', 'var_x', 'sigma_x', 'mu_y', 'var_y', 'sigma_y']) | pandas.DataFrame |
# Copyright 2017 QuantRocket LLC - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib import cycler
from matplotlib.backends.backend_pdf import PdfPages
from .utils import (
with_baseline,
get_cum_returns)
from matplotlib.ticker import FuncFormatter
class BaseTearsheet(object):
"""
Base class for tear sheets.
Parameters
----------
figsize : tuple (width, height), optional
(width, height) of matplotlib figure. Default is (16, 12)
pdf_filename : string, optional
save tear sheet to this filepath as a PDF instead of displaying
max_cols_for_details : int, optional
suppress detailed plots if there are more than this many columns
(i.e. strategies or securities). Too many plots may cause slow
rendering. Default 25.
"""
def __init__(self, pdf_filename=None, figsize=None, max_cols_for_details=25):
self.figsize = figsize or (16.0, 12.0) # width, height in inches
plt.rc("axes", axisbelow=True)
if pdf_filename:
self._pdf = PdfPages(pdf_filename, keep_empty=True)
else:
self._pdf = None
self._suptitle = None
self._suptitle_kwargs = {
"bbox": dict(facecolor="#EAEAF2", edgecolor='white', alpha=0.5)}
self.max_cols_for_details = max_cols_for_details
def _save_or_show(self):
"""
Saves the fig to the multi-page PDF, or shows it.
"""
if self._pdf:
for fignum in plt.get_fignums():
self._pdf.savefig(fignum, bbox_inches='tight')
plt.close("all")
self._pdf.close()
else:
plt.show()
def _y_format_as_percentage(self, axis, max_decimal_places=1):
"""
Sets a Y-axis formatter that converts a decimal to a percentage (e.g.
0.12 -> 12.0%)
"""
def format_as_pct(x, pos):
# Round to max_decimal_places (12.1%) unless it doesn't matter (12%
# not 12.0%)
decimal_places = max_decimal_places
while decimal_places > 0:
rounded_result = round(x, decimal_places+2)
more_rounded_result = round(x, decimal_places+1)
if rounded_result != more_rounded_result:
return ('{:.%d%%}' % decimal_places).format(x)
decimal_places -= 1
return '{:.0%}'.format(x)
y_axis_formatter = FuncFormatter(format_as_pct)
        axis.yaxis.set_major_formatter(y_axis_formatter)
def _y_format_at_least_two_decimal_places(self, axis):
"""
Sets a Y-axis formatter that rounds a decimal to two decimal places.
"""
def format_at_least_two_decimal_places(x, pos):
if round(x,2) == round(x,3):
return '{:.2f}'.format(x)
else:
decimal_places = 3
while True:
if math.isclose(round(x, decimal_places), round(x, decimal_places+1)):
return round(x, decimal_places)
decimal_places += 1
y_axis_formatter = FuncFormatter(format_at_least_two_decimal_places)
        axis.yaxis.set_major_formatter(y_axis_formatter)
def _get_plot_dimensions(self, plot_count):
"""
        Returns a tuple of rows, cols needed to accommodate the plot_count.
"""
rows = math.ceil(math.sqrt(plot_count))
cols = math.ceil(plot_count/rows)
return rows, cols
def _clear_legend(self, plot, legend_title=None):
"""
Anchors the legend to the outside right of the plot area so you can
see the plot.
"""
plot.legend(
loc='center left', bbox_to_anchor=(1, 0.5), title=legend_title)
def _create_returns_plots(self, performance, subplot, extra_label, figsize=None,
legend_title=None):
"""
Creates agg/details plots for cumulative returns, drawdowns, rolling
Sharpe, and possibly pnl.
"""
figsize = figsize or self.figsize
color_palette = sns.color_palette()
if isinstance(performance.cum_returns, pd.DataFrame):
num_series = len(performance.cum_returns.columns)
if performance.benchmark_returns is not None:
num_series += 1
if num_series > 6:
color_palette = sns.color_palette("hls", num_series)
with sns.color_palette(color_palette):
fig = plt.figure("Cumulative Returns", figsize=figsize)
axis = fig.add_subplot(subplot)
max_return = performance.cum_returns.max(axis=0)
if isinstance(max_return, pd.Series):
max_return = max_return.max(axis=0)
# If the price more than doubled, use a log scale
if max_return >= 2:
axis.set_yscale("log", basey=2)
axis.set_ylabel("Cumulative return (log scale)")
else:
self._y_format_at_least_two_decimal_places(axis)
axis.set_ylabel("Cumulative return")
include_commissions = (
performance.commissions is not None
# if all commissions are null/0, don't show them
and (performance.commissions.fillna(0) != 0).any())
include_slippage = (
performance.slippages is not None
# if all slippages are null/0, don't show them
and (performance.slippages.fillna(0) != 0).any())
if (
# a 212 subplot means a detailed plot, which isn't compatible with
# showing commissions and slippage
subplot != 212 and (include_commissions or include_slippage)):
if include_commissions:
cum_commissions = performance.cum_commissions
cum_commissions.name = "commissions"
if include_slippage:
cum_slippages = performance.cum_slippages
cum_slippages.name = "slippage"
performance.cum_returns.name = "returns"
cum_gross_returns = performance.cum_returns
if include_commissions:
cum_gross_returns = cum_gross_returns.multiply(cum_commissions)
if include_slippage:
cum_gross_returns = cum_gross_returns.multiply(cum_slippages)
cum_gross_returns.name = "gross returns"
breakdown_parts = [performance.cum_returns, cum_gross_returns]
if include_commissions:
breakdown_parts.append(cum_commissions)
if include_slippage:
breakdown_parts.append(cum_slippages)
try:
returns_breakdown = | pd.concat(breakdown_parts, axis=1, sort=True) | pandas.concat |
import numpy as np
import pandas as pd
from glob import glob
import matplotlib.pyplot as plt
'''
turbine-05_helihoist-1_tom_acc-vel-pos_hammerhead_2019-09-10-16-04-47_2019-09-20-02-53-43
turbine-05_helihoist-1_tom_geometry_hammerhead_2019-09-10-16-04-47_2019-09-20-02-53-43
turbine-05_helihoist-1_tom_acc-vel-pos_sbi1_2019-09-20-02-53-43_2019-09-20-07-42-54
turbine-05_sbitroot_tom_acc-vel-pos_sbi1_2019-09-20-02-34-11_2019-09-20-07-33-33
turbine-05_sbittip_tom_acc-vel-pos_sbi1_2019-09-20-02-47-05_2019-09-20-07-43-54
turbine-05_sbittip_tom_acc-vel-pos_sbi2_2019-09-20-12-07-46_2019-09-20-13-00-55
turbine-05_sbitroot_tom_acc-vel-pos_sbi2_2019-09-20-12-03-56_2019-09-20-12-58-11
turbine-05_helihoist-1_tom_acc-vel-pos_sbi2_2019-09-20-12-01-12_2019-09-20-12-51-37
turbine-05_sbittip_tom_acc-vel-pos_tnhb1_2019-09-20-07-43-54_2019-09-20-12-07-46
turbine-05_sbitroot_tom_acc-vel-pos_tnhb1_2019-09-20-07-33-33_2019-09-20-12-03-56
turbine-05_helihoist-1_tom_geometry_tnhb1_2019-09-20-07-42-54_2019-09-20-12-01-11
turbine-05_helihoist-1_tom_acc-vel-pos_tnhb1_2019-09-20-07-42-54_2019-09-20-12-01-11
turbine-05_helihoist-1_tom_acc-vel-pos_tnhb2_2019-09-20-12-51-37_2019-09-20-16-14-47
turbine-05_helihoist-1_tom_geometry_tnhb2_2019-09-20-12-51-37_2019-09-20-16-14-47
turbine-05_sbitroot_tom_acc-vel-pos_tnhb2_2019-09-20-12-58-11_2019-09-20-16-36-36
turbine-05_sbittip_tom_acc-vel-pos_tnhb2_2019-09-20-13-00-55_2019-09-20-16-11-16
wmb-sued-2019-9-10
wmb-sued-2019-9-11
wmb-sued-2019-9-12
wmb-sued-2019-9-13
wmb-sued-2019-9-14
wmb-sued-2019-9-15
wmb-sued-2019-9-16
wmb-sued-2019-9-17
wmb-sued-2019-9-18
wmb-sued-2019-9-19
wmb-sued-2019-9-20
no wind data
'''
# loading data and filling it into a list of all dataframes
hammerhead = sorted(glob('Daten/hammerhead/hammerhead/turbine-05**.csv'))
sbi1 = sorted(glob('Daten/sbi1/sbi1/turbine-05**.csv'))
sbi2 = sorted(glob('Daten/sbi2/sbi2/turbine-05*.csv'))
tnhb1 = sorted(glob('Daten/tnhb1/tnhb1/turbine-05**.csv'))
tnhb2 = sorted(glob('Daten/tnhb2/tnhb2/turbine-05**.csv'))
data = []
helihoist_tele_hammerhead = pd.read_csv(hammerhead[0], delimiter = ',')
helihoist_geo_hammerhead = pd.read_csv(hammerhead[1], delimiter = ',')
data.append(helihoist_tele_hammerhead) , data.append(helihoist_geo_hammerhead)
helihoist_sbi1 = pd.read_csv(sbi1[0], delimiter = ',')
sbiroot_sbi1 = pd.read_csv(sbi1[1], delimiter = ',')
sbitip_sbi1 = pd.read_csv(sbi1[2], delimiter = ',')
data.append(helihoist_sbi1) ,data.append(sbiroot_sbi1) ,data.append(sbitip_sbi1)
helihoist_sbi2 = pd.read_csv(sbi2[0], delimiter = ',')
sbiroot_sbi2 = pd.read_csv(sbi2[1], delimiter = ',')
sbitip_sbi2 = pd.read_csv(sbi2[2], delimiter = ',')
data.append(helihoist_sbi2) ,data.append(sbiroot_sbi2) ,data.append(sbitip_sbi2)
helihoist_tnhb1 = pd.read_csv(tnhb1[0], delimiter = ',')
helihoist_geo_tnhb1 = pd.read_csv(tnhb1[1], delimiter = ',')
sbiroot_tnhb1 = pd.read_csv(tnhb1[2], delimiter = ',')
sbitip_tnhb1 = pd.read_csv(tnhb1[3], delimiter = ',')
data.append(helihoist_tnhb1) ,data.append(helihoist_geo_tnhb1) ,data.append(sbiroot_tnhb1),data.append(sbitip_tnhb1)
helihoist_tnhb2 = pd.read_csv(tnhb2[0], delimiter = ',')
helihoist_geo_tnhb2 = pd.read_csv(tnhb2[1], delimiter = ',')
sbiroot_tnhb2 = pd.read_csv(tnhb2[2], delimiter = ',')
sbitip_tnhb2 = pd.read_csv(tnhb2[3], delimiter = ',')
data.append(helihoist_tnhb2) ,data.append(helihoist_geo_tnhb2) ,data.append(sbiroot_tnhb2),data.append(sbitip_tnhb2)
wmb1= pd.read_csv('environment/environment/waves/wmb-sued/wmb-sued_2019-09-10.csv', delimiter = ' ')
wmb2= pd.read_csv('environment/environment/waves/wmb-sued/wmb-sued_2019-09-11.csv', delimiter = ' ')
wmb3= pd.read_csv('environment/environment/waves/wmb-sued/wmb-sued_2019-09-12.csv', delimiter = ' ')
wmb4= pd.read_csv('environment/environment/waves/wmb-sued/wmb-sued_2019-09-13.csv', delimiter = ' ')
wmb5= pd.read_csv('environment/environment/waves/wmb-sued/wmb-sued_2019-09-14.csv', delimiter = ' ')
wmb6= pd.read_csv('environment/environment/waves/wmb-sued/wmb-sued_2019-09-15.csv', delimiter = ' ')
wmb7= pd.read_csv('environment/environment/waves/wmb-sued/wmb-sued_2019-09-16.csv', delimiter = ' ')
wmb8= pd.read_csv('environment/environment/waves/wmb-sued/wmb-sued_2019-09-17.csv', delimiter = ' ')
wmb9= | pd.read_csv('environment/environment/waves/wmb-sued/wmb-sued_2019-09-18.csv', delimiter = ' ') | pandas.read_csv |
import logging
import os
import numpy as np
import pandas as pd
import sqlalchemy
from cached_property import cached_property
from scipy.interpolate import interp1d
from aqueduct.errors import Error
class RiskService(object):
def __init__(self, user_selections):
# DB Connection
self.engine = sqlalchemy.create_engine(os.getenv('POSTGRES_URL'))
self.metadata = sqlalchemy.MetaData(bind=self.engine)
self.metadata.reflect(self.engine)
# BACKGROUND INFO
self.flood_types = ["riverine", "coastal"]
self.exposures = ["gdpexp", "popexp", "urban_damage_v2"]
self.geogunits = ["geogunit_103", "geogunit_108"]
self.scenarios = {"business as usual": ['rcp8p5', 'ssp2', "bau"],
"pessimistic": ['rcp8p5', 'ssp3', "pes"],
"optimistic": ['rcp4p5', 'ssp2', "opt"],
"rcp8p5": ['rcp8p5', 'ssp3', "pes"],
"rcp4p5": ['rcp8p5', 'ssp2', "bau"]}
self.models = {"riverine": ["gf", "ha", "ip", "mi", "nr"],
# "coastal": ["wt"]}
"coastal": ["95", "50", "05"]}
self.years = [2010., 2030., 2050., 2080.]
self.ys = [str(x)[0:4] for x in self.years]
self.rps = [2, 5, 10, 25, 50, 100, 250, 500, 1000]
self.rps_names = ["rp" + str(x).zfill(5) for x in self.rps]
# MANDATORY USER INPUTS
self.flood = user_selections.get("flood") # Flood type
self.exposure = user_selections.get("exposure") # Exposure type
self.geogunit_unique_name = user_selections.get("geogunit_unique_name") # Unique geographical unit name
self.sub_scenario = user_selections.get(
"sub_scenario") # Subsidence option (Will always be no for Riverine floods)
self.existing_prot = user_selections.get(
"existing_prot") # User input for protection standard (triggers on-the-fly calculation)
self.scenario = user_selections.get("scenario")
self.geogunit, self.geogunit_name, self.geogunit_type, self.clim, self.socio, self.scen_abb, self.sub_abb, self.df_precalc, self.prot_pres, self.risk_analysis = self.user_selections()
# Scenario abbreviation
self.mods = self.models.get(self.flood)
def user_selections(self):
"""
Purpose: Gather all necessary inputs to run any analysis
Input:
flood: Riverine of Coastal (User must select)
Geogunit_unique_name: geographical unit name from website. (User must select)
Website should use list of unique names to avoid selecting more than one unit
Scenario: Business as usual, Pessimistic, Optimistic
            sub_scenario: Yes (default) or No. Does the user want to consider subsidence? Only relevant for coastal floods.
existing_prot: Default protection standard. User can input their own or, which will trigger on-the-fly calculations
Output:
geogunit unit - (geogunit_103 for cities, geogunit_108 for everything else)
geogunit_name - original (ie non-unique) name
geogunit_type - City, State, Country, Basin
            clim - rcp4p5, rcp8p5 (climate scenario associated with overall scenario)
socio - base, ssp2, ssp3 (socioeconomic scenario associated with overall scenario)
sub_scenario- Yes, No (Is subsidence included?)
sub_abb - wtsub or nosub (code name for subsidence. wtsub = with sub)
prot_pres - default protection standard for unit as a whole
risk_analysis - can we use precalculated risk data, or do we need to calculate on-the-fly?
"""
# GEOGUNIT INFO
fids, geogunit_name, geogunit_type = pd.read_sql_query(
"SELECT fids, name, type FROM lookup_master where uniqueName = '{0}' ".format(self.geogunit_unique_name),
self.engine).values[0]
geogunit = "geogunit_103" if geogunit_type.lower() == "city" else "geogunit_108"
# IMPACT DRIVER INFO (climate and socioeconomc scenarios
clim, socio, scen_abb = self.scenarios.get(self.scenario)
# SUBSIDENCE INFO
# Make sure subsidence is turned off for river floods
sub_abb = "wtsub" if self.sub_scenario else "nosub"
# DEFAULT DATA
defaultfn = "precalc_agg_{0}_{1}_{2}".format(self.flood, geogunit_type.lower(), sub_abb)
logging.info(f'[RISK - user_selection]: {str(defaultfn)}')
df_precalc = pd.read_sql_query("SELECT * FROM {0} where id like '{1}'".format(defaultfn, geogunit_name),
self.engine, index_col='id')
# PROTECTION STANDARDS and RISK ANALYSIS TYPE
if not self.existing_prot:
risk_analysis = "precalc"
# Hardwire in the protection standards for the Netherlands or Average prot standard for a whole unit (i.e. country)
# here self.exposure should be allways urban_damage_v2
prot_pres = (1000 if geogunit_name in ['Noord-Brabant, Netherlands', 'Zeeland, Netherlands',
'Zeeuwse meren, Netherlands', 'Zuid-Holland, Netherlands',
'Drenthe, Netherlands', 'Flevoland, Netherlands',
'Friesland, Netherlands', 'Gelderland, Netherlands',
'Groningen, Netherlands', 'IJsselmeer, Netherlands',
'Limburg, Netherlands', 'Noord-Holland, Netherlands',
'Overijssel, Netherlands', 'Utrecht, Netherlands',
'Netherlands'] else df_precalc[
["_".join(['urban_damage_v2', '2010', scen_abb, "prot_avg"])]])
else:
risk_analysis = "calc"
prot_pres = self.existing_prot
return geogunit, geogunit_name, geogunit_type.lower(), clim, socio, scen_abb, sub_abb, df_precalc, prot_pres, risk_analysis
def lp_data(self):
inFormat = 'raw_agg_{:s}_{:s}_{:s}'.format(self.flood, self.geogunit_type, self.exposure)
cols = [
'{0} as {1}'.format(col, col.replace(self.clim, 'lp').replace(self.socio + "_" + self.sub_abb + "_", ''))
for col in sqlalchemy.Table(inFormat, self.metadata).columns.keys() if
(self.clim in col) and (self.socio in col) and (self.sub_abb in col)]
df_temp = pd.read_sql_query(
"SELECT {0} FROM {1} where id like '{2}'".format(', '.join(cols), inFormat, self.geogunit_name),
self.engine)
df_lpcurve = df_temp.T
df1 = df_lpcurve.reset_index().rename(columns={"index": "index", 0: "y"})
df2 = df_lpcurve.reset_index()['index'].str.split('_', expand=True).rename(
columns={0: "lp", 1: "c", 2: "year", 3: "x"})
logging.info('[RISK]: lp_curve')
#logging.info(df1)
#logging.info(df2)
return pd.concat([df1, df2], axis=1).reindex(df1.index)[['c', 'year', 'y', 'x']].replace(self.rps_names, self.rps)
#return pd.concat([df1, df2], axis=1, join_axes=[df1.index])[['c', 'year', 'y', 'x']].replace(self.rps_names, self.rps)
def bench(self):
defaultfn = "precalc_agg_{0}_{1}_{2}".format(self.flood, self.geogunit_type, self.sub_abb)
print(defaultfn)
# cols = ['{0} as {1}'.format(col, col.replace(self.exposure, 'bench').replace('urban_damage_v2', 'bench').replace("_"+ self.scen_abb, '')) for col in sqlalchemy.Table(defaultfn, self.metadata).columns.keys() if ((self.exposure in col) or ('urban_damage_v2' in col)) and (self.scen_abb in col) and ("cc" not in col) and ("soc" not in col) and ("sub" not in col) and ("avg" in col)]
cols = ['{0} as {1}'.format(col,
col.replace(self.exposure, 'bench').replace('urban_damage_v2', 'bench').replace(
"_" + self.scen_abb, '')) for col in
sqlalchemy.Table(defaultfn, self.metadata).columns.keys() if
((self.exposure in col) or ('prot' in col)) and (self.scen_abb in col) and ("cc" not in col) and (
"soc" not in col) and ("sub" not in col) and ("avg" in col)]
benchData = pd.read_sql_query("SELECT id, {0} FROM {1}".format(', '.join(cols), defaultfn), self.engine,
index_col='id')
return benchData
def format_risk(self, dataframe):
datalist = ["tot_avg", "tot_min", "tot_max",
"ast", "prot_avg",
"per_avg", "per_min", "per_max",
"cc_avg", "cc_min", "cc_max",
"soc_avg", "sub_avg"]
colNames = ["Annual_Damage_Avg", "Annual_Damage_Min", "Annual_Damage_Max",
"Asset_Value", "Flood_Protection",
"Percent_Damage_Avg", "Percent_Damage_Min", "Percent_Damage_Max",
"CC_Driver_Avg", "CC_Driver_Min", "CC_Driver_Max",
"Soc_Driver", "Sub_Driver"]
df_final = pd.DataFrame(index=self.ys, columns=colNames)
for d in range(0, len(datalist)):
selData = dataframe[[col for col in dataframe.columns.tolist() if (datalist[d] in col)]]
if len(selData.values[0]) == 3:
df_final[colNames[d]][1:] = selData.values[0]
else:
df_final[colNames[d]] = selData.values[0]
return df_final
def find_assets(self):
"""
Purpose: Find total asset value
Output:
            Dataframe with total asset values for each year for the user-selected geographical unit
"""
# Create term to filter out unnecessary results. Drop SSP2 data if scenario
# is pessemistic. Else, drop SSP3
dropex = "ssp2" if self.scen_abb == "pes" else "ssp3"
assts = self.df_precalc[[col for col in self.df_precalc.columns.tolist() if
(self.exposure in col) and (self.scen_abb in col) and ("ast" in col) and (
dropex not in col)]]
return assts.reset_index(drop=True)
def run_stats(self, dataframe):
"""
Purpose: Finds the average, min, and max impact for all impact types
Input:
dataframe: Data associated with flood, geography, exposure type for all climate models
Output:
Dataframe with average impact data for each year for each impact type. Also includes min and max (uncertainity)
"""
# Create dataframe to hold final data
df_final = pd.DataFrame(index=dataframe.index)
# Define column field name structure
colFormat = '{:s}_{:s}_{:s}_{:s}_{:s}'.format
# Run following analysis for each year and impact type
for y in self.ys:
for t in ["cc", "soc", "sub", "tot", "prot"]:
df_filt = dataframe[[col for col in dataframe.columns if (t in col) and (y in col)]]
df_final[colFormat(self.exposure, y, self.scen_abb, t, "avg")] = df_filt.mean(axis=1)
if y != '2010' and t == "tot" or y != '2010' and t == 'cc':
df_final[colFormat(self.exposure, y, self.scen_abb, t, "min")] = df_filt.min(axis=1)
df_final[colFormat(self.exposure, y, self.scen_abb, t, "max")] = df_filt.max(axis=1)
df_final.replace(np.nan, 0, inplace=True)
return df_final
def ratio_to_total(self, dataframe):
"""
Purpose: Finds the impact attributed to climate change only, socioecon only, and subsidence only
Input:
inData: Annual expected impact data (found using default_risk function)
mods: All possible climate models
Output:
Dataframe with final impact data for each year for each impact type. Column name also specifies given model
"""
# Create dataframe to hold final data
df_final = pd.DataFrame(index=dataframe.index)
# Run analysis for each climate model and each year past 2010
colFormat = '{:s}_{:s}_{:s}_{:s}_{:s}'.format
df_final[colFormat(self.exposure, "2010", self.scen_abb, "prot", "avg")] = dataframe[
colFormat(self.exposure, "2010", self.scen_abb, "prot", "avg")]
tot2010 = dataframe[colFormat(self.exposure, "2010", self.scen_abb, "tot", "avg")]
df_final[colFormat(self.exposure, "2010", self.scen_abb, "tot", "avg")] = tot2010
for y in self.ys[1:]:
# Filter data year
df_filt = dataframe[[col for col in dataframe.columns if (y in col)]]
# Total impact for selected year is already calculated
df_final[colFormat(self.exposure, y, self.scen_abb, "tot", "avg")] = dataframe[
colFormat(self.exposure, y, self.scen_abb, "tot", "avg")]
df_final[colFormat(self.exposure, y, self.scen_abb, "tot", "min")] = dataframe[
colFormat(self.exposure, y, self.scen_abb, "tot", "min")]
df_final[colFormat(self.exposure, y, self.scen_abb, "tot", "max")] = dataframe[
colFormat(self.exposure, y, self.scen_abb, "tot", "max")]
# Find the difference from each impact to the 2010 baseline data
            df_filt['tot_diff'] = dataframe[colFormat(self.exposure, y, self.scen_abb, "tot", "avg")] - tot2010  # total impact
            df_filt['cc_diff_avg'] = dataframe[colFormat(self.exposure, y, self.scen_abb, "cc", "avg")] - tot2010  # climate-change-only impact
            df_filt['cc_diff_min'] = dataframe[colFormat(self.exposure, y, self.scen_abb, "cc", "min")] - tot2010  # climate-change-only impact (min)
            df_filt['cc_diff_max'] = dataframe[colFormat(self.exposure, y, self.scen_abb, "cc", "max")] - tot2010  # climate-change-only impact (max)
            df_filt['soc_diff'] = dataframe[colFormat(self.exposure, y, self.scen_abb, "soc", "avg")] - tot2010  # socioeconomic-only impact
            df_filt['sub_diff'] = dataframe[colFormat(self.exposure, y, self.scen_abb, "sub", "avg")] - tot2010  # subsidence-only impact
# Correct for values if impact is less than 2010 baseline data
df_filt['cc_diff_avg'] = np.where(df_filt['tot_diff'] > 0,
np.where(df_filt['cc_diff_avg'] < 0, 0, df_filt['cc_diff_avg']),
np.where(df_filt['cc_diff_avg'] > 0, 0, df_filt['cc_diff_avg']))
df_filt['cc_diff_min'] = np.where(df_filt['tot_diff'] > 0,
np.where(df_filt['cc_diff_min'] < 0, 0, df_filt['cc_diff_min']),
np.where(df_filt['cc_diff_min'] > 0, 0, df_filt['cc_diff_min']))
df_filt['cc_diff_max'] = np.where(df_filt['tot_diff'] > 0,
np.where(df_filt['cc_diff_max'] < 0, 0, df_filt['cc_diff_max']),
np.where(df_filt['cc_diff_max'] > 0, 0, df_filt['cc_diff_max']))
df_filt['soc_diff'] = np.where(df_filt['tot_diff'] > 0,
np.where(df_filt['soc_diff'] < 0, 0, df_filt['soc_diff']),
np.where(df_filt['soc_diff'] > 0, 0, df_filt['soc_diff']))
df_filt['sub_diff'] = np.where(df_filt['tot_diff'] > 0,
np.where(df_filt['sub_diff'] < 0, 0, df_filt['sub_diff']),
np.where(df_filt['sub_diff'] > 0, 0, df_filt['sub_diff']))
if self.sub_abb == "nosub":
df_filt['sub_diff'] = 0
# Find the ratio of impact attributed to each impact cause ( use the difference from 2010, not the absolute impact)
# Climate change only = (CC Only) / ( CC Only + Socio Only + Sub Only) * Total Impact
df_final[colFormat(self.exposure, y, self.scen_abb, "cc", "avg")] = (df_filt['cc_diff_avg'] / (
df_filt['cc_diff_avg'] + df_filt['soc_diff'] + df_filt['sub_diff'] + .000000001)) * df_filt[
'tot_diff']
df_final[colFormat(self.exposure, y, self.scen_abb, "cc", "min")] = (df_filt['cc_diff_min'] / (
df_filt['cc_diff_min'] + df_filt['soc_diff'] + df_filt['sub_diff'] + .000000001)) * df_filt[
'tot_diff']
df_final[colFormat(self.exposure, y, self.scen_abb, "cc", "max")] = (df_filt['cc_diff_max'] / (
df_filt['cc_diff_max'] + df_filt['soc_diff'] + df_filt['sub_diff'] + .000000001)) * df_filt[
'tot_diff']
# Socioecon change only = (Soc Only) / ( CC Only + Socio Only + Sub Only) * Total Impact
df_final[colFormat(self.exposure, y, self.scen_abb, "soc", "avg")] = (df_filt['soc_diff'] / (
df_filt['cc_diff_avg'] + df_filt['soc_diff'] + df_filt['sub_diff'] + .000000001)) * df_filt[
'tot_diff']
# Subsidence change only = (Sub Only) / ( CC Only + Socio Only + Sub Only) * Total Impact
df_final[colFormat(self.exposure, y, self.scen_abb, "sub", "avg")] = (df_filt['sub_diff'] / (
df_filt['cc_diff_avg'] + df_filt['soc_diff'] + df_filt['sub_diff'] + .000000001)) * df_filt[
'tot_diff']
df_final[colFormat(self.exposure, y, self.scen_abb, "prot", "avg")] = dataframe[
colFormat(self.exposure, y, self.scen_abb, "prot", "avg")]
# Replace any nulls with 0
df_final.replace(np.nan, 0, inplace=True)
return df_final
@staticmethod
def expected_value(values, RPs, RP_zero, RP_infinite):
"""
        Purpose: Annual expected impact/damage for a given time period
Input:
values: Impact per return period
2D array MxN
M: several time periods
N: several return periods
RPs: return periods (equal to length of N)
RP_zero: return period at which to break the EP-curve to zero (i.e. protection standard)
RP_infinite: return period close to the infinitely high return period
Output:
vector with expected values for each time period
"""
# append the return period at which maximum impact occurs, normally this is set to 1e6 years
RPs = np.append(np.array(RPs), RP_infinite)
# derive the probabilities associated with return periods
prob = 1. / RPs
values = np.array(values)
# append infinite impact (last value in array) to array. Simply copy the last value.
values = np.append(values, values[-1])
# now make a smooth function (function relates prob (on x) to projected future impact (y))
values_func = interp1d(prob, values)
# Returns 10,000 evenly spaced probabilities from most likely prob to most extreme
prob_smooth = np.linspace(prob[0], prob[-1], 10000)
# Insert these probabilites into "smooth function" to find their related impact
values_smooth = values_func(prob_smooth)
# Set all impacts above thres (protection standard) to zero
values_smooth[prob_smooth > 1. / RP_zero] = 0.
# compute expected values from return period values:
# Integrate under curve to find sum of all impact
exp_val = np.trapz(np.flipud(values_smooth), np.flipud(prob_smooth))
# print "Values, RP, Exp Value", values, RP_zero, exp_val,
return exp_val
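    # Hedged numeric sketch of expected_value(): made-up impacts at three return
    # periods with a 1-in-10-year protection standard. Losses from events more
    # frequent than the protection standard are zeroed before the integration.
    @staticmethod
    def _expected_value_example():
        impacts = [10., 100., 1000.]  # impact per return period (illustrative only)
        rps = [2, 10, 100]            # return periods in years
        protection_rp = 10            # events up to the 1-in-10-year flood cause no loss
        return RiskService.expected_value(impacts, rps, protection_rp, 1e5)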
@staticmethod
def interp_value(x, y, x_i, min_x=-np.Inf, max_x=np.Inf):
"""
Purpose: Find impacts associated with given protection standard
OR Find probability associated with a given impact
Allows for extrapolation to find new Y given user-defined X
Do a linear inter/extrapolation of y(x) to find a value y(x_idx)
"""
### OLD CODE
# Creates a function that relates X and Y and allows for extrapolation to find new Y given user-defined X
# y_interp = extrap1d(interp1d(np.array(x), np.array(y), axis=0))
# return y_interp(np.maximum(np.minimum(np.atleast_1d(x_i), max_x), min_x))
# -#-#-#-#-#-#-#-#-#-#-#-#-#
### NEW CODE
# interpolation only! return y min/max if out of bounds
x = np.atleast_1d(x)
y = np.atleast_1d(y)
f = interp1d(x, y, fill_value=(y.min(), y.max()), bounds_error=False)
y_new = f(x_i)
return y_new
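    # Small sketch of interp_value(): reading the impact at the 1-in-50-year level off
    # a made-up return-period/impact curve (all numbers are illustrative only).
    @staticmethod
    def _interp_value_example():
        rps = [2, 10, 100]
        impacts = [10., 100., 1000.]
        return RiskService.interp_value(rps, impacts, 50)  # linear interpolation gives ~500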
@staticmethod
def extrap1d(interpolator):
"""
Purpose: Make an extrapolation function
"""
xs = interpolator.x
ys = interpolator.y
def pointwise(x):
# If new prob is smaller than smallest prob in function
if x < xs[0]:
return ys[0] + (x - xs[0]) * (ys[1] - ys[0]) / (xs[1] - xs[0])
# If new prob is larger than largest prob in function
elif x > xs[-1]:
return ys[-1] + (x - xs[-1]) * (ys[-1] - ys[-2]) / (xs[-1] - xs[-2])
# If prob falls within set range of prob in function
else:
return interpolator(x)
def ufunclike(xs):
            return np.array(list(map(pointwise, np.array(xs))))
return ufunclike
def compute_rp_change(self, ref_impact, target_impact, rp, min_rp=2, max_rp=1000):
"""
Purpose: Compute how return period protection changes from one impact
distribution to another (e.g. present to future)
Input:
rps: return periods of impacts
ref_impact: set of reference impact
target_impacts: impacts to which protection standard should be mapped
(i.e. year the flood protection should be valid in)
rp, protection standard at reference impacts
"""
### NEW CODE
if target_impact.sum() == 0:
new_prot = np.nan
else:
# interpolate to estimate impacts at protection level 'rp'
prot_impact = self.interp_value(self.rps, ref_impact, rp)
new_prot = self.interp_value(target_impact, self.rps, prot_impact)
return new_prot
def find_impact(self, impact_cc, impact_soc, impact_sub, impact_cc_soc, impact_urb, model):
"""
Purpose: Finds annual impacts for climate only, socio only, subsidence only, and all scenarios together
Input:
impact_cc: Climate change only impacts.Variable consists of 4 dataframes (one for each year)
impact_soc: Socioecon change only impacts. Variable consists of 4 dataframes (one for each year)
impact_sub: Subsidence only impacts. Variable consists of 4 dataframes (one for each year)
            impact_cc_soc: Total impacts (climate + socioeconomic). Variable consists of 4 dataframes (one for each year)
impact_urb: Climate change only impacts to urban damage. Variable consists of 4 dataframes (one for each year)
model = Climate change model associated with input data
Output:
Dataframe with raw annual impact data for each year for each impact type. Column name also specifies given model
"""
# Create dataframes to hold expected impact (for each model and year)
col = [model + x + j for x in ["_cc_", "_soc_", "_sub_", "_tot_", "_prot_"] for j in self.ys]
model_imps = pd.DataFrame(index=[self.geogunit_name], columns=col)
# Perform for each year we have impact data
for y, imp_cc, imp_soc, imp_sub, imp_cc_soc, imp_urb in zip(self.ys, impact_cc, impact_soc, impact_sub,
impact_cc_soc, impact_urb):
# No transformation needed in 2010
if y == '2010':
prot_trans = self.prot_pres
else:
# Find how the flood protection changes over time
prot_trans = self.compute_rp_change(impact_urb[0], imp_urb.values[0], self.prot_pres,
min_rp=min(self.rps), max_rp=max(self.rps)) # i.e. RP_zero
# Find the annual expected damage with the new protection standard
model_imps.loc[self.geogunit_name, [model + "_cc_" + y]] = self.expected_value(imp_cc.values[0], self.rps,
prot_trans, 1e5)
model_imps.loc[self.geogunit_name, [model + "_soc_" + y]] = self.expected_value(imp_soc.values[0], self.rps,
prot_trans, 1e5)
model_imps.loc[self.geogunit_name, [model + "_sub_" + y]] = self.expected_value(imp_sub, self.rps,
prot_trans, 1e5)
model_imps.loc[self.geogunit_name, [model + "_tot_" + y]] = self.expected_value(imp_cc_soc.values[0],
self.rps, prot_trans, 1e5)
model_imps.loc[self.geogunit_name, [model + "_prot_" + y]] = prot_trans
return model_imps
def select_projection_data(self, dataframe, climate, model, socioecon, year):
"""
Purpose: Pull all historical (2010) raw data
Input:
dataframe: Raw data associated with user-defined flood, geographic unit and exposure
climate = Climate scenario
model = Climate model
socioecon = Socioeconomic scenario
sub_scenario: Is subsidence considered? Yes or No
year: 2030, 2050, or 2080
        Output:
            Dataframe with raw impact data for each return period for the given year
"""
# Select data using year, subsidence type, climate scen, socioecon scen, model
# CHANGEDIT
selCol = climate + "_" + model + "_" + socioecon + "_" + self.sub_abb + "_" + year
#logging.debug(selCol)
# selData = dataframe[[col for col in dataframe.index.tolist() if selCol in col]]
selData = dataframe[[col for col in dataframe.columns if (selCol in col) and ("rp00001" not in col)]]
# selData = dataframe[[col for col in dataframe.columns if (model in col) and (socioecon in col) and (climate in col) and (year in col) and ("rp00001" not in col)]]
#logging.debug(f'[RISK SERVICE - select_projection_data]: {selData}')
return selData
def calc_risk(self):
"""
        Purpose: Runs analysis on the fly instead of using precalculated results
(For when users define current protection level, find annual impact themselves)
Output:
df_aggregate = aggregated annual impacts for each year
"""
# READ IN DATA
# File name format for raw data
inFormat = 'raw_agg_{:s}_{:s}_{:s}'.format
fn = inFormat(self.flood, self.geogunit_type, self.exposure)
# URBAN DAMAGE DATA
urbfn = inFormat(self.flood, self.geogunit_type, "urban_damage_v2")
# Filter by geographic name
df_raw = pd.read_sql_query("SELECT * FROM {0} where id = '{1}' ".format(fn, self.geogunit_name), self.engine,
index_col='id')
df_urb = pd.read_sql_query("SELECT * FROM {0} where id = '{1}' ".format(urbfn, self.geogunit_name), self.engine,
index_col='id')
logging.info(f'[RISK SERVICE - calc_risk]: urbfn => {urbfn} fn => {fn}')
logging.debug('[RISK SERVICE - calc_risk]: prot_press => ' + str(self.prot_pres))
# Find impact for each model
model_impact = | pd.DataFrame(index=[self.geogunit_name]) | pandas.DataFrame |
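# ---------------------------------------------------------------------------
# Hedged illustration (not part of the original source): a minimal sketch of
# how an expected annual damage figure can be derived from per-return-period
# impacts and a protection standard, similar in spirit to the expected_value()
# calls in find_impact() above. The function name and the trapezoidal
# integration over exceedance probabilities are assumptions for illustration,
# not the project's actual implementation.
# ---------------------------------------------------------------------------
import numpy as np

def expected_annual_damage_sketch(impacts, return_periods, protection_rp):
    """Integrate damage over exceedance probability, ignoring events below the protection standard."""
    rps = np.asarray(return_periods, dtype=float)
    dmg = np.asarray(impacts, dtype=float)
    probs = 1.0 / rps  # exceedance probability of each return period (RP 100 -> p = 0.01)
    dmg = np.where(rps < protection_rp, 0.0, dmg)  # fully protected events contribute no damage
    order = np.argsort(probs)  # integrate over increasing probability
    return float(np.trapz(dmg[order], probs[order]))

# Example with illustrative numbers only:
# expected_annual_damage_sketch([0, 1e6, 5e6, 2e7], [2, 10, 100, 1000], protection_rp=10)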
from __future__ import print_function
# This is a class to deal with EPA AQS (Air Quality System) data
from builtins import zip
from builtins import range
from builtins import object
import os
from datetime import datetime
from zipfile import ZipFile
import pandas as pd
from numpy import array, arange
import inspect
import requests
class AQS(object):
def __init__(self):
# self.baseurl = 'https://aqs.epa.gov/aqsweb/airdata/'
self.objtype = 'AQS'
self.daily = False
self.baseurl = 'https://aqsdr1.epa.gov/aqsweb/aqstmp/airdata/'
self.dates = [datetime.strptime('2014-06-06 12:00:00', '%Y-%m-%d %H:%M:%S'),
datetime.strptime('2014-06-06 13:00:00', '%Y-%m-%d %H:%M:%S')]
self.renamedhcols = ['datetime_local', 'datetime', 'State_Code', 'County_Code',
'Site_Num', 'Parameter_Code', 'POC', 'Latitude', 'Longitude',
'Datum', 'Parameter_Name', 'Obs', 'Units',
'MDL', 'Uncertainty', 'Qualifier', 'Method_type', 'Method_Code',
'Method_Name', 'State_Name', 'County_Name', 'Date_of_Last_Change']
self.renameddcols = ['datetime_local', 'State_Code', 'County_Code', 'Site_Num',
'Parameter_Code', 'POC', 'Latitude', 'Longitude', 'Datum',
'Parameter_Name', 'Sample_Duration', 'Pollutant_Standard',
'Units', 'Event_Type', 'Observation_Count',
'Observation_Percent', 'Obs', '1st_Max_Value',
'1st_Max Hour', 'AQI', 'Method_Code', 'Method_Name',
'Local_Site_Name', 'Address', 'State_Name', 'County_Name',
'City_Name', 'MSA_Name', 'Date_of_Last_Change']
self.savecols = ['datetime_local', 'datetime', 'SCS',
'Latitude', 'Longitude', 'Obs', 'Units', 'Species']
self.se_states = array(
['Alabama', 'Florida', 'Georgia', 'Mississippi', 'North Carolina', 'South Carolina', 'Tennessee',
'Virginia', 'West Virginia'], dtype='|S14')
self.se_states_abv = array(
['AL', 'FL', 'GA', 'MS', 'NC', 'SC', 'TN',
'VA', 'WV'], dtype='|S14')
self.ne_states = array(['Connecticut', 'Delaware', 'District Of Columbia', 'Maine', 'Maryland', 'Massachusetts',
'New Hampshire', 'New Jersey', 'New York', 'Pennsylvania', 'Rhode Island', 'Vermont'],
dtype='|S20')
self.ne_states_abv = array(['CT', 'DE', 'DC', 'ME', 'MD', 'MA', 'NH', 'NJ', 'NY', 'PA', 'RI', 'VT'],
dtype='|S20')
self.nc_states = array(
['Illinois', 'Indiana', 'Iowa', 'Kentucky', 'Michigan',
'Minnesota', 'Missouri', 'Ohio', 'Wisconsin'],
dtype='|S9')
self.nc_states_abv = array(['IL', 'IN', 'IA', 'KY', 'MI', 'MN', 'MO', 'OH', 'WI'],
dtype='|S9')
self.sc_states = array(
['Arkansas', 'Louisiana', 'Oklahoma', 'Texas'], dtype='|S9')
self.sc_states_abv = array(['AR', 'LA', 'OK', 'TX'], dtype='|S9')
self.r_states = array(['Arizona', 'Colorado', 'Idaho', 'Kansas', 'Montana', 'Nebraska', 'Nevada', 'New Mexico',
'North Dakota', 'South Dakota', 'Utah', 'Wyoming'], dtype='|S12')
self.r_states_abv = array(['AZ', 'CO', 'ID', 'KS', 'MT', 'NE', 'NV', 'NM', 'ND', 'SD', 'UT', 'WY'],
dtype='|S12')
self.p_states = array(
['California', 'Oregon', 'Washington'], dtype='|S10')
self.p_states_abv = array(['CA', 'OR', 'WA'], dtype='|S10')
self.datadir = '.'
self.cwd = os.getcwd()
self.df = None # hourly dataframe
self.monitor_file = inspect.getfile(
self.__class__)[:-13] + '/data/monitoring_site_locations.dat'
self.monitor_df = None
self.d_df = None # daily dataframe
def check_file_size(self, url):
test = requests.head(url).headers
if int(test['Content-Length']) > 1000:
return True
else:
return False
def retrieve_aqs_hourly_pm25_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url1 = self.baseurl + 'hourly_88101_' + year + '.zip'
if self.check_file_size(url1):
print('Downloading Hourly PM25 FRM: ' + url1)
filename = wget.download(url1)
print('')
print('Unpacking: ' + url1)
dffrm = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
dffrm.columns = self.renamedhcols
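            # SCS packs the station identifiers into one integer: State_Code * 1e7 + County_Code * 1e4 + Site_Num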
dffrm['SCS'] = array(
dffrm['State_Code'].values * 1.E7 +
dffrm['County_Code'].values * 1.E4 + dffrm['Site_Num'].values,
dtype='int32')
else:
dffrm = pd.DataFrame(columns=self.renamedhcols)
url2 = self.baseurl + 'hourly_88502_' + year + '.zip'
if self.check_file_size(url2):
print('Downloading Hourly PM25 NON-FRM: ' + url2)
filename = wget.download(url2)
print('')
print('Unpacking: ' + url2)
dfnfrm = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
dfnfrm.columns = self.renamedhcols
dfnfrm['SCS'] = array(
dfnfrm['State_Code'].values * 1.E7 +
dfnfrm['County_Code'].values *
1.E4 + dfnfrm['Site_Num'].values,
dtype='int32')
else:
dfnfrm = pd.DataFrame(columns=self.renamedhcols)
if self.check_file_size(url1) | self.check_file_size(url2):
df = pd.concat([dfnfrm, dffrm], ignore_index=True)
df.loc[:, 'State_Code'] = pd.to_numeric(
df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
# df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df)
# df = self.get_region(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
df['Species'] = 'PM2.5'
print('Saving file to: ' + self.datadir + '/' + \
'AQS_HOURLY_PM_25_88101_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_PM_25_88101_' +
year + '.hdf', 'df', format='table')
else:
df = pd.DataFrame(columns=self.renamedhcols)
return df
def retrieve_aqs_hourly_ozone_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'hourly_44201_' + year + '.zip'
print('Downloading Hourly Ozone: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
df = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
df.columns = self.renamedhcols
df.loc[:, 'State_Code'] = pd.to_numeric(df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
print('Saving file to: ' + self.datadir + '/' + \
'AQS_HOURLY_OZONE_44201_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_OZONE_44201_' +
year + '.hdf', 'df', format='table')
return df
def retrieve_aqs_hourly_pm10_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'hourly_81102_' + year + '.zip'
print('Downloading Hourly PM10: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
df = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
df.columns = self.renamedhcols
df.loc[:, 'State_Code'] = pd.to_numeric(df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df)
# df = self.get_region(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
print('Saving file to: ' + self.datadir + '/' + \
'AQS_HOURLY_PM_10_81102_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_PM_10_81102_' +
year + '.hdf', 'df', format='table')
return df
def retrieve_aqs_hourly_so2_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'hourly_42401_' + year + '.zip'
print('Downloading Hourly SO2: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
df = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
df.columns = self.renamedhcols
df.loc[:, 'State_Code'] = pd.to_numeric(df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df)
# df = self.get_region(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
print('Saving file to: ' + self.datadir + '/' + \
'AQS_HOURLY_SO2_42401_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_SO2_42401_' + year +
'.hdf', 'df', format='table')
return df
def retrieve_aqs_hourly_no2_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'hourly_42602_' + year + '.zip'
print('Downloading Hourly NO2: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
df = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
df.columns = self.renamedhcols
df.loc[:, 'State_Code'] = pd.to_numeric(df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df)
# df = self.get_region(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
print('Saving file to: ' + self.datadir + '/' + \
'AQS_HOURLY_NO2_42602_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_NO2_42602_' + year +
'.hdf', 'df', format='table')
return df
def retrieve_aqs_hourly_co_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'hourly_42101_' + year + '.zip'
print('Downloading Hourly CO: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
df = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
df.columns = self.renamedhcols
df.loc[:, 'State_Code'] = pd.to_numeric(df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df)
# df = self.get_region(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
print('Saving file to: ' + self.datadir + \
'/' + 'AQS_HOURLY_CO_42101_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_CO_42101_' + year + '.hdf', 'df', format='table')
return df
def retrieve_aqs_hourly_nonoxnoy_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'hourly_NONOxNOy_' + year + '.zip'
print('Downloading Hourly NO NOx NOy: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
df = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
df.columns = self.renamedhcols
df.loc[:, 'State_Code'] = pd.to_numeric(df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df)
# df = self.get_region(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
print('Saving file to: ' + self.datadir + \
'/' + 'AQS_HOURLY_NONOXNOY_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_NONOXNOY_' + year + '.hdf', 'df', format='table')
return df
def retrieve_aqs_hourly_voc_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'hourly_VOCS_' + year + '.zip'
print('Downloading Hourly VOCs: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
df = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
df.columns = self.renamedhcols
df.loc[:, 'State_Code'] = pd.to_numeric(df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df, voc=True)
# df = self.get_region(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
print('Saving file to: ' + self.datadir + \
'/' + 'AQS_HOURLY_VOC_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_VOC_' + year + '.hdf', 'df', format='table')
return df
def retrieve_aqs_hourly_spec_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'hourly_SPEC_' + year + '.zip'
if self.check_file_size(url):
print('Downloading PM Speciation: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
df = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
df.columns = self.renamedhcols
df.loc[:, 'State_Code'] = pd.to_numeric(
df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
print('Saving file to: ' + self.datadir + \
'/' + 'AQS_HOURLY_SPEC_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_SPEC_' + year + '.hdf', 'df', format='table')
return df
else:
return pd.DataFrame(columns=self.renamedhcols)
def retrieve_aqs_hourly_wind_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'hourly_WIND_' + year + '.zip'
print('Downloading AQS WIND: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
df = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
df.columns = self.renamedhcols
df.loc[:, 'State_Code'] = pd.to_numeric(df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df)
# df = self.get_region(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
print('Saving file to: ' + self.datadir + \
'/' + 'AQS_HOURLY_WIND_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_WIND_' + year + '.hdf', 'df', format='table')
return df
def retrieve_aqs_hourly_temp_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'hourly_TEMP_' + year + '.zip'
print('Downloading AQS TEMP: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
df = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
df.columns = self.renamedhcols
df.loc[:, 'State_Code'] = pd.to_numeric(df.State_Code, errors='coerce')
df.loc[:, 'Site_Num'] = pd.to_numeric(df.Site_Num, errors='coerce')
df.loc[:, 'County_Code'] = pd.to_numeric(
df.County_Code, errors='coerce')
df['SCS'] = array(df['State_Code'].values * 1.E7 + df['County_Code'].values * 1.E4 + df['Site_Num'].values,
dtype='int32')
df.drop('Qualifier', axis=1, inplace=True)
df = self.get_species(df)
# df = self.get_region(df)
df = df.copy()[self.savecols]
df = self.add_metro_metadata2(df)
print('Saving file to: ' + self.datadir + \
'/' + 'AQS_HOURLY_TEMP_' + year + '.hdf')
df.to_hdf('AQS_HOURLY_TEMP_' + year + '.hdf', 'df', format='table')
return df
def retrieve_aqs_hourly_rhdp_data(self, dates):
import wget
i = dates[0]
year = i.strftime('%Y')
url = self.baseurl + 'hourly_RH_DP_' + year + '.zip'
print('Downloading AQS RH and DP: ' + url)
filename = wget.download(url)
print('')
print('Unpacking: ' + url)
df = pd.read_csv(filename, parse_dates={'datetime': ['Date GMT', 'Time GMT'],
'datetime_local': ["Date Local", "Time Local"]},
infer_datetime_format=True)
df.columns = self.renamedhcols
df.loc[:, 'State_Code'] = | pd.to_numeric(df.State_Code, errors='coerce') | pandas.to_numeric |
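# ---------------------------------------------------------------------------
# Hedged illustration (not part of the original source): the retrieve_* methods
# above repeat the same post-processing after reading each AQS csv. The helper
# below sketches that shared pattern; the function name is an assumption and
# the class itself keeps the inline version.
# ---------------------------------------------------------------------------
import pandas as pd
from numpy import array

def _assign_scs_sketch(df):
    """Coerce the id columns to numbers and build the combined SCS station code."""
    for col in ['State_Code', 'County_Code', 'Site_Num']:
        df.loc[:, col] = pd.to_numeric(df[col], errors='coerce')
    df['SCS'] = array(df['State_Code'].values * 1.E7 +
                      df['County_Code'].values * 1.E4 +
                      df['Site_Num'].values, dtype='int32')
    return df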
#!/usr/bin/python
# coding=utf-8
import time
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import jieba
import jieba.analyse
import os
from pyecharts import options as opts
from pyecharts.charts import Map
from pyecharts.charts import Pie
from pyecharts.charts import Bar
from pyecharts.charts import TreeMap
from pyecharts.charts import Line
from pyecharts.faker import Faker
from pyecharts.render import make_snapshot
# Use snapshot-selenium to render images
from snapshot_selenium import snapshot
from snownlp import SnowNLP
def get_current_time():
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
class MovieInfoAnalyse(object):
"""
    Analysis class for TOP500 movie information
"""
def __init__(self):
if not os.path.exists('analyse_data'):
os.mkdir('analyse_data')
print("所有分析结果保存在 analyse_data 文件夹下...")
def make_geo_map(self):
"""
        Generate a world map of film release counts by country
:return:
"""
# print(get_current_time() + '|-------> 正在生成 世界各国电影发行量 图表...')
        # Load the TOP500 movie data
csv_path = os.path.abspath(os.path.join(os.path.dirname("__file__"), os.path.pardir, "moviespider", "movie_info_top500.csv"))
rows = pd.read_csv(csv_path, encoding='utf-8', dtype=str)
        # Analyze and aggregate the data
col_country = rows['国别'].to_frame()
res = col_country.groupby('国别')['国别'].count().sort_values(ascending=False)
raw_data = [i for i in res.items()]
        # Load the mapping data: English name -> Chinese name
country_name = pd.read_json('countries_zh_to_en.json', orient='index')
stand_data = [i for i in country_name[0].items()]
        # Convert the data
res_code = []
for raw_country in raw_data:
for stand_country in stand_data:
if stand_country[1] in raw_country[0]:
res_code.append(stand_country[0])
code = pd.DataFrame(res_code).groupby(0)[0].count().sort_values(ascending=False)
data = []
for k, v in code.items():
data.append([k, v])
        # Build the chart
c = Map()
c.add("电影发行量", data, "world")
c.set_series_opts(label_opts=opts.LabelOpts(is_show=False))
c.set_global_opts(title_opts=opts.TitleOpts(title="电影TOP500榜单中 - 世界各国电影发行量"),
visualmap_opts=opts.VisualMapOpts(max_=55))
htmlPath = os.path.abspath(os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), "analyse_data", "世界各国电影发行量.html"))
pngPath = os.path.abspath(os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), "analyse_data", "世界各国电影发行量.png"))
        # Render to HTML
c.render(htmlPath)
        # Render to PNG
# make_snapshot(snapshot, c.render(), pngPath)
# print(get_current_time() + '|-------> 已生成 世界各国电影发行量 图表...')
return c
def make_pid_charts(self):
"""
        Generate a pie chart of movie genre shares
:return:
"""
# print(get_current_time() + '|-------> 正在生成 各类型占比 图表...')
        # Load data and initialize
csv_path = os.path.abspath(os.path.join(os.path.dirname("__file__"), os.path.pardir, "moviespider", "movie_info_top500.csv"))
rows = pd.read_csv(csv_path, encoding='utf-8', dtype=str)
to_drop = ['名称', '导演', '演员', '国别', '年份', '语言', '评分', '评分人数', '五星占比', '四星占比', '三星占比', '二星占比', '一星占比', '短评数',
'简介']
res = rows.drop(to_drop, axis=1)
        # Split the data
type_list = []
for i in res.itertuples():
for j in i[1].split(','):
type_list.append(j)
        # Aggregate the data
df = pd.DataFrame(type_list, columns=['类型'])
res = df.groupby('类型')['类型'].count().sort_values(ascending=False)
res_list = []
for i in res.items():
res_list.append(i)
        # Build the pie chart
c = Pie()
c.add("", res_list, center=["40%", "55%"], )
c.set_global_opts(
title_opts=opts.TitleOpts(title="电影TOP500榜单中 - 各类型占比"),
legend_opts=opts.LegendOpts(type_="scroll", pos_left="80%", orient="vertical"),
)
c.set_series_opts(label_opts=opts.LabelOpts(formatter="{b}: {c}"))
htmlPath = os.path.abspath(os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), "analyse_data", "各类型占比.html"))
pngPath = os.path.abspath(os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), "analyse_data", "各类型占比.png"))
        # Render to HTML
c.render(htmlPath)
        # Render to PNG
# make_snapshot(snapshot, c.render(), pngPath)
# print(get_current_time() + '|-------> 已生成 各类型占比 图表...')
return c
def make_relase_year_bar(self):
"""
        Generate a bar chart of films released per year
:return:
"""
# print(get_current_time() + '|-------> 正在生成 各年份电影发行量 图表...')
        # Load data and initialize
csv_path = os.path.abspath(os.path.join(os.path.dirname("__file__"), os.path.pardir, "moviespider", "movie_info_top500.csv"))
rows = pd.read_csv(csv_path, encoding='utf-8', dtype=str)
to_drop = ['名称', '导演', '演员', '国别', '类型', '语言', '评分', '评分人数', '五星占比', '四星占比', '三星占比', '二星占比', '一星占比', '短评数',
'简介']
res = rows.drop(to_drop, axis=1)
        # Analyze the data
res_by = res.groupby('年份')['年份'].count().sort_values(ascending=False)
res_by2 = res_by.sort_index(ascending=False)
years = []
datas = []
for k, v in res_by2.items():
years.append(k)
datas.append(v)
        # Build the chart
c = Bar()
c.add_xaxis(years)
c.add_yaxis("发行电影数量", datas, color=Faker.rand_color())
c.set_global_opts(
title_opts=opts.TitleOpts(title="电影TOP500榜单中 - 各年份电影发行量"),
datazoom_opts=[opts.DataZoomOpts(), opts.DataZoomOpts(type_="inside")],
)
htmlPath = os.path.abspath(os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), "analyse_data", "各年份电影发行量.html"))
pngPath = os.path.abspath(os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), "analyse_data", "各年份电影发行量.png"))
        # Render to HTML
c.render(htmlPath)
        # Render to PNG
# make_snapshot(snapshot, c.render(), pngPath)
# print(get_current_time() + '|-------> 已生成 各年份电影发行量 图表...')
return c
def make_star_treemap(self):
"""
        Generate a treemap of the number of films each actor appeared in
:return:
"""
# print(get_current_time() + '|-------> 正在生成 演员参演电影数 图表...')
        # Load data and initialize
csv_path = os.path.abspath(os.path.join(os.path.dirname("__file__"), os.path.pardir, "moviespider", "movie_info_top500.csv"))
rows = pd.read_csv(csv_path, encoding='utf-8', dtype=str)
# rows = pd.read_csv('../comments/movie_info_top500.csv', encoding='utf-8', dtype=str)
to_drop = ['名称', '导演', '年份', '国别', '类型', '语言', '评分', '评分人数', '五星占比', '四星占比', '三星占比', '二星占比', '一星占比', '短评数',
'简介']
res = rows.drop(to_drop, axis=1)
        # Split the data
all_star_list = []
for i in res.itertuples():
# print(i[1] + '\n')
for j in i[1].split(','):
all_star_list.append(j)
        # Aggregate the data
df = pd.DataFrame(all_star_list, columns=['演员'])
res = df.groupby('演员')['演员'].count().sort_values(ascending=False)
all_star_list = []
for i in res.items():
if i[1] > 4:
all_star_list.append({"value": i[1], "name": i[0]})
        # Build the chart
c = TreeMap()
c.add("参演电影数", all_star_list)
c.set_global_opts(title_opts=opts.TitleOpts(title="电影TOP500榜单中 - 演员参演电影数", subtitle="至少参演5部影评以上"))
htmlPath = os.path.abspath(os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), "analyse_data", "演员参演电影数.html"))
pngPath = os.path.abspath(os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), "analyse_data", "演员参演电影数.png"))
        # Render to HTML
c.render(htmlPath)
        # Render to PNG
# make_snapshot(snapshot, c.render(), pngPath)
# print(get_current_time() + '|-------> 已生成 演员参演电影数 图表...')
return c
def make_sentiments_line(self):
csv_path = os.path.abspath(os.path.join(os.path.dirname("__file__"), os.path.pardir, "moviespider", "comment_data", "极速车王.csv"))
# csv_path = os.path.abspath(os.path.join("D:\\MoviesAnalyse", "moviespider", "comment_data", "极速车王.csv"))
df = | pd.read_csv(csv_path) | pandas.read_csv |
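# ---------------------------------------------------------------------------
# Hedged illustration (not part of the original source): make_sentiments_line()
# above is truncated right after reading the comment csv. The sketch below shows
# one plausible way to score each comment with SnowNLP; the 'comment' column
# name and the per-row scoring are assumptions for illustration.
# ---------------------------------------------------------------------------
import pandas as pd
from snownlp import SnowNLP

def sentiment_scores_sketch(df, text_column='comment'):
    """Return a 0-1 sentiment score per row; values above 0.5 lean positive."""
    texts = df[text_column].dropna().astype(str)
    return texts.apply(lambda t: SnowNLP(t).sentiments)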
'''
Example of loading data through the FinMind API
'''
from FinMind.Data import Load
import requests
import pandas as pd
url = 'http://finmindapi.servebeer.com/api/data'
list_url = 'http://finmindapi.servebeer.com/api/datalist'
translate_url = 'http://finmindapi.servebeer.com/api/translation'
'''----------------TaiwanStockInfo----------------'''
form_data = {'dataset':'TaiwanStockInfo'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------TaiwanStockPrice----------------'''
form_data = {'dataset':'TaiwanStockPrice',
'stock_id':'2317',
'date':'2019-06-01'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------TaiwanStockPriceMinute----------------'''
form_data = {'dataset':'TaiwanStockPriceMinute',
'stock_id':'2330',
'date':'2019-06-01'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------FinancialStatements----------------'''
form_data = {'dataset':'FinancialStatements',
'stock_id':'2317',
'date':'2019-01-01'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data = Load.transpose(data)
data.head()
'''----------------TaiwanCashFlowsStatement----------------'''
form_data = {'dataset':'TaiwanCashFlowsStatement',
'stock_id':'2330',
'date':'2019-06-01'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------TaiwanStockStockDividend----------------'''
form_data = {'dataset':'TaiwanStockStockDividend',
'stock_id':'2317',
'date':'2018-01-01'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------TaiwanStockMarginPurchaseShortSale----------------'''
form_data = {'dataset':'TaiwanStockMarginPurchaseShortSale',
'stock_id':'2317',
'date':'2019-06-01'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------InstitutionalInvestorsBuySell----------------'''
form_data = {'dataset':'InstitutionalInvestorsBuySell',
'stock_id':'2317',
'date':'2019-06-01'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------Shareholding----------------'''
form_data = {'dataset':'Shareholding',
'stock_id':'2317',
'date':'2019-06-01'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------BalanceSheet----------------'''
form_data = {'dataset':'BalanceSheet',
'stock_id':'2317',
'date':'2019-01-01'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------TaiwanStockHoldingSharesPer----------------'''
form_data = {'dataset':'TaiwanStockHoldingSharesPer',
'stock_id':'2317',
'date':'2019-06-01'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------TaiwanStockMonthRevenue----------------'''
form_data = {'dataset':'TaiwanStockMonthRevenue',
'stock_id':'2317',
'date':'2019-01-01'}
res = requests.post(
url,verify = True,
data = form_data)
temp = res.json()
data = | pd.DataFrame(temp['data']) | pandas.DataFrame |
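# ---------------------------------------------------------------------------
# Hedged illustration (not part of the original source): every block above
# repeats the same POST-then-DataFrame conversion against the same endpoint.
# The wrapper below sketches that pattern; the function name and the minimal
# error handling are assumptions for illustration.
# ---------------------------------------------------------------------------
import requests
import pandas as pd

def load_finmind_sketch(dataset, stock_id=None, date=None,
                        url='http://finmindapi.servebeer.com/api/data'):
    """POST the same form data used above and return the payload as a DataFrame."""
    form_data = {'dataset': dataset}
    if stock_id is not None:
        form_data['stock_id'] = stock_id
    if date is not None:
        form_data['date'] = date
    res = requests.post(url, verify=True, data=form_data)
    res.raise_for_status()
    return pd.DataFrame(res.json()['data'])

# Example usage (same parameters as the TaiwanStockPrice block above):
# data = load_finmind_sketch('TaiwanStockPrice', stock_id='2317', date='2019-06-01')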