prompt (string, lengths 19 to 1.03M) | completion (string, lengths 4 to 2.12k) | api (string, lengths 8 to 90)
---|---|---|
import os
import numpy as np
import pandas as pd
import time
def write_analysis(path, dataset_dict, datasettype, mask_part, start_time, supervised=True):
"""
Creates a text file which contains a short summary of the dataset_dict data
Parameters:
-----------
path: string
path where to save the dataframe
dataset_dict: dict
contains all the analysis data
datasettype: string
adds the name of the subset to the dataframe title
e.g. 'all', 'train', 'valid', 'test'
mask_part: list
contains the segmentation tasks
e.g. ['glomerulus', 'podocytes'], ['glomerulus'], ['podocytes']
start_time: float
time at the start of the script (e.g. time.time()). Used to calculate the duration of the analysis
supervised: bool
(optional) if True, dice coefficient and Pearson correlation statistics are written; default True
Returns:
--------
nothing
"""
for mask_el in mask_part:
if mask_el == 'podocytes':
filename = datasettype + '_podos.txt'
filestr = 'podos images'
elif mask_el == 'glomerulus':
filename = datasettype + '_gloms.txt'
filestr = 'gloms images'
else:
filename = datasettype + '_unknown.txt'
filestr = 'unknown type'
write_txt = open(str(os.path.join(path, filename)), "w")
if supervised:
dc_mean = np.sum(np.array(dataset_dict['dice_coeffs_%s' % mask_el])) / len(dataset_dict['dice_coeffs_%s'
% mask_el])
dc_min = np.min(np.array(dataset_dict['dice_coeffs_%s' % mask_el]))
dc_max = np.max(np.array(dataset_dict['dice_coeffs_%s' % mask_el]))
object_dc_mean = np.sum(np.array(dataset_dict['object_dc_%s' % mask_el])) / len(dataset_dict['object_dc_%s'
% mask_el])
object_dc_min = np.min(np.array(dataset_dict['object_dc_%s' % mask_el]))
object_dc_max = np.max(np.array(dataset_dict['object_dc_%s' % mask_el]))
pearson = calculate_pearson(dataset_dict['count_masks_%s' % mask_el], dataset_dict['count_preds_%s'
% mask_el])
write_txt.write(str("Mean dice coefficient on pixels of " + filestr + " compared to groundtruth: ") +
str(dc_mean) + '\n')
write_txt.write(str("Min dice coefficient on pixels of " + filestr + " compared to groundtruth: ") +
str(dc_min) + '\n')
write_txt.write(str("Max dice coefficient on pixels of " + filestr + " compared to groundtruth: ") +
str(dc_max) + '\n')
write_txt.write(str("Pearson correlation coefficient on objects of " + filestr +
" compared to groundtruth: ") + str(pearson) + '\n')
write_txt.write(str("Mean dice coeff on objects of " + filestr + " compared to groundtruth: ") +
str(object_dc_mean) + '\n')
write_txt.write(str("Min dice coeff on objects of " + filestr + " compared to groundtruth: ") +
str(object_dc_min) + '\n')
write_txt.write(str("Max dice coeff on objects of " + filestr + " compared to groundtruth: ") +
str(object_dc_max) + '\n')
write_txt.write('\n')
duration = time.time() - start_time
duration_std = int(duration / 3600)
duration_min = int((duration % 3600) / 60)
duration_sec = int(duration % 60)
write_txt.write(str("Test time: ") + str(duration_std) + "h " + str(duration_min)
+ "min " + str(duration_sec) + 'sec \n')
write_txt.close()
return
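# Hedged usage sketch (not part of the original module): the path, keys and values
# below are hypothetical and only illustrate the expected call signature. With
# supervised=False no metric keys are read, so the dict could even be empty.
def _example_write_analysis(out_dir='/tmp'):
    example_dict = {'dice_coeffs_glomerulus': [0.91, 0.88],
                    'object_dc_glomerulus': [0.90, 0.85],
                    'count_masks_glomerulus': [3, 4],
                    'count_preds_glomerulus': [3, 5]}
    # supervised=True would additionally require calculate_pearson() from the original module
    write_analysis(out_dir, example_dict, 'test', ['glomerulus'], time.time(), supervised=False)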
def write_dataframe(path, dataset_dict, image_list, datasettype, mask_part):
"""
Creates a pandas dataframe containing the analysis of mask and prediction
Parameters:
-----------
path: string
path where to save the dataframe
dataset_dict: dict
contains all the analysis data
image_list: list
contains all the image names
datasettype: string
adds the name of the subset to the dataframe title
e.g. 'all', 'train', 'valid', 'test'
mask_part: list
contains the segmentation tasks
e.g. ['glomerulus', 'podocytes'], ['glomerulus'], ['podocytes']
Returns:
--------
nothing
"""
for mask_el in mask_part:
titles = []
for i in range(len(image_list)):
# Get rid of .tif and the path before
image_name = os.path.split(image_list[i])[1]
titles.append(image_name[:-4])
df = pd.DataFrame({'Sample name': pd.Series(titles),
'GT count': pd.Series(dataset_dict['count_masks_%s' % mask_el]),
'Network count': pd.Series(dataset_dict['count_preds_%s' % mask_el]),
'GT area': pd.Series(dataset_dict['area_masks_%s' % mask_el]),
'Network area': pd.Series(dataset_dict['area_preds_%s' % mask_el]),
'Network dice pixel': pd.Series(dataset_dict['dice_coeffs_%s' % mask_el]),
'Network dice object': pd.Series(dataset_dict['object_dc_%s' % mask_el]),
'Network True pos': pd.Series(dataset_dict['tp_%s' % mask_el]),
'Network False pos': pd.Series(dataset_dict['fp_%s' % mask_el]),
'Network False neg': pd.Series(dataset_dict['fn_%s' % mask_el])})
df.to_excel(str(os.path.join(path, datasettype + '_Dataframe_' + mask_el + '.xlsx')))
# df.to_csv(path + datasettype + '_Dataframe_' + mask_el + '.csv')
return
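# Illustrative only: the per-class keys that write_dataframe() reads from dataset_dict,
# shown with made-up values for a single hypothetical 'glomerulus' image.
def _example_write_dataframe(out_dir='/tmp'):
    example_dict = {'count_masks_glomerulus': [3], 'count_preds_glomerulus': [3],
                    'area_masks_glomerulus': [1500.0], 'area_preds_glomerulus': [1480.0],
                    'dice_coeffs_glomerulus': [0.91], 'object_dc_glomerulus': [0.90],
                    'tp_glomerulus': [3], 'fp_glomerulus': [0], 'fn_glomerulus': [0]}
    # writes <out_dir>/test_Dataframe_glomerulus.xlsx (requires an Excel writer such as openpyxl)
    write_dataframe(out_dir, example_dict, ['img_01.tif'], 'test', ['glomerulus'])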
def write_readouts(path, dataset_dict, image_list, datasettype, mask_part,
do_wt1_signal, do_dach1_signal, do_stereology_pred, do_stereology_gt):
"""
Creates the csv output which will be used for the classification.
The dataframe optionally contains the WT1 signal of the glomerulus prediction,
the DACH1 signal of the podocyte prediction and
the stereological calculations.
"""
titles = []
for i in range(len(image_list)):
image_name = os.path.split(image_list[i])[1]
titles.append(image_name[:-4])
# Segmentation of only 1 class was applied (e.g. glomerulus or podocytes)
if len(mask_part) == 1:
mask_el = mask_part.pop()
if mask_el == "glomerulus":
network_area = "glomerulus_area"
# Add a column if GET_WT1_SIGNAL_FOR_GLOMERULUS = True
if do_wt1_signal:
df = pd.DataFrame(
{'image_name': pd.Series(titles),
network_area: pd.Series(dataset_dict['area_preds_%s' % mask_el]),
'mean_WT1_signal_in_glom': pd.Series(dataset_dict['mean_WT1_glom_preds']),
'var_WT1_signal_in_glom': pd.Series(dataset_dict['var_WT1_glom_preds']),
'median_WT1_signal_in_glom': pd.Series(dataset_dict['median_WT1_glom_preds']),
'min_WT1_signal_in_glom': pd.Series(dataset_dict['min_WT1_glom_preds']),
'max_WT1_signal_in_glom': pd.Series(dataset_dict['max_WT1_glom_preds']),
'perc25_WT1_signal_in_glom': pd.Series(dataset_dict['perc25_WT1_glom_preds']),
'perc75_WT1_signal_in_glom': pd.Series(dataset_dict['perc75_WT1_glom_preds'])})
else:
df = pd.DataFrame({'image_name': pd.Series(titles),
network_area: | pd.Series(dataset_dict['area_preds_%s' % mask_el]) | pandas.Series |
# pylint: disable=E1101,E1103,W0232
import operator
from datetime import datetime, date
import numpy as np
import pandas.tseries.offsets as offsets
from pandas.tseries.frequencies import (get_freq_code as _gfc,
_month_numbers, FreqGroup)
from pandas.tseries.index import DatetimeIndex, Int64Index, Index
from pandas.tseries.tools import parse_time_string
import pandas.tseries.frequencies as _freq_mod
import pandas.core.common as com
from pandas.lib import Timestamp
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.algos as _algos
#---------------
# Period logic
def _period_field_accessor(name, alias):
def f(self):
base, mult = _gfc(self.freq)
return tslib.get_period_field(alias, self.ordinal, base)
f.__name__ = name
return property(f)
def _field_accessor(name, alias):
def f(self):
base, mult = _gfc(self.freq)
return tslib.get_period_field_arr(alias, self.values, base)
f.__name__ = name
return property(f)
class Period(object):
__slots__ = ['freq', 'ordinal']
def __init__(self, value=None, freq=None, ordinal=None,
year=None, month=1, quarter=None, day=1,
hour=0, minute=0, second=0):
"""
Represents a period of time
Parameters
----------
value : Period or basestring, default None
The time period represented (e.g., '4Q2005')
freq : str, default None
e.g., 'B' for businessday, ('T', 5) or '5T' for 5 minutes
year : int, default None
month : int, default 1
quarter : int, default None
day : int, default 1
hour : int, default 0
minute : int, default 0
second : int, default 0
"""
# freq points to a tuple (base, mult); base is one of the defined
# periods such as A, Q, etc. Every five minutes would be, e.g.,
# ('T', 5) but may be passed in as a string like '5T'
self.freq = None
# ordinal is the period offset from the gregorian proleptic epoch
self.ordinal = None
if ordinal is not None and value is not None:
raise ValueError(("Only value or ordinal but not both should be "
"given but not both"))
elif ordinal is not None:
if not com.is_integer(ordinal):
raise ValueError("Ordinal must be an integer")
if freq is None:
raise ValueError('Must supply freq for ordinal value')
self.ordinal = ordinal
elif value is None:
if freq is None:
raise ValueError("If value is None, freq cannot be None")
self.ordinal = _ordinal_from_fields(year, month, quarter, day,
hour, minute, second, freq)
elif isinstance(value, Period):
other = value
if freq is None or _gfc(freq) == _gfc(other.freq):
self.ordinal = other.ordinal
freq = other.freq
else:
converted = other.asfreq(freq)
self.ordinal = converted.ordinal
elif isinstance(value, basestring) or com.is_integer(value):
if com.is_integer(value):
value = str(value)
dt, freq = _get_date_and_freq(value, freq)
elif isinstance(value, datetime):
dt = value
if freq is None:
raise ValueError('Must supply freq for datetime value')
elif isinstance(value, date):
dt = datetime(year=value.year, month=value.month, day=value.day)
if freq is None:
raise ValueError('Must supply freq for datetime value')
else:
msg = "Value must be Period, string, integer, or datetime"
raise ValueError(msg)
base, mult = _gfc(freq)
if mult != 1:
raise ValueError('Only mult == 1 supported')
if self.ordinal is None:
self.ordinal = tslib.period_ordinal(dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
base)
self.freq = _freq_mod._get_freq_str(base)
def __eq__(self, other):
if isinstance(other, Period):
return (self.ordinal == other.ordinal
and _gfc(self.freq) == _gfc(other.freq))
return False
def __hash__(self):
return hash((self.ordinal, self.freq))
def __add__(self, other):
if com.is_integer(other):
return Period(ordinal=self.ordinal + other, freq=self.freq)
else: # pragma: no cover
raise TypeError(other)
def __sub__(self, other):
if com.is_integer(other):
return Period(ordinal=self.ordinal - other, freq=self.freq)
if isinstance(other, Period):
if other.freq != self.freq:
raise ValueError("Cannot do arithmetic with "
"non-conforming periods")
return self.ordinal - other.ordinal
else: # pragma: no cover
raise TypeError(other)
def asfreq(self, freq, how='E'):
"""
Convert Period to desired frequency, either at the start or end of the
interval
Parameters
----------
freq : string
how : {'E', 'S', 'end', 'start'}, default 'end'
Start or end of the timespan
Returns
-------
resampled : Period
"""
how = _validate_end_alias(how)
base1, mult1 = _gfc(self.freq)
base2, mult2 = _gfc(freq)
if mult2 != 1:
raise ValueError('Only mult == 1 supported')
end = how == 'E'
new_ordinal = tslib.period_asfreq(self.ordinal, base1, base2, end)
return Period(ordinal=new_ordinal, freq=base2)
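# Illustrative expectations for asfreq (not from the original source):
#   Period('2007', freq='A').asfreq('M', how='S')  -> Period('2007-01', 'M')
#   Period('2007', freq='A').asfreq('M')           -> Period('2007-12', 'M')  (default how='E')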
@property
def start_time(self):
return self.to_timestamp(how='S')
@property
def end_time(self):
ordinal = (self + 1).start_time.value - 1
return Timestamp(ordinal)
def to_timestamp(self, freq=None, how='start'):
"""
Return the Timestamp representation of the Period at the target
frequency at the specified end (how) of the Period
Parameters
----------
freq : string or DateOffset, default is 'D' if self.freq is week or
longer and 'S' otherwise
Target frequency
how: str, default 'S' (start)
'S', 'E'. Can be aliased as case insensitive
'Start', 'Finish', 'Begin', 'End'
Returns
-------
Timestamp
"""
how = _validate_end_alias(how)
if freq is None:
base, mult = | _gfc(self.freq) | pandas.tseries.frequencies.get_freq_code |
# -*- coding: utf-8 -*-
import string
from collections import OrderedDict
from datetime import date, datetime
import numpy as np
import pandas as pd
import pandas.util.testing as pdt
import pytest
from kartothek.core.common_metadata import make_meta, store_schema_metadata
from kartothek.core.index import ExplicitSecondaryIndex
from kartothek.core.naming import DEFAULT_METADATA_VERSION
from kartothek.io_components.metapartition import (
MetaPartition,
_unique_label,
parse_input_to_metapartition,
partition_labels_from_mps,
)
from kartothek.serialization import DataFrameSerializer, ParquetSerializer
def test_store_single_dataframe_as_partition(
store, metadata_storage_format, metadata_version
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
mp = MetaPartition(
label="test_label", data={"core": df}, metadata_version=metadata_version
)
meta_partition = mp.store_dataframes(
store=store,
df_serializer=ParquetSerializer(),
dataset_uuid="dataset_uuid",
store_metadata=True,
metadata_storage_format=metadata_storage_format,
)
assert len(meta_partition.data) == 0
expected_key = "dataset_uuid/core/test_label.parquet"
assert meta_partition.files == {"core": expected_key}
assert meta_partition.label == "test_label"
files_in_store = list(store.keys())
expected_num_files = 1
assert len(files_in_store) == expected_num_files
stored_df = DataFrameSerializer.restore_dataframe(store=store, key=expected_key)
pdt.assert_frame_equal(df, stored_df)
files_in_store.remove(expected_key)
assert len(files_in_store) == expected_num_files - 1
def test_store_single_dataframe_as_partition_no_metadata(store, metadata_version):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
mp = MetaPartition(
label="test_label", data={"core": df}, metadata_version=metadata_version
)
partition = mp.store_dataframes(
store=store,
df_serializer=ParquetSerializer(),
dataset_uuid="dataset_uuid",
store_metadata=False,
)
assert len(partition.data) == 0
expected_file = "dataset_uuid/core/test_label.parquet"
assert partition.files == {"core": expected_file}
assert partition.label == "test_label"
# One meta one actual file
files_in_store = list(store.keys())
assert len(files_in_store) == 1
stored_df = DataFrameSerializer.restore_dataframe(store=store, key=expected_file)
pdt.assert_frame_equal(df, stored_df)
def test_load_dataframe_logical_conjunction(
store, meta_partitions_files_only, metadata_version, metadata_storage_format
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
mp = MetaPartition(
label="cluster_1",
data={"core": df},
metadata_version=metadata_version,
logical_conjunction=[("P", ">", 4)],
)
meta_partition = mp.store_dataframes(
store=store,
df_serializer=None,
dataset_uuid="dataset_uuid",
store_metadata=True,
metadata_storage_format=metadata_storage_format,
)
predicates = None
loaded_mp = meta_partition.load_dataframes(store=store, predicates=predicates)
data = {
"core": pd.DataFrame(
{"P": [5, 6, 7, 8, 9], "L": [5, 6, 7, 8, 9], "TARGET": [15, 16, 17, 18, 19]}
).set_index(np.arange(5, 10))
}
pdt.assert_frame_equal(loaded_mp.data["core"], data["core"])
predicates = [[("L", ">", 6), ("TARGET", "<", 18)]]
loaded_mp = meta_partition.load_dataframes(store=store, predicates=predicates)
data = {
"core": pd.DataFrame({"P": [7], "L": [7], "TARGET": [17]}).set_index(
np.array([7])
)
}
pdt.assert_frame_equal(loaded_mp.data["core"], data["core"])
predicates = [[("L", ">", 2), ("TARGET", "<", 17)], [("TARGET", "==", 19)]]
loaded_mp = meta_partition.load_dataframes(store=store, predicates=predicates)
data = {
"core": pd.DataFrame(
{"P": [5, 6, 9], "L": [5, 6, 9], "TARGET": [15, 16, 19]}
).set_index(np.array([5, 6, 9]))
}
pdt.assert_frame_equal(loaded_mp.data["core"], data["core"])
def test_store_multiple_dataframes_as_partition(
store, metadata_storage_format, metadata_version
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df_2 = pd.DataFrame({"P": np.arange(0, 10), "info": string.ascii_lowercase[:10]})
mp = MetaPartition(
label="cluster_1",
data={"core": df, "helper": df_2},
metadata_version=metadata_version,
)
meta_partition = mp.store_dataframes(
store=store,
df_serializer=None,
dataset_uuid="dataset_uuid",
store_metadata=True,
metadata_storage_format=metadata_storage_format,
)
expected_file = "dataset_uuid/core/cluster_1.parquet"
expected_file_helper = "dataset_uuid/helper/cluster_1.parquet"
assert meta_partition.files == {
"core": expected_file,
"helper": expected_file_helper,
}
assert meta_partition.label == "cluster_1"
files_in_store = list(store.keys())
assert len(files_in_store) == 2
stored_df = DataFrameSerializer.restore_dataframe(store=store, key=expected_file)
pdt.assert_frame_equal(df, stored_df)
files_in_store.remove(expected_file)
stored_df = DataFrameSerializer.restore_dataframe(
store=store, key=expected_file_helper
)
pdt.assert_frame_equal(df_2, stored_df)
files_in_store.remove(expected_file_helper)
@pytest.mark.parametrize("predicate_pushdown_to_io", [True, False])
def test_load_dataframes(
meta_partitions_files_only, store_session, predicate_pushdown_to_io
):
expected_df = pd.DataFrame(
OrderedDict(
[
("P", [1]),
("L", [1]),
("TARGET", [1]),
("DATE", pd.to_datetime([date(2010, 1, 1)])),
]
)
)
expected_df_2 = pd.DataFrame(OrderedDict([("P", [1]), ("info", ["a"])]))
mp = meta_partitions_files_only[0]
assert len(mp.files) > 0
assert len(mp.data) == 0
mp = meta_partitions_files_only[0].load_dataframes(
store=store_session, predicate_pushdown_to_io=predicate_pushdown_to_io
)
assert len(mp.data) == 2
data = mp.data
pdt.assert_frame_equal(data["core"], expected_df, check_dtype=False)
pdt.assert_frame_equal(data["helper"], expected_df_2, check_dtype=False)
empty_mp = MetaPartition("empty_mp", metadata_version=mp.metadata_version)
empty_mp.load_dataframes(
store_session, predicate_pushdown_to_io=predicate_pushdown_to_io
)
assert empty_mp.data == {}
def test_remove_dataframes(meta_partitions_files_only, store_session):
mp = meta_partitions_files_only[0].load_dataframes(store=store_session)
assert len(mp.data) == 2
mp = mp.remove_dataframes()
assert mp.data == {}
def test_load_dataframes_selective(meta_partitions_files_only, store_session):
expected_df = pd.DataFrame(
OrderedDict(
[
("P", [1]),
("L", [1]),
("TARGET", [1]),
("DATE", pd.to_datetime([date(2010, 1, 1)])),
]
)
)
mp = meta_partitions_files_only[0]
assert len(mp.files) > 0
assert len(mp.data) == 0
mp = meta_partitions_files_only[0].load_dataframes(
store=store_session, tables=["core"]
)
assert len(mp.data) == 1
data = mp.data
pdt.assert_frame_equal(data["core"], expected_df, check_dtype=False)
def test_load_dataframes_columns_projection(
meta_partitions_evaluation_files_only, store_session
):
expected_df = pd.DataFrame(OrderedDict([("P", [1]), ("L", [1]), ("HORIZON", [1])]))
mp = meta_partitions_evaluation_files_only[0]
assert len(mp.files) > 0
assert len(mp.data) == 0
mp = meta_partitions_evaluation_files_only[0].load_dataframes(
store=store_session, tables=["PRED"], columns={"PRED": ["P", "L", "HORIZON"]}
)
assert len(mp.data) == 1
data = mp.data
pdt.assert_frame_equal(data["PRED"], expected_df, check_dtype=False)
def test_load_dataframes_columns_raises_missing(
meta_partitions_evaluation_files_only, store_session
):
mp = meta_partitions_evaluation_files_only[0]
assert len(mp.files) > 0
assert len(mp.data) == 0
with pytest.raises(ValueError) as e:
meta_partitions_evaluation_files_only[0].load_dataframes(
store=store_session,
tables=["PRED"],
columns={"PRED": ["P", "L", "HORIZON", "foo", "bar"]},
)
assert str(e.value) == "Columns cannot be found in stored dataframe: bar, foo"
def test_load_dataframes_columns_table_missing(
meta_partitions_evaluation_files_only, store_session
):
# test behavior of load_dataframes when the columns argument
# specifies a table that doesn't exist
mp = meta_partitions_evaluation_files_only[0]
assert len(mp.files) > 0
assert len(mp.data) == 0
with pytest.raises(
ValueError,
match=r"You are trying to read columns from invalid table\(s\). .*PRED_typo.*",
):
mp.load_dataframes(
store=store_session,
columns={"PRED_typo": ["P", "L", "HORIZON", "foo", "bar"]},
)
# ensure typo in tables argument doesn't raise, as specified in docstring
dfs = mp.load_dataframes(store=store_session, tables=["PRED_typo"])
assert len(dfs) > 0
def test_from_dict():
df = pd.DataFrame({"a": [1]})
dct = {"data": {"core": df}, "label": "test_label"}
meta_partition = MetaPartition.from_dict(dct)
| pdt.assert_frame_equal(meta_partition.data["core"], df) | pandas.util.testing.assert_frame_equal |
import pandas as pd
import numpy as np
import scipy.stats as st
DAYS_IN_YEAR=256.0
ROOT_DAYS_IN_YEAR=DAYS_IN_YEAR**.5
useroot=""
def cap_forecast(xrow, capmin,capmax):
"""
Cap forecasts.
"""
## Assumes we have a single column
x=xrow[0]
if x<capmin:
return capmin
elif x>capmax:
return capmax
return x
def cap_series(xseries, capmin=-20.0,capmax=20.0):
"""
Apply capping to each element of a time series
For a long only investor, replace -20.0 with 0.0
"""
return xseries.apply(cap_forecast, axis=1, args=(capmin, capmax))
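# Quick illustration (not from the original file): cap_series() expects a one-column
# DataFrame; naming the column 0 keeps the xrow[0] lookup label-based in any pandas version.
def _example_cap_series():
    raw = pd.DataFrame({0: [-35.0, 4.2, 27.5]})
    return cap_series(raw)  # -> -20.0, 4.2, 20.0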
def get_list_code():
ans=pd.read_csv("%sconfig.csv" % useroot)
return list(ans.Instrument)
def get_point_sizes():
ans=pd.read_csv("%sconfig.csv" % useroot)
psizes=dict([(x[1].Instrument, float(x[1].Pointsize)) for x in ans.iterrows()])
return psizes
def pd_readcsv(filename):
"""
Reads the pandas dataframe from a filename, given the index is correctly labelled
"""
ans= | pd.read_csv(filename) | pandas.read_csv |
import pandas as pd
wb = | pd.read_excel(
'/home/rogeriogama/Área de Trabalho/Projetos/tese/Dados/sigeo_dado.xlsx') | pandas.read_excel |
from math import e
import numpy as np
import pandas as pd
import pytest
from etna.transforms.log import LogTransform
@pytest.fixture
def non_positive_df_(random_seed) -> pd.DataFrame:
"""Generate dataset with non-positive target."""
periods = 100
df1 = pd.DataFrame({"timestamp": pd.date_range("2020-01-01", periods=periods)})
df1["segment"] = ["segment_1"] * periods
df1["target"] = np.random.uniform(-10, 0, size=periods)
df2 = pd.DataFrame({"timestamp": pd.date_range("2020-01-01", periods=periods)})
df2["segment"] = ["segment_2"] * periods
df2["target"] = np.random.uniform(0, 10, size=periods)
df = pd.concat((df1, df2))
df = df.pivot(index="timestamp", columns="segment").reorder_levels([1, 0], axis=1).sort_index(axis=1)
df.columns.names = ["segment", "feature"]
return df
@pytest.fixture
def positive_df_(random_seed) -> pd.DataFrame:
"""Generate dataset with positive target."""
periods = 100
df1 = pd.DataFrame({"timestamp": pd.date_range("2020-01-01", periods=periods)})
df1["segment"] = ["segment_1"] * periods
df1["target"] = np.random.uniform(10, 20, size=periods)
df1["expected"] = np.log10(df1["target"] + 1)
df2 = pd.DataFrame({"timestamp": pd.date_range("2020-01-01", periods=periods)})
df2["segment"] = ["segment_2"] * periods
df2["target"] = np.random.uniform(1, 15, size=periods)
df2["expected"] = np.log10(df2["target"] + 1)
df = | pd.concat((df1, df2)) | pandas.concat |
# -*- coding: utf-8 -*-
"""
This file is part of the Shotgun Lipidomics Assistant (SLA) project.
Copyright 2020 <NAME> (UCLA), <NAME> (UCLA), <NAME> (UW).
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import pandas as pd
# from pyopenms import *
import os
# import tkinter as tk
# from tkinter import ttk
from tkinter import messagebox
# from tkinter.messagebox import showinfo
from tkinter import *
# import matplotlib.pyplot as plt
# import matplotlib
# matplotlib.use("Agg")
from tkinter import filedialog
# import glob
import re
# import statistics
import datetime
# from matplotlib.pyplot import cm
# import seaborn as sns
def imp_map(maploc):
# map1 = filedialog.askopenfilename(filetypes=(("excel Files", "*.xlsx"),("all files", "*.*")))
maploc.configure(state="normal")
maploc.delete(1.0, END)
map1 = filedialog.askopenfilename(filetypes=(("excel Files", "*.xlsx"), ("all files", "*.*")))
maploc.insert(INSERT, map1)
maploc.configure(state="disabled")
def imp_method1(method1loc):
# file1 = filedialog.askopenfilename(filetypes=(("excel Files", "*.xlsx"),("all files", "*.*")))
method1loc.configure(state="normal")
method1loc.delete(1.0, END)
file1 = filedialog.askopenfilename(filetypes=(("excel Files", "*.xlsx"), ("all files", "*.*")))
method1loc.insert(INSERT, file1)
method1loc.configure(state="disabled")
def imp_method2(method2loc):
# file2 = filedialog.askopenfilename(filetypes=(("excel Files", "*.xlsx"),("all files", "*.*")))
method2loc.configure(state="normal")
method2loc.delete(1.0, END)
file2 = filedialog.askopenfilename(filetypes=(("excel Files", "*.xlsx"), ("all files", "*.*")))
method2loc.insert(INSERT, file2)
method2loc.configure(state="disabled")
def set_dir(dirloc_aggregate):
# setdir = filedialog.askdirectory()
dirloc_aggregate.configure(state="normal")
dirloc_aggregate.delete(1.0, END)
setdir = filedialog.askdirectory()
dirloc_aggregate.insert(INSERT, setdir)
dirloc_aggregate.configure(state="disabled")
def MergeApp(dirloc_aggregate, proname, method1loc, method2loc, maploc, CheckClustVis):
start = datetime.datetime.now()
os.chdir(dirloc_aggregate.get('1.0', 'end-1c'))
project = proname.get()
file1 = method1loc.get('1.0', 'end-1c')
file2 = method2loc.get('1.0', 'end-1c')
map1 = maploc.get('1.0', 'end-1c')
# Fix sample index(name) type for proper merge
def qcname(indname):
# if 'QC_SPIKE' in str(indname):
# return('QC_SPIKE')
# elif 'QC' in str(indname):
# return('QC')
# elif 'b' in str(indname) or 'm' in str(indname):
if re.search('[a-zA-Z]', str(indname)):
return (str(indname))
else:
return (int(indname))
# Import DataFrames from file1
spequant1 = pd.read_excel(file1, sheet_name='Lipid Species Concentrations', header=0, index_col=0, na_values='.')
specomp1 = pd.read_excel(file1, sheet_name='Lipid Species Composition', header=0, index_col=0, na_values='.')
claquant1 = pd.read_excel(file1, sheet_name='Lipid Class Concentration', header=0, index_col=0, na_values='.')
faquant1 = pd.read_excel(file1, sheet_name='Fatty Acid Concentration', header=0, index_col=0, na_values='.')
facomp1 = pd.read_excel(file1, sheet_name='Fatty Acid Composition', header=0, index_col=0, na_values='.')
spequant1.index = list(map(qcname, list(spequant1.index)))
specomp1.index = list(map(qcname, list(specomp1.index)))
claquant1.index = list(map(qcname, list(claquant1.index)))
faquant1.index = list(map(qcname, list(faquant1.index)))
facomp1.index = list(map(qcname, list(facomp1.index)))
# Import DataFrames from file2
if file2 != '':
spequant2 = pd.read_excel(file2, sheet_name='Lipid Species Concentrations', header=0, index_col=0,
na_values='.')
specomp2 = pd.read_excel(file2, sheet_name='Lipid Species Composition', header=0, index_col=0, na_values='.')
claquant2 = pd.read_excel(file2, sheet_name='Lipid Class Concentration', header=0, index_col=0, na_values='.')
faquant2 = pd.read_excel(file2, sheet_name='Fatty Acid Concentration', header=0, index_col=0, na_values='.')
facomp2 = pd.read_excel(file2, sheet_name='Fatty Acid Composition', header=0, index_col=0, na_values='.')
spequant2.index = list(map(qcname, list(spequant2.index)))
specomp2.index = list(map(qcname, list(specomp2.index)))
claquant2.index = list(map(qcname, list(claquant2.index)))
faquant2.index = list(map(qcname, list(faquant2.index)))
facomp2.index = list(map(qcname, list(facomp2.index)))
else:
spequant2 = pd.DataFrame()
specomp2 = pd.DataFrame()
claquant2 = pd.DataFrame()
faquant2 = pd.DataFrame()
facomp2 = pd.DataFrame()
# Merge DataFrames
spequant = pd.concat([spequant1, spequant2], axis=1, sort=False)
specomp = pd.concat([specomp1, specomp2], axis=1, sort=False)
claquant = pd.concat([claquant1, claquant2], axis=1, sort=False)
faquant = pd.concat([faquant1, faquant2], axis=1, sort=False)
facomp = pd.concat([facomp1, facomp2], axis=1, sort=False)
# Sort Columns in Merged DataFrames
spequant = spequant.reindex(sorted(spequant.columns), axis=1)
specomp = specomp.reindex(sorted(specomp.columns), axis=1)
claquant = claquant.reindex(sorted(claquant.columns), axis=1)
clacomp = claquant.apply(lambda x: 100 * x / x.sum(), axis=1) # get class composit
faquant = faquant.reindex(sorted(faquant.columns), axis=1)
facomp = facomp.reindex(sorted(facomp.columns), axis=1)
# Write Master data sheet
# master = pd.ExcelWriter(project+'_master.xlsx')
# spequant.to_excel(master, 'Species Quant')
# specomp.to_excel(master, 'Species Composit')
# claquant.to_excel(master, 'Class Quant')
# clacomp.to_excel(master, 'Class Composit')
# faquant.to_excel(master, 'FattyAcid Quant')
# facomp.to_excel(master, 'FattyAcid Composit')
# master.save()
# print('master sheet saved')
# Import Map
sampinfo = pd.read_excel(map1, sheet_name=0, header=1, index_col=0, na_values='.')
# Exp name dict
expname = dict(zip(sampinfo.ExpNum, sampinfo.ExpName))
sampinfo = sampinfo.drop(['ExpName'], axis=1)
sampinfo.index = list(map(qcname, list(sampinfo.index)))
sampinfo['SampleNorm'] = sampinfo['SampleNorm'].astype('float64')
# Create Normalized Sheets
# spenorm = spequant[list(map(lambda x: isinstance(x, int), spequant.index))].copy()
# #exclude sample with string name
# clanorm = claquant[list(map(lambda x: isinstance(x, int), claquant.index))].copy()
# fanorm = faquant[list(map(lambda x: isinstance(x, int), faquant.index))].copy()
spenorm = spequant.copy() # inlude all samples
clanorm = claquant.copy()
fanorm = faquant.copy()
spenorm = spenorm.divide(
40) # x0.025 to reverse /0.025 in the standard coef. /0.025 is there to simulate LWM result
clanorm = clanorm.divide(40)
fanorm = fanorm.divide(40)
spenorm = spenorm.divide(sampinfo['SampleNorm'], axis='index')
clanorm = clanorm.divide(sampinfo['SampleNorm'], axis='index')
fanorm = fanorm.divide(sampinfo['SampleNorm'], axis='index')
# Fix GroupName. If GroupName and GroupNum doesn't match, change GroupName to match GroupNum
for i in sampinfo['ExpNum'].unique().astype(int):
for ii in sampinfo.loc[sampinfo['ExpNum'] == i, 'GroupNum'].unique().astype(int):
gNamlogic = np.logical_and(sampinfo['GroupNum'] == ii, sampinfo['ExpNum'] == i)
sampinfo.loc[gNamlogic, "GroupName"] = sampinfo['GroupName'][gNamlogic].reset_index(drop=True)[0]
# for i in range(1, int(max(sampinfo['ExpNum'])) + 1):
# for ii in range(1, int(max(sampinfo.loc[sampinfo['ExpNum'] == i, 'GroupNum'])) + 1):
# gNamlogic = np.logical_and(sampinfo['GroupNum'] == ii, sampinfo['ExpNum'] == i)
# sampinfo.loc[gNamlogic, "GroupName"] = sampinfo['GroupName'][gNamlogic].reset_index(drop=True)[0]
# Merge Map, using index (Sample column in map and sample name in raw data)
spequantin = | pd.concat([sampinfo, spequant], axis=1, sort=False, join='inner') | pandas.concat |
"""
python 3.8
portions of code and/or methodology based on https://github.com/thinkingmachines/ph-poverty-mapping
Extract features features OSM data
download OSM data from
http://download.geofabrik.de/asia/philippines.html#
buildings (polygons)
types : residential, damaged, commercial, industrial, education, health
For each type, we calculated
- the total number of buildings (count poly features intersecting with buffer)
- the total area of buildings (sum of area of poly features which intersect with buffer)
- the mean area of buildings (avg area of poly features which intersect with buffer)
- the proportion of the cluster area occupied by the buildings (ratio of total area of buildings which intersect with buffer to buffer area)
pois (points)
types: 100+ different types
For each type, we calculated
- the total number of each POI within a proximity of the area (point in poly)
roads (lines)
types: primary, trunk, paved, unpaved, intersection
for each type of road, we calculated
- the distance to the closest road (point to line vertice dist)
- total number of roads (count line features which intersect with buffer)
- total road length (length of lines which intersect with buffer)
"""
import os
import math
import pandas as pd
import geopandas as gpd
from shapely.geometry import Point
from shapely.ops import nearest_points
from sklearn.neighbors import BallTree
import numpy as np
project_dir = "/Users/sasanfaraj/Desktop/folders/AidData/PHL_WORK"
data_dir = os.path.join(project_dir, 'data')
date = "210101"
# >>>>>>>>>>>>>>>>>>>>
# DHS CLUSTERS
geom_label = "dhs-buffers"
geom_path = os.path.join(data_dir, 'dhs_buffers.geojson')
geom_id = "DHSID"
# load buffers/geom created during data prep
buffers_gdf = gpd.read_file(geom_path)
# calculate area of each buffer
# convert to UTM 51N (meters) first, then back to WGS84 (degrees)
buffers_gdf = buffers_gdf.to_crs("EPSG:32651") # UTM 51N
buffers_gdf["buffer_area"] = buffers_gdf.area
buffers_gdf = buffers_gdf.to_crs("EPSG:4326") # WGS84
# >>>>>>>>>>>>>>>>>>>>
# KC CLUSTERS
# geom_label = "kc-5km-buffers"
# geom_path = os.path.join(data_dir, 'kc_clusters_5km-buffer.geojson')
# geom_id = "cluster_name"
# # load point geom created during prep
# buffers_gdf = gpd.read_file(geom_path)
# buffers_gdf.columns = [i if i != "cluster" else "cluster_name" for i in buffers_gdf.columns]
# # calculate area of each buffer
# # convert to UTM 51N (meters) first, then back to WGS84 (degrees)
# buffers_gdf = buffers_gdf.to_crs("EPSG:32651") # UTM 51N
# buffers_gdf["buffer_area"] = buffers_gdf.area
# buffers_gdf = buffers_gdf.to_crs("EPSG:4326") # WGS84
# >>>>>>>>>>>>>>>>>>>>
# OSM PLACES
# geom_label = "osm-places-3km-buffers"
# geom_path = os.path.join(data_dir, 'osm-places_3km-buffer_{}.geojson'.format(date))
# geom_id = "osm_id"
# # load buffers/geom created during data prep
# buffers_gdf = gpd.read_file(geom_path)
# # calculate area of each buffer
# # convert to UTM 51N (meters) first, then back to WGS84 (degrees)
# buffers_gdf = buffers_gdf.to_crs("EPSG:32651") # UTM 51N
# buffers_gdf["buffer_area"] = buffers_gdf.area
# buffers_gdf = buffers_gdf.to_crs("EPSG:4326") # WGS84
# >>>>>>>>>>>>>>>>>>>>
# ---------------------------------------------------------
# pois
# count of each type of pois (100+) in each buffer
print("Running pois...")
osm_pois_shp_path = os.path.join(data_dir, 'osm/philippines-{}-free.shp/gis_osm_pois_free_1.shp'.format(date))
osm_pois_a_shp_path = os.path.join(data_dir, 'osm/philippines-{}-free.shp/gis_osm_pois_a_free_1.shp'.format(date))
raw_pois_geo = gpd.read_file(osm_pois_shp_path)
raw_pois_a_geo = gpd.read_file(osm_pois_a_shp_path)
pois_geo = pd.concat([raw_pois_geo, raw_pois_a_geo])
"""
# manually generate crosswalk
# first prep CSV with all types - can combine multiple OSM timesteps (see below)
# then in Excel/whatever, assign group to each type/fclass
type_df = pd.DataFrame({"type": list(set(pois_geo["fclass"]))})
type_df["group"]= 0
type_df.to_csv(os.path.join(project_dir, "OSM/crosswalks/pois_type_crosswalk.csv"), index=False, encoding="utf-8")
"""
# load crosswalk for types and assign any not grouped to "other"
pois_type_crosswalk_path = os.path.join(project_dir, 'OSM/osm_code/crosswalks/pois_type_crosswalk.csv')
pois_type_crosswalk_df = pd.read_csv(pois_type_crosswalk_path)
pois_type_crosswalk_df.loc[pois_type_crosswalk_df["group"] == "0", "group"] = "other"
# merge new classification and assign any features without a type to unclassifid
pois_geo = pois_geo.merge(pois_type_crosswalk_df, left_on="fclass", right_on="type", how="left")
pois_geo.loc[pois_geo["fclass"].isna(), "group"] = "unclassified"
# show breakdown of groups
print(pois_geo.group.value_counts())
# group_field = "fclass"
group_field = "group"
# split by group
# pois_group_list = ["all"] + [i for i in set(pois_geo[group_field])]
pois_group_list = [i for i in set(pois_geo[group_field])]
# copy of buffers gdf to use for output
buffers_gdf_pois = buffers_gdf.copy(deep=True)
for group in pois_group_list:
print(group)
# subet by group
if group == "all":
pois_geo_subset = pois_geo.reset_index().copy(deep=True)
else:
pois_geo_subset = pois_geo.loc[pois_geo[group_field] == group].reset_index().copy(deep=True)
# query to find pois in each buffer
bquery = pois_geo_subset.sindex.query_bulk(buffers_gdf.geometry)
# pois dataframe where each row contains a cluster and one poi in it (can have multiple rows per cluster)
bquery_df = pd.DataFrame({"cluster": bquery[0], "pois": bquery[1]})
# add pois data to spatial query dataframe
bquery_full = bquery_df.merge(pois_geo_subset, left_on="pois", right_index=True, how="left")
# aggregate spatial query df with pois info, by cluster
bquery_agg = bquery_full.groupby("cluster").agg({"pois": "count"})
bquery_agg.columns = [group + "_pois_count"]
# join cluster back to original buffer_geo dataframe with columns for specific building type queries
z1 = buffers_gdf.merge(bquery_agg, left_index=True, right_on="cluster", how="left")
# not each cluster will have relevant pois, set those to zero
z1.fillna(0, inplace=True)
# set index and drop unnecessary columns
if z1.index.name != "cluster": z1.set_index("cluster", inplace=True)
z2 = z1[group + "_pois_count"]
# merge group columns back to main cluster dataframe
buffers_gdf_pois = buffers_gdf_pois.merge(z2, left_index=True, right_index=True)
# output final features
pois_feature_cols = [geom_id] + [i for i in buffers_gdf_pois.columns if "_pois_" in i]
pois_features = buffers_gdf_pois[pois_feature_cols].copy(deep=True)
pois_features_path = os.path.join(data_dir, 'osm/features/{}_pois_{}.csv'.format(geom_label, date))
pois_features.to_csv(pois_features_path, index=False, encoding="utf-8")
# ---------------------------------------------------------
# traffic
# count of each type of traffic item in each buffer
print("Running traffic...")
osm_traffic_shp_path = os.path.join(data_dir, 'osm/philippines-{}-free.shp/gis_osm_traffic_free_1.shp'.format(date))
osm_traffic_a_shp_path = os.path.join(data_dir, 'osm/philippines-{}-free.shp/gis_osm_traffic_a_free_1.shp'.format(date))
raw_traffic_geo = gpd.read_file(osm_traffic_shp_path)
raw_traffic_a_geo = gpd.read_file(osm_traffic_a_shp_path)
traffic_geo = pd.concat([raw_traffic_geo, raw_traffic_a_geo])
"""
# manually generate crosswalk
# first prep CSV with all types - can combine multiple OSM timesteps (see below)
# then in Excel/whatever, assign group to each type/fclass
type_df = pd.DataFrame({"type": list(set(traffic_geo["fclass"]))})
type_df["group"]= 0
type_df.to_csv(os.path.join(project_dir, "OSM/crosswalks/traffic_type_crosswalk.csv"), index=False, encoding="utf-8")
"""
# load crosswalk for types and assign any not grouped to "other"
traffic_type_crosswalk_path = os.path.join(project_dir, 'OSM/osm_code/crosswalks/traffic_type_crosswalk.csv')
traffic_type_crosswalk_df = pd.read_csv(traffic_type_crosswalk_path)
traffic_type_crosswalk_df.loc[traffic_type_crosswalk_df["group"] == "0", "group"] = "other"
# merge new classification and assign any features without a type to unclassifid
traffic_geo = traffic_geo.merge(traffic_type_crosswalk_df, left_on="fclass", right_on="type", how="left")
traffic_geo.loc[traffic_geo["fclass"].isna(), "group"] = "unclassified"
# show breakdown of groups
print(traffic_geo.group.value_counts())
# group_field = "fclass"
group_field = "group"
# split by group
# traffic_group_list = ["all"] + [i for i in set(traffic_geo[group_field])]
traffic_group_list = [i for i in set(traffic_geo[group_field])]
# copy of buffers gdf to use for output
buffers_gdf_traffic = buffers_gdf.copy(deep=True)
for group in traffic_group_list:
print(group)
# subet by group
if group == "all":
traffic_geo_subset = traffic_geo.copy(deep=True)
else:
traffic_geo_subset = traffic_geo.loc[traffic_geo[group_field] == group].reset_index().copy(deep=True)
# query to find traffic in each buffer
bquery = traffic_geo_subset.sindex.query_bulk(buffers_gdf.geometry)
# traffic dataframe where each row contains a cluster and one traffic feature in it (can have multiple rows per cluster)
bquery_df = pd.DataFrame({"cluster": bquery[0], "traffic": bquery[1]})
# add traffic data to spatial query dataframe
bquery_full = bquery_df.merge(traffic_geo_subset, left_on="traffic", right_index=True, how="left")
# aggregate spatial query df with traffic info, by cluster
bquery_agg = bquery_full.groupby("cluster").agg({"traffic": "count"})
bquery_agg.columns = [group + "_traffic_count"]
# join cluster back to original buffer_geo dataframe with columns for specific building type queries
z1 = buffers_gdf.merge(bquery_agg, left_index=True, right_on="cluster", how="left")
# not each cluster will have relevant traffic, set those to zero
z1.fillna(0, inplace=True)
# set index and drop unnecessary columns
if z1.index.name != "cluster": z1.set_index("cluster", inplace=True)
z2 = z1[group + "_traffic_count"]
# merge group columns back to main cluster dataframe
buffers_gdf_traffic = buffers_gdf_traffic.merge(z2, left_index=True, right_index=True)
# output final features
traffic_feature_cols = [geom_id] + [i for i in buffers_gdf_traffic.columns if "_traffic_" in i]
traffic_features = buffers_gdf_traffic[traffic_feature_cols].copy(deep=True)
traffic_features_path = os.path.join(data_dir, 'osm/features/{}_traffic_{}.csv'.format(geom_label, date))
traffic_features.to_csv(traffic_features_path, index=False, encoding="utf-8")
# ---------------------------------------------------------
# transport
# count of each type of transport item in each buffer
print("Running transport...")
osm_transport_shp_path = os.path.join(data_dir, 'osm/philippines-{}-free.shp/gis_osm_transport_free_1.shp'.format(date))
osm_transport_a_shp_path = os.path.join(data_dir, 'osm/philippines-{}-free.shp/gis_osm_transport_a_free_1.shp'.format(date))
raw_transport_geo = gpd.read_file(osm_transport_shp_path)
raw_transport_a_geo = gpd.read_file(osm_transport_a_shp_path)
transport_geo = pd.concat([raw_transport_geo, raw_transport_a_geo])
"""
manually generate crosswalk
first prep CSV with all types - can combine multiple OSM timesteps (see below)
then in Excel/whatever, assign group to each type/fclass
type_df = pd.DataFrame({"type": list(set(transport_geo["fclass"]))})
type_df["group"]= 0
type_df.to_csv(os.path.join(project_dir, "OSM/crosswalks/transport_type_crosswalk.csv"), index=False, encoding="utf-8")
"""
# load crosswalk for types and assign any not grouped to "other"
transport_type_crosswalk_path = os.path.join(project_dir, 'OSM/osm_code/crosswalks/transport_type_crosswalk.csv')
transport_type_crosswalk_df = pd.read_csv(transport_type_crosswalk_path)
transport_type_crosswalk_df.loc[transport_type_crosswalk_df["group"] == "0", "group"] = "other"
# merge new classification and assign any features without a type to unclassifid
transport_geo = transport_geo.merge(transport_type_crosswalk_df, left_on="fclass", right_on="type", how="left")
transport_geo.loc[transport_geo["fclass"].isna(), "group"] = "unclassified"
# show breakdown of groups
print(transport_geo.group.value_counts())
# group_field = "fclass"
group_field = "group"
# split by group
# transport_group_list = ["all"] + [i for i in set(transport_geo[group_field])]
transport_group_list = [i for i in set(transport_geo[group_field])]
# copy of buffers gdf to use for output
buffers_gdf_transport = buffers_gdf.copy(deep=True)
for group in transport_group_list:
print(group)
# subet by group
if group == "all":
transport_geo_subset = transport_geo.copy(deep=True)
else:
transport_geo_subset = transport_geo.loc[transport_geo[group_field] == group].reset_index().copy(deep=True)
# query to find transport in each buffer
bquery = transport_geo_subset.sindex.query_bulk(buffers_gdf.geometry)
# transport dataframe where each row contains a cluster and one transport feature in it (can have multiple rows per cluster)
bquery_df = pd.DataFrame({"cluster": bquery[0], "transport": bquery[1]})
# add transport data to spatial query dataframe
bquery_full = bquery_df.merge(transport_geo_subset, left_on="transport", right_index=True, how="left")
# aggregate spatial query df with transport info, by cluster
bquery_agg = bquery_full.groupby("cluster").agg({"transport": "count"})
bquery_agg.columns = [group + "_transport_count"]
# join cluster back to original buffer_geo dataframe with columns for specific building type queries
z1 = buffers_gdf.merge(bquery_agg, left_index=True, right_on="cluster", how="left")
# not each cluster will have relevant transport, set those to zero
z1.fillna(0, inplace=True)
# set index and drop unnecessary columns
if z1.index.name != "cluster": z1.set_index("cluster", inplace=True)
z2 = z1[group + "_transport_count"]
# merge group columns back to main cluster dataframe
buffers_gdf_transport = buffers_gdf_transport.merge(z2, left_index=True, right_index=True)
# output final features
transport_feature_cols = [geom_id] + [i for i in buffers_gdf_transport.columns if "_transport_" in i]
transport_features = buffers_gdf_transport[transport_feature_cols].copy(deep=True)
transport_features_path = os.path.join(data_dir, 'osm/features/{}_transport_{}.csv'.format(geom_label, date))
transport_features.to_csv(transport_features_path, index=False, encoding="utf-8")
# ---------------------------------------------------------
# # buildings
# # for each type of building (and all buildings combined)
# # count of buildings in each buffer, average areas of buildings in each buffer, total area of building in each buffer, ratio of building area to total area of buffer
print("Running buildings...")
osm_buildings_shp_path = os.path.join(data_dir, 'osm/philippines-{}-free.shp/gis_osm_buildings_a_free_1.shp'.format(date))
buildings_geo_raw = gpd.read_file(osm_buildings_shp_path)
"""
manually generate crosswalk
first prep CSV with all types - can combine multiple OSM timesteps (see below)
then in Excel/whatever, assign group to each type/fclass
type_df = pd.DataFrame({"type": list(set(buildings_geo["type"]))})
type_df["group"]= 0
type_df.to_csv(os.path.join(project_dir, "OSM/crosswalks/building_type_crosswalk.csv"), index=False, encoding="utf-8")
"""
# # load crosswalk for building types and assign any not grouped to "other"
building_type_crosswalk_path = os.path.join(project_dir, 'OSM/osm_code/osm_code/crosswalks/building_type_crosswalk.csv')
building_type_crosswalk_df = pd.read_csv(building_type_crosswalk_path)
building_type_crosswalk_df.loc[building_type_crosswalk_df["group"] == "0", "group"] = "other"
# # merge new classification and assign any buildings without a type to unclassifid
buildings_geo_raw = buildings_geo_raw.merge(building_type_crosswalk_df, on="type", how="left")
buildings_geo_raw.loc[buildings_geo_raw["type"].isna(), "group"] = "unclassified"
group_field = "group"
# # show breakdown of groups
print(buildings_geo_raw.group.value_counts())
buildings_geo = buildings_geo_raw.copy(deep=True)
# # split by building types
# # group_list = ["residential"]
# # group_list = ["all"] + [i for i in set(buildings_geo["group"]) if i not in ["other", "unclassified"]]
buildings_group_list = [i for i in set(buildings_geo["group"]) if i not in ["other", "unclassified"]]
buildings_group_list = [i for i in buildings_group_list if str(i) != 'nan'] #removes nan from building_group_list - Sasan
buildings_group_list = buildings_group_list + ['all'] #add a section for all buildings into group list
if "all" not in buildings_group_list:
buildings_geo = buildings_geo.loc[buildings_geo["group"].isin(buildings_group_list)]
# calculate area of each building
# convert to UTM 51N (meters) first, then back to WGS84 (degrees)
buildings_geo = buildings_geo.to_crs("EPSG:32651") # UTM 51N
buildings_geo["area"] = buildings_geo.area
buildings_geo = buildings_geo.to_crs("EPSG:4326") # WGS84
# copy of buffers gdf to use for output
buffers_gdf_buildings = buffers_gdf.copy(deep=True)
for group in buildings_group_list:
print(group)
# subet by group
if group == "all":
buildings_geo_subset = buildings_geo.copy(deep=True)
else:
buildings_geo_subset = buildings_geo.loc[buildings_geo[group_field] == group].reset_index().copy(deep=True)
# query to find buildings in each buffer
bquery = buildings_geo_subset.sindex.query_bulk(buffers_gdf.geometry)
# building dataframe where each row contains a cluster and one building in it (can have multiple rows per cluster)
bquery_df = pd.DataFrame({"cluster": bquery[0], "building": bquery[1]})
# add building data to spatial query dataframe
bquery_full = bquery_df.merge(buildings_geo_subset, left_on="building", right_index=True, how="left")
# aggregate spatial query df with building info, by cluster
bquery_agg = bquery_full.groupby("cluster").agg({
"area": ["count", "mean", "sum"]
})
# rename agg df
basic_building_cols = ["buildings_count", "buildings_avgarea", "buildings_totalarea"]
bquery_agg.columns = ["{}_{}".format(group, i) for i in basic_building_cols]
# join cluster back to original buffer_geo dataframe with columns for specific building type queries
z1 = buffers_gdf.merge(bquery_agg, left_index=True, right_on="cluster", how="left")
# not each cluster will have relevant buildings, set those to zero
z1.fillna(0, inplace=True)
# calculate ratio for building type
z1["{}_buildings_ratio".format(group)] = z1["{}_buildings_totalarea".format(group)] / z1["buffer_area"]
# set index and drop unnecessary columns
if z1.index.name != "cluster": z1.set_index("cluster", inplace=True)
z2 = z1[bquery_agg.columns.to_list() + ["{}_buildings_ratio".format(group)]]
# merge group columns back to main cluster dataframe
buffers_gdf_buildings = buffers_gdf_buildings.merge(z2, left_index=True, right_index=True)
# output final features
buildings_feature_cols = [geom_id] + [i for i in buffers_gdf_buildings.columns if "_buildings_" in i]
buildings_features = buffers_gdf_buildings[buildings_feature_cols].copy(deep=True)
buildings_features_path = os.path.join(data_dir, 'osm/features/{}_buildings_{}.csv'.format(geom_label, date))
buildings_features.to_csv(buildings_features_path, index=False, encoding="utf-8")
# ---------------------------------------------------------
# roads
# for each type of road
# distance to closest road from cluster centroid, total number of roads in each cluster, and total length of roads in each cluster
print("Running roads...")
osm_roads_shp_path = os.path.join(data_dir, 'osm/philippines-{}-free.shp/gis_osm_roads_free_1.shp'.format(date))
roads_geo = gpd.read_file(osm_roads_shp_path)
# get each road length
# convert to UTM 51N (meters) first, then back to WGS84 (degrees)
roads_geo = roads_geo.to_crs("EPSG:32651") # UTM 51N
roads_geo["road_length"] = roads_geo.geometry.length
roads_geo = roads_geo.to_crs("EPSG:4326") # WGS84
"""
# manually generate crosswalk
# first prep CSV with all types - can combine multiple OSM timesteps (see below)
# then in Excel/whatever, assign group to each type/fclass
type_df = pd.DataFrame({"type": list(set(roads_geo["fclass"]))})
type_df["group"]= 0
type_df.to_csv(os.path.join(project_dir, "OSM/crosswalks/roads_type_crosswalk.csv"), index=False, encoding="utf-8")
"""
# load crosswalk for types and assign any not grouped to "other"
roads_type_crosswalk_path = os.path.join(project_dir, 'OSM/osm_code/crosswalks/roads_type_crosswalk.csv')
roads_type_crosswalk_df = pd.read_csv(roads_type_crosswalk_path)
roads_type_crosswalk_df.loc[roads_type_crosswalk_df["group"] == "0", "group"] = "other"
# merge new classification and assign any features without a type to unclassifid
roads_geo = roads_geo.merge(roads_type_crosswalk_df, left_on="fclass", right_on="type", how="left")
roads_geo.loc[roads_geo["fclass"].isna(), "group"] = "unclassified"
# group_field = "fclass"
group_field = "group"
# show breakdown of groups
print(roads_geo[group_field].value_counts())
# split by groups
roads_group_list = [i for i,j in roads_geo[group_field].value_counts().to_dict().items() if j > 1000]
# roads_group_list = ["all"] + [i for i,j in roads_geo[group_field].value_counts().to_dict().items() if j > 1000]
# roads_group_list = ["all"] + [i for i in set(roads_geo["fclass"])]
# roads_group_list = ["all", "primary", "secondary"]
#-----------------
#find distance to nearest road (based on vertices of roads)
# generate centroids of buffers
cluster_centroids = buffers_gdf.copy(deep=True)
cluster_centroids.geometry = cluster_centroids.apply(lambda x: Point(x.longitude, x.latitude), axis=1)
cluster_centroids = gpd.GeoDataFrame(cluster_centroids)
src_points = cluster_centroids.apply(lambda x: (x.longitude, x.latitude), axis=1).to_list()
for group in roads_group_list:
print(group)
# subset based on group
if group == "all":
subset_roads_geo = roads_geo.copy(deep=True)
else:
subset_roads_geo = roads_geo.loc[roads_geo[group_field] == group].reset_index().copy(deep=True)
# generate list of all road vertices and convert to geodataframe
line_xy = subset_roads_geo.apply(lambda x: (x.osm_id, x.geometry.xy), axis=1)
line_xy_lookup = [j for i in line_xy for j in list(zip([i[0]]*len(i[1][0]), *i[1]))]
line_xy_df = | pd.DataFrame(line_xy_lookup, columns=["osm_id", "x", "y"]) | pandas.DataFrame |
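# Hedged sketch of how the nearest-road distance step could continue from here; the
# original code is truncated above, so the BallTree usage below is an assumption
# (column names follow line_xy_df, src_points is (longitude, latitude) as built above).
def _example_nearest_vertex_distance(src_points, line_xy_df):
    import numpy as np
    from sklearn.neighbors import BallTree
    tree = BallTree(np.radians(line_xy_df[["y", "x"]].values), metric="haversine")
    dist_rad, _ = tree.query(np.radians([(lat, lon) for lon, lat in src_points]), k=1)
    return dist_rad[:, 0] * 6371000  # metres, using a mean Earth radius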
import pandas as pd
import numpy as np
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
import sys
class temsilci(QItemDelegate):
def __init__(self, parent=None):
super().__init__()
def createEditor(self, parent, option, index):  # must be named createEditor so Qt invokes it
editor = QLineEdit(parent)
editor.setValidator(QDoubleValidator())
return editor
class bakkalBorcDefteri(QTableWidget):
def __init__(self, df):
super().__init__()
self.df = df
self.setStyleSheet('font-size: 25px;')
Satirlar, Sütünlar = self.df.shape
self.setColumnCount(Sütünlar)
self.setRowCount(Satirlar)
self.setHorizontalHeaderLabels(("Müşteri Adı-Soyad","Borç Tutarı","Son Ödeme tarihi","Ödendi/Ödenmedi","Ödeme Tipi"))
self.verticalHeader().setSectionResizeMode(QHeaderView.Stretch)
self.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
self.setItemDelegateForColumn(1, temsilci())
for x in range(self.rowCount()):
for y in range(self.columnCount()):
self.setItem(x, y, QTableWidgetItem(str(self.df.iloc[x, y])))
self.cellChanged[int, int].connect(self.guncellemeDF)
def guncellemeDF(self, satir, sütün):
metin = self.item(satir, sütün).text()
self.df.iloc[satir, sütün] = metin
class DF(QWidget):
veri = np.array([["Müşteri Bilgisi Giriniz",0,"Tarih Belirt"], ["Müşteri Bilgisi Giriniz",0,"Tarih Belirt"],["Müşteri Bilgisi Giriniz",0,"Tarih Belirt"]])
df = pd.DataFrame(data=veri, index=[1,2,3], columns =["Müşteri Adı-Soyad","Borç Tutarı","Son Ödeme tarihi"])
df["Ödendi/Ödenmedi"] = pd | .Series(data=["Durum Ne", "Durum Ne","Durum Ne"], index=[1,2,3]) | pandas.Series |
import training.train
import pandas as pd
import numpy as np
from skopt.space import Real, Categorical, Integer
from skopt.utils import use_named_args
from skopt import gp_minimize
def store_results(search_result, prior_names):
params = pd.DataFrame(search_result['x_iters'])
params.columns = [*prior_names]
params = params.rename_axis('call').reset_index()
scores = | pd.DataFrame(search_result['func_vals']) | pandas.DataFrame |
from datetime import datetime, timedelta
import unittest
from pandas.core.datetools import (
bday, BDay, BQuarterEnd, BMonthEnd, BYearEnd, MonthEnd,
DateOffset, Week, YearBegin, YearEnd, Hour, Minute, Second,
format, ole2datetime, to_datetime, normalize_date,
getOffset, getOffsetName, inferTimeRule, hasOffsetName)
from nose.tools import assert_raises
####
## Misc function tests
####
def test_format():
actual = format(datetime(2008, 1, 15))
assert actual == '20080115'
def test_ole2datetime():
actual = ole2datetime(60000)
assert actual == datetime(2064, 4, 8)
assert_raises(Exception, ole2datetime, 60)
def test_to_datetime1():
actual = to_datetime(datetime(2008, 1, 15))
assert actual == datetime(2008, 1, 15)
actual = to_datetime('20080115')
assert actual == datetime(2008, 1, 15)
# unparseable
s = 'Month 1, 1999'
assert to_datetime(s) == s
def test_normalize_date():
actual = normalize_date(datetime(2007, 10, 1, 1, 12, 5, 10))
assert actual == datetime(2007, 10, 1)
#####
### DateOffset Tests
#####
class TestDateOffset(object):
def setUp(self):
self.d = datetime(2008, 1, 2)
def test_repr(self):
repr(DateOffset())
repr(DateOffset(2))
repr(2 * DateOffset())
repr(2 * DateOffset(months=2))
def test_mul(self):
assert DateOffset(2) == 2 * DateOffset(1)
assert DateOffset(2) == DateOffset(1) * 2
def test_constructor(self):
assert((self.d + DateOffset(months=2)) == datetime(2008, 3, 2))
assert((self.d - DateOffset(months=2)) == datetime(2007, 11, 2))
assert((self.d + DateOffset(2)) == datetime(2008, 1, 4))
assert not DateOffset(2).isAnchored()
assert DateOffset(1).isAnchored()
d = datetime(2008, 1, 31)
assert((d + DateOffset(months=1)) == datetime(2008, 2, 29))
def test_copy(self):
assert(DateOffset(months=2).copy() == DateOffset(months=2))
class TestBusinessDay(unittest.TestCase):
def setUp(self):
self.d = datetime(2008, 1, 1)
self.offset = BDay()
self.offset2 = BDay(2)
def test_repr(self):
assert repr(self.offset) == '<1 BusinessDay>'
assert repr(self.offset2) == '<2 BusinessDays>'
expected = '<1 BusinessDay: offset=datetime.timedelta(1)>'
assert repr(self.offset + timedelta(1)) == expected
def test_with_offset(self):
offset = self.offset + timedelta(hours=2)
assert (self.d + offset) == datetime(2008, 1, 2, 2)
def testEQ(self):
self.assertEqual(self.offset2, self.offset2)
def test_mul(self):
pass
def test_hash(self):
self.assertEqual(hash(self.offset2), hash(self.offset2))
def testCall(self):
self.assertEqual(self.offset2(self.d), datetime(2008, 1, 3))
def testRAdd(self):
self.assertEqual(self.d + self.offset2, self.offset2 + self.d)
def testSub(self):
off = self.offset2
self.assertRaises(Exception, off.__sub__, self.d)
self.assertEqual(2 * off - off, off)
self.assertEqual(self.d - self.offset2, self.d + BDay(-2))
def testRSub(self):
self.assertEqual(self.d - self.offset2, (-self.offset2).apply(self.d))
def testMult1(self):
self.assertEqual(self.d + 10*self.offset, self.d + BDay(10))
def testMult2(self):
self.assertEqual(self.d + (-5*BDay(-10)),
self.d + BDay(50))
def testRollback1(self):
self.assertEqual(BDay(10).rollback(self.d), self.d)
def testRollback2(self):
self.assertEqual(BDay(10).rollback(datetime(2008, 1, 5)), datetime(2008, 1, 4))
def testRollforward1(self):
self.assertEqual(BDay(10).rollforward(self.d), self.d)
def testRollforward2(self):
self.assertEqual(BDay(10).rollforward(datetime(2008, 1, 5)), datetime(2008, 1, 7))
def test_onOffset(self):
tests = [(BDay(), datetime(2008, 1, 1), True),
(BDay(), datetime(2008, 1, 5), False)]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
def test_apply(self):
tests = []
tests.append((bday,
{datetime(2008, 1, 1): datetime(2008, 1, 2),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 8)}))
tests.append((2*bday,
{datetime(2008, 1, 1): datetime(2008, 1, 3),
datetime(2008, 1, 4): datetime(2008, 1, 8),
datetime(2008, 1, 5): datetime(2008, 1, 8),
datetime(2008, 1, 6): datetime(2008, 1, 8),
datetime(2008, 1, 7): datetime(2008, 1, 9)}))
tests.append((-bday,
{datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 3),
datetime(2008, 1, 5): datetime(2008, 1, 4),
datetime(2008, 1, 6): datetime(2008, 1, 4),
datetime(2008, 1, 7): datetime(2008, 1, 4),
datetime(2008, 1, 8): datetime(2008, 1, 7)}))
tests.append((-2*bday,
{datetime(2008, 1, 1): datetime(2007, 12, 28),
datetime(2008, 1, 4): datetime(2008, 1, 2),
datetime(2008, 1, 5): datetime(2008, 1, 3),
datetime(2008, 1, 6): datetime(2008, 1, 3),
datetime(2008, 1, 7): datetime(2008, 1, 3),
datetime(2008, 1, 8): datetime(2008, 1, 4),
datetime(2008, 1, 9): datetime(2008, 1, 7)}))
tests.append((BDay(0),
{datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 4): datetime(2008, 1, 4),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_apply_corner(self):
self.assertRaises(Exception, BDay().apply, BMonthEnd())
def assertOnOffset(offset, date, expected):
actual = offset.onOffset(date)
assert actual == expected
class TestWeek(unittest.TestCase):
def test_corner(self):
self.assertRaises(Exception, Week, weekday=7)
self.assertRaises(Exception, Week, weekday=-1)
def test_isAnchored(self):
self.assert_(Week(weekday=0).isAnchored())
self.assert_(not Week().isAnchored())
self.assert_(not Week(2, weekday=2).isAnchored())
self.assert_(not Week(2).isAnchored())
def test_offset(self):
tests = []
tests.append((Week(), # not business week
{datetime(2008, 1, 1): datetime(2008, 1, 8),
datetime(2008, 1, 4): datetime(2008, 1, 11),
datetime(2008, 1, 5): datetime(2008, 1, 12),
datetime(2008, 1, 6): datetime(2008, 1, 13),
datetime(2008, 1, 7): datetime(2008, 1, 14)}))
tests.append((Week(weekday=0), # Mon
{datetime(2007, 12, 31): datetime(2008, 1, 7),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 14)}))
tests.append((Week(0, weekday=0), # n=0 -> roll forward. Mon
{datetime(2007, 12, 31): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
        tests.append((Week(-2, weekday=1),  # n=-2 -> two Tuesdays back
{datetime(2010, 4, 6): datetime(2010, 3, 23),
datetime(2010, 4, 8): datetime(2010, 3, 30),
datetime(2010, 4, 5): datetime(2010, 3, 23)}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_onOffset(self):
tests = [(Week(weekday=0), datetime(2008, 1, 1), False),
(Week(weekday=0), datetime(2008, 1, 2), False),
(Week(weekday=0), datetime(2008, 1, 3), False),
(Week(weekday=0), datetime(2008, 1, 4), False),
(Week(weekday=0), datetime(2008, 1, 5), False),
(Week(weekday=0), datetime(2008, 1, 6), False),
(Week(weekday=0), datetime(2008, 1, 7), True),
(Week(weekday=1), datetime(2008, 1, 1), True),
(Week(weekday=1), datetime(2008, 1, 2), False),
(Week(weekday=1), datetime(2008, 1, 3), False),
(Week(weekday=1), datetime(2008, 1, 4), False),
(Week(weekday=1), datetime(2008, 1, 5), False),
(Week(weekday=1), datetime(2008, 1, 6), False),
(Week(weekday=1), datetime(2008, 1, 7), False),
(Week(weekday=2), datetime(2008, 1, 1), False),
(Week(weekday=2), datetime(2008, 1, 2), True),
(Week(weekday=2), datetime(2008, 1, 3), False),
(Week(weekday=2), datetime(2008, 1, 4), False),
(Week(weekday=2), datetime(2008, 1, 5), False),
(Week(weekday=2), datetime(2008, 1, 6), False),
(Week(weekday=2), datetime(2008, 1, 7), False),
(Week(weekday=3), datetime(2008, 1, 1), False),
(Week(weekday=3), datetime(2008, 1, 2), False),
(Week(weekday=3), datetime(2008, 1, 3), True),
(Week(weekday=3), datetime(2008, 1, 4), False),
(Week(weekday=3), datetime(2008, 1, 5), False),
(Week(weekday=3), datetime(2008, 1, 6), False),
(Week(weekday=3), datetime(2008, 1, 7), False),
(Week(weekday=4), datetime(2008, 1, 1), False),
(Week(weekday=4), datetime(2008, 1, 2), False),
(Week(weekday=4), datetime(2008, 1, 3), False),
(Week(weekday=4), datetime(2008, 1, 4), True),
(Week(weekday=4), datetime(2008, 1, 5), False),
(Week(weekday=4), datetime(2008, 1, 6), False),
(Week(weekday=4), datetime(2008, 1, 7), False),
(Week(weekday=5), datetime(2008, 1, 1), False),
(Week(weekday=5), datetime(2008, 1, 2), False),
(Week(weekday=5), datetime(2008, 1, 3), False),
(Week(weekday=5), datetime(2008, 1, 4), False),
(Week(weekday=5), datetime(2008, 1, 5), True),
(Week(weekday=5), datetime(2008, 1, 6), False),
(Week(weekday=5), datetime(2008, 1, 7), False),
(Week(weekday=6), datetime(2008, 1, 1), False),
(Week(weekday=6), datetime(2008, 1, 2), False),
(Week(weekday=6), datetime(2008, 1, 3), False),
(Week(weekday=6), datetime(2008, 1, 4), False),
(Week(weekday=6), datetime(2008, 1, 5), False),
(Week(weekday=6), datetime(2008, 1, 6), True),
(Week(weekday=6), datetime(2008, 1, 7), False),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestBMonthEnd(unittest.TestCase):
def test_offset(self):
tests = []
tests.append((BMonthEnd(),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2006, 12, 29): datetime(2007, 1, 31),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
datetime(2006, 12, 1): datetime(2006, 12, 29)}))
tests.append((BMonthEnd(0),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 29),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31)}))
tests.append((BMonthEnd(2),
{datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 3, 31),
datetime(2006, 12, 29): datetime(2007, 2, 28),
datetime(2006, 12, 31): datetime(2007, 2, 28),
datetime(2007, 1, 1): datetime(2007, 2, 28),
datetime(2006, 11, 1): datetime(2006, 12, 29)}))
tests.append((BMonthEnd(-1),
{datetime(2007, 1, 1): datetime(2006, 12, 29),
datetime(2008, 6, 30): datetime(2008, 5, 30),
datetime(2008, 12, 31): datetime(2008, 11, 28),
datetime(2006, 12, 29): datetime(2006, 11, 30),
datetime(2006, 12, 30): datetime(2006, 12, 29),
datetime(2007, 1, 1): datetime(2006, 12, 29)}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_onOffset(self):
tests = [(BMonthEnd(), datetime(2007, 12, 31), True),
(BMonthEnd(), datetime(2008, 1, 1), False)]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestMonthEnd(unittest.TestCase):
def test_offset(self):
tests = []
tests.append((MonthEnd(),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
datetime(2006, 12, 1): datetime(2006, 12, 31)}))
tests.append((MonthEnd(0),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2006, 12, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31)}))
tests.append((MonthEnd(2),
{datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 3, 31),
datetime(2006, 12, 29): datetime(2007, 1, 31),
datetime(2006, 12, 31): datetime(2007, 2, 28),
datetime(2007, 1, 1): datetime(2007, 2, 28),
datetime(2006, 11, 1): datetime(2006, 12, 31)}))
tests.append((MonthEnd(-1),
{datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2008, 5, 31),
datetime(2008, 12, 31): datetime(2008, 11, 30),
datetime(2006, 12, 29): datetime(2006, 11, 30),
datetime(2006, 12, 30): datetime(2006, 11, 30),
datetime(2007, 1, 1): datetime(2006, 12, 31)}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_onOffset(self):
tests = [(MonthEnd(), datetime(2007, 12, 31), True),
(MonthEnd(), datetime(2008, 1, 1), False)]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestBQuarterEnd(unittest.TestCase):
def test_corner(self):
self.assertRaises(Exception, BQuarterEnd, startingMonth=4)
self.assertRaises(Exception, BQuarterEnd, startingMonth=-1)
def test_isAnchored(self):
self.assert_(BQuarterEnd(startingMonth=1).isAnchored())
self.assert_(BQuarterEnd().isAnchored())
self.assert_(not BQuarterEnd(2, startingMonth=1).isAnchored())
def test_offset(self):
tests = []
tests.append((BQuarterEnd(startingMonth=1),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 4, 30),
datetime(2008, 2, 15): datetime(2008, 4, 30),
datetime(2008, 2, 29): datetime(2008, 4, 30),
datetime(2008, 3, 15): datetime(2008, 4, 30),
datetime(2008, 3, 31): datetime(2008, 4, 30),
datetime(2008, 4, 15): datetime(2008, 4, 30),
datetime(2008, 4, 30): datetime(2008, 7, 31),}))
tests.append((BQuarterEnd(startingMonth=2),
{datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2008, 2, 15): datetime(2008, 2, 29),
datetime(2008, 2, 29): datetime(2008, 5, 30),
datetime(2008, 3, 15): datetime(2008, 5, 30),
datetime(2008, 3, 31): datetime(2008, 5, 30),
datetime(2008, 4, 15): datetime(2008, 5, 30),
datetime(2008, 4, 30): datetime(2008, 5, 30),}))
tests.append((BQuarterEnd(startingMonth=1, n=0),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2008, 2, 15): datetime(2008, 4, 30),
datetime(2008, 2, 29): datetime(2008, 4, 30),
datetime(2008, 3, 15): datetime(2008, 4, 30),
datetime(2008, 3, 31): datetime(2008, 4, 30),
datetime(2008, 4, 15): datetime(2008, 4, 30),
datetime(2008, 4, 30): datetime(2008, 4, 30),}))
tests.append((BQuarterEnd(startingMonth=1, n=-1),
{datetime(2008, 1, 1): datetime(2007, 10, 31),
datetime(2008, 1, 31): datetime(2007, 10, 31),
datetime(2008, 2, 15): datetime(2008, 1, 31),
datetime(2008, 2, 29): datetime(2008, 1, 31),
datetime(2008, 3, 15): datetime(2008, 1, 31),
datetime(2008, 3, 31): datetime(2008, 1, 31),
datetime(2008, 4, 15): datetime(2008, 1, 31),
datetime(2008, 4, 30): datetime(2008, 1, 31),}))
tests.append((BQuarterEnd(startingMonth=1, n=2),
{datetime(2008, 1, 31): datetime(2008, 7, 31),
datetime(2008, 2, 15): datetime(2008, 7, 31),
datetime(2008, 2, 29): datetime(2008, 7, 31),
datetime(2008, 3, 15): datetime(2008, 7, 31),
datetime(2008, 3, 31): datetime(2008, 7, 31),
datetime(2008, 4, 15): datetime(2008, 7, 31),
datetime(2008, 4, 30): datetime(2008, 10, 31),}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
# corner
offset = BQuarterEnd(n=-1, startingMonth=1)
self.assertEqual(datetime(2010, 1, 31) + offset, datetime(2010, 1, 29))
def test_onOffset(self):
tests = [(BQuarterEnd(1, startingMonth=1), datetime(2008, 1, 31), True),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 12, 31), False),
(BQuarterEnd(1, startingMonth=1), datetime(2008, 2, 29), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 3, 30), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 3, 31), False),
(BQuarterEnd(1, startingMonth=1), datetime(2008, 4, 30), True),
(BQuarterEnd(1, startingMonth=1), datetime(2008, 5, 30), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 6, 29), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 6, 30), False),
(BQuarterEnd(1, startingMonth=2), datetime(2008, 1, 31), False),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 12, 31), False),
(BQuarterEnd(1, startingMonth=2), datetime(2008, 2, 29), True),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 3, 30), False),
                 (BQuarterEnd(1, startingMonth=2)
import string
import numpy as np
import pandas as pd
import pytest
from pandas.util import testing as pdt
from .. import util
@pytest.fixture
def test_df():
return pd.DataFrame(
{'col1': range(5),
'col2': range(5, 10)},
index=['a', 'b', 'c', 'd', 'e'])
@pytest.fixture
def choosers():
return pd.DataFrame(
{'var1': range(5),
'var2': range(5, 10),
'var3': ['q', 'w', 'e', 'r', 't'],
'building_id': range(100, 105)},
index=['a', 'b', 'c', 'd', 'e'])
@pytest.fixture
def rates():
return pd.DataFrame(
{'var1_min': [np.nan, np.nan, np.nan],
'var1_max': [1, np.nan, np.nan],
'var2_min': [np.nan, 7, np.nan],
'var2_max': [np.nan, 8, np.nan],
'var3': [np.nan, np.nan, 't'],
'probability_of_relocating': [1, 1, 1]})
def test_apply_filter_query(test_df):
filters = ['col1 < 3', 'col2 > 6']
filtered = util.apply_filter_query(test_df, filters)
expected = pd.DataFrame(
{'col1': [2], 'col2': [7]},
index=['c'])
pdt.assert_frame_equal(filtered, expected)
def test_apply_filter_query_empty(test_df):
filters = ['col1 < 1', 'col2 > 8']
filtered = util.apply_filter_query(test_df, filters)
expected = pd.DataFrame(
{'col1': [], 'col2': []},
index=[])
    pdt.assert_frame_equal(filtered, expected, check_dtype=False)
import download_decompress as dd
import os
import pandas as pd
import time
import logging
import matplotlib.pyplot as plt
import matplotlib as mpl
logging.basicConfig(level=logging.NOTSET)
#logging.disable()
# https://database.lichess.org/
start = time.perf_counter()
#make dir to hold downloads
dir = 'Downloads'
if not os.path.exists(dir):
os.mkdir(dir)
# load source files into list
sources = []
with open('pgn_source.txt','r') as file:
for line in file:
sources.append(line.strip())
# totals across multiple files
openings = {}
total_games = 0
total_lines = 0
for source in reversed(sources):
loopstart = time.perf_counter()
filename = os.path.join(dir,source[38:])
# filename after it is decompressed (remove .bz2)
decomp_filename = filename[:-4]
yyyymm = filename[-15:-8] # '2013-01'
yyyymm = yyyymm[:4]+yyyymm[5:7] # 201301
# download and decompress
dd.download_file(source,filename)
dd.bz2_decompress(filename)
# read source .pgn file line by line
with open(decomp_filename,'r') as file:
file_games = 0
file_lines = 0
for line in file:
line = line.strip()
file_lines += 1
total_lines += 1
# maintain dict with counts of each opening
if line[:8] == '[Opening':
opening = line[10:-2]
if opening in openings.keys() and yyyymm in openings[opening].keys():
openings[opening][yyyymm] = openings[opening][yyyymm]+1
elif opening in openings.keys() and yyyymm not in openings[opening].keys():
openings[opening][yyyymm] = 1
else:
openings[opening] = {yyyymm:1}
# Count number of games. Using each occurance of "[Result"
if line[:7] == '[Result':
file_games += 1
total_games += 1
else:
pass
logging.info(f"File lines: {file_lines}, File games: {file_games}")
logging.info(f"Total lines: {total_lines},Total games: {total_games}")
# delete .pgn & .bz2 files
for filename in os.listdir(dir):
if filename.endswith(".bz2") or filename.endswith(".pgn"):
fullpath = os.path.join(dir,filename)
os.remove(fullpath)
logging.info(f"{fullpath} deleted")
else:
continue
loopend = time.perf_counter()
logging.info(f"{yyyymm} Execution time: {round((loopend - loopstart),2)} seconds")
# get openings into dataframe
df = pd.DataFrame.from_dict(openings, orient='index')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 25 15:50:20 2019
workflow for ZWD and PW retrieval after python copy_gipsyx_post_from_geo.py:
1)save_PPP_field_unselected_data_and_errors(field='ZWD')
2)select_PPP_field_thresh_and_combine_save_all(field='ZWD')
3)use mean_ZWD_over_sound_time_and_fit_tstm to obtain the mda (model dataarray)
3*) can't use produce_kappa_ml_with_cats for hour on 5 mins data, dahhh!
can do that with dayofyear, month, season (need to implement it first)
4)save_GNSS_PW_israeli_stations using mda (e.g., season) from 3
5) do homogenization using Homogenization_R.py and run homogenize_pw_dataset
6) for hydro analysis and more run produce_all_GNSS_PW_anomalies
@author: shlomi
"""
import pandas as pd
import numpy as np
from PW_paths import work_yuval
from PW_paths import work_path
from PW_paths import geo_path
from pathlib import Path
from sklearn.linear_model import LinearRegression
from scipy import stats
hydro_path = work_yuval / 'hydro'
garner_path = work_yuval / 'garner'
ims_path = work_yuval / 'IMS_T'
gis_path = work_yuval / 'gis'
sound_path = work_yuval / 'sounding'
climate_path = work_yuval / 'climate'
dem_path = work_yuval / 'AW3D30'
phys_soundings = sound_path / 'bet_dagan_phys_sounding_2007-2019.nc'
tela_zwd = work_yuval / 'gipsyx_results/tela_newocean/TELA_PPP_1996-2019.nc'
jslm_zwd = work_yuval / 'gipsyx_results/jslm_newocean/JSLM_PPP_2001-2019.nc'
alon_zwd = work_yuval / 'gipsyx_results/alon_newocean/ALON_PPP_2005-2019.nc'
tela_zwd_aligned = work_yuval / 'tela_zwd_aligned_with_physical_bet_dagan.nc'
alon_zwd_aligned = work_yuval / 'ALON_zwd_aligned_with_physical_bet_dagan.nc'
jslm_zwd_aligned = work_yuval / 'JSLM_zwd_aligned_with_physical_bet_dagan.nc'
tela_ims = ims_path / '10mins/TEL-AVIV-COAST_178_TD_10mins_filled.nc'
alon_ims = ims_path / '10mins/ASHQELON-PORT_208_TD_10mins_filled.nc'
jslm_ims = ims_path / '10mins/JERUSALEM-CENTRE_23_TD_10mins_filled.nc'
station_on_geo = geo_path / 'Work_Files/PW_yuval/GNSS_stations'
era5_path = work_yuval / 'ERA5'
PW_stations_path = work_yuval / '1minute'
# stations = pd.read_csv('All_gps_stations.txt', header=0, delim_whitespace=True,
# index_col='name')
logs_path = geo_path / 'Python_Projects/PW_from_GPS/log_files'
GNSS = work_yuval / 'GNSS_stations'
cwd = Path().cwd()
gnss_sound_stations_dict = {'acor': '08001', 'mall': '08302'}
# TODO: kappa_ml_with_cats yields smaller k using cats not None, check it...
# TODO: then assemble PW for all the stations.
class LinearRegression_with_stats(LinearRegression):
"""
LinearRegression class after sklearn's, but calculate t-statistics
and p-values for model coefficients (betas).
Additional attributes available after .fit()
are `t` and `p` which are of the shape (y.shape[1], X.shape[1])
which is (n_features, n_coefs)
    The fit_intercept override in __init__ is commented out, so sklearn's default
    intercept handling applies; re-enable it if the intercept column is already
    included in X.
"""
def __init__(self, *args, **kwargs):
# if not "fit_intercept" in kwargs:
# kwargs['fit_intercept'] = False
super().__init__(*args,**kwargs)
def fit(self, X, y=None, verbose=True, **fit_params):
from scipy import linalg
""" A wrapper around the fitting function.
Improved: adds the X_ and y_ and results_ attrs to class.
Parameters
----------
X : xarray DataArray, Dataset other other array-like
The training input samples.
y : xarray DataArray, Dataset other other array-like
The target values.
Returns
-------
Returns self.
"""
self = super().fit(X, y, **fit_params)
n, k = X.shape
yHat = np.matrix(self.predict(X)).T
# Change X and Y into numpy matricies. x also has a column of ones added to it.
x = np.hstack((np.ones((n,1)),np.matrix(X)))
y = np.matrix(y).T
# Degrees of freedom.
df = float(n-k-1)
# Sample variance.
sse = np.sum(np.square(yHat - y),axis=0)
self.sampleVariance = sse/df
# Sample variance for x.
self.sampleVarianceX = x.T*x
# Covariance Matrix = [(s^2)(X'X)^-1]^0.5. (sqrtm = matrix square root. ugly)
self.covarianceMatrix = linalg.sqrtm(self.sampleVariance[0,0]*self.sampleVarianceX.I)
# Standard erros for the difference coefficients: the diagonal elements of the covariance matrix.
self.se = self.covarianceMatrix.diagonal()[1:]
# T statistic for each beta.
self.betasTStat = np.zeros(len(self.se))
for i in range(len(self.se)):
self.betasTStat[i] = self.coef_[i]/self.se[i]
# P-value for each beta. This is a two sided t-test, since the betas can be
# positive or negative.
        self.betasPValue = 2 * (1 - stats.t.cdf(abs(self.betasTStat), df))
return self
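# Illustrative usage sketch (not part of the original workflow): fit the
# statistics-aware regression on synthetic data and read the extra attributes
# described in the class docstring. The data and the helper name below are
# hypothetical.
def _example_linear_regression_with_stats():
    rng = np.random.RandomState(0)
    X = rng.normal(size=(100, 2))
    y = 1.5 * X[:, 0] - 2.0 * X[:, 1] + rng.normal(scale=0.1, size=100)
    model = LinearRegression_with_stats().fit(X, y)
    # standard errors, t-statistics and p-values, one entry per coefficient
    return model.coef_, model.se, model.betasTStat, model.betasPValue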
def compare_different_cats_bet_dagan_tela():
from aux_gps import error_mean_rmse
ds, mda = mean_ZWD_over_sound_time_and_fit_tstm(
plot=False, times=['2013-09', '2020'], cats=None)
ds_hour, mda = mean_ZWD_over_sound_time_and_fit_tstm(
plot=False, times=['2013-09', '2020'], cats=['hour'])
ds_season, mda = mean_ZWD_over_sound_time_and_fit_tstm(
plot=False, times=['2013-09', '2020'], cats=['season'])
ds_hour_season, mda = mean_ZWD_over_sound_time_and_fit_tstm(
plot=False, times=['2013-09', '2020'], cats=['hour', 'season'])
ds = ds.dropna('sound_time')
ds_hour = ds_hour.dropna('sound_time')
ds_season = ds_season.dropna('sound_time')
ds_hour_season = ds_hour_season.dropna('sound_time')
mean_none, rmse_none = error_mean_rmse(ds['tpw_bet_dagan'], ds['tela_pw'])
mean_hour, rmse_hour = error_mean_rmse(
ds_hour['tpw_bet_dagan'], ds_hour['tela_pw'])
mean_season, rmse_season = error_mean_rmse(
ds_season['tpw_bet_dagan'], ds_season['tela_pw'])
mean_hour_season, rmse_hour_season = error_mean_rmse(
ds_hour_season['tpw_bet_dagan'], ds_hour_season['tela_pw'])
hour_mean_per = 100 * (abs(mean_none) - abs(mean_hour)) / abs(mean_none)
hour_rmse_per = 100 * (abs(rmse_none) - abs(rmse_hour)) / abs(rmse_none)
season_mean_per = 100 * (abs(mean_none) - abs(mean_season)) / abs(mean_none)
season_rmse_per = 100 * (abs(rmse_none) - abs(rmse_season)) / abs(rmse_none)
hour_season_mean_per = 100 * (abs(mean_none) - abs(mean_hour_season)) / abs(mean_none)
hour_season_rmse_per = 100 * (abs(rmse_none) - abs(rmse_hour_season)) / abs(rmse_none)
print(
'whole data mean: {:.2f} and rmse: {:.2f}'.format(
mean_none,
rmse_none))
print(
'hour data mean: {:.2f} and rmse: {:.2f}, {:.1f} % and {:.1f} % better than whole data.'.format(
mean_hour, rmse_hour, hour_mean_per, hour_rmse_per))
print(
'season data mean: {:.2f} and rmse: {:.2f}, {:.1f} % and {:.1f} % better than whole data.'.format(
mean_season, rmse_season, season_mean_per, season_rmse_per))
print(
'hour and season data mean: {:.2f} and rmse: {:.2f}, {:.1f} % and {:.1f} % better than whole data.'.format(
mean_hour_season, rmse_hour_season, hour_season_mean_per, hour_season_rmse_per))
return
def PW_trend_analysis(path=work_yuval, anom=False, station='tela'):
import xarray as xr
pw = xr.open_dataset(path / 'GNSS_daily_PW.nc')[station]
if anom:
pw = pw.groupby('time.month') - pw.groupby('time.month').mean('time')
pw_lr = ML_fit_model_to_tmseries(pw, modelname='LR', plot=False, verbose=True)
pw_tsen = ML_fit_model_to_tmseries(pw, modelname='TSEN', plot=False, verbose=True)
return pw_tsen
def produce_gnss_pw_from_uerra(era5_path=era5_path,
glob_str='UERRA_TCWV_*.nc',
pw_path=work_yuval, savepath=None):
from aux_gps import path_glob
import xarray as xr
from aux_gps import save_ncfile
udf = add_UERRA_xy_to_israeli_gps_coords(pw_path, era5_path)
files = path_glob(era5_path, glob_str)
uerra_list = [xr.open_dataset(file) for file in files]
ds_attrs = uerra_list[0].attrs
ds_list = []
for i, uerra in enumerate(uerra_list):
        print('processing {}'.format(files[i].as_posix().split('/')[-1]))
st_list = []
for station in udf.index:
y = udf.loc[station, 'y']
x = udf.loc[station, 'x']
uerra_st = uerra['tciwv'].isel(y=y, x=x).reset_coords(drop=True)
uerra_st.name = station
uerra_st.attrs = uerra['tciwv'].attrs
uerra_st.attrs['lon'] = udf.loc[station, 'lon']
uerra_st.attrs['lat'] = udf.loc[station, 'lat']
st_list.append(uerra_st)
ds_st = xr.merge(st_list)
ds_list.append(ds_st)
ds = xr.concat(ds_list, 'time')
ds = ds.sortby('time')
ds.attrs = ds_attrs
ds_monthly = ds.resample(time='MS', keep_attrs=True).mean(keep_attrs=True)
if savepath is not None:
filename = 'GNSS_uerra_4xdaily_PW.nc'
save_ncfile(ds, savepath, filename)
filename = 'GNSS_uerra_monthly_PW.nc'
save_ncfile(ds_monthly, savepath, filename)
return ds
def produce_PWV_flux_from_ERA5_UVQ(
path=era5_path,
savepath=None,
pw_path=work_yuval, return_magnitude=False):
import xarray as xr
from aux_gps import calculate_pressure_integral
from aux_gps import calculate_g
from aux_gps import save_ncfile
import numpy as np
ds = xr.load_dataset(era5_path / 'ERA5_UVQ_mm_israel_1979-2020.nc')
ds = ds.sel(expver=1).reset_coords(drop=True)
g = calculate_g(ds['latitude']).mean().item()
qu = calculate_pressure_integral(ds['q'] * ds['u'])
qv = calculate_pressure_integral(ds['q'] * ds['v'])
qu.name = 'qu'
qv.name = 'qv'
# convert to mm/sec units
qu = 100 * qu / (g * 1000)
qv = 100 * qv / (g * 1000)
# add attrs:
qu.attrs['units'] = 'mm/sec'
qv.attrs['units'] = 'mm/sec'
qu_gnss = produce_era5_field_at_gnss_coords(
qu, savepath=None, pw_path=pw_path)
qv_gnss = produce_era5_field_at_gnss_coords(
qv, savepath=None, pw_path=pw_path)
if return_magnitude:
qflux = np.sqrt(qu_gnss**2 + qv_gnss**2)
qflux.attrs['units'] = 'mm/sec'
return qflux
else:
return qu_gnss, qv_gnss
def produce_era5_field_at_gnss_coords(era5_da, savepath=None,
pw_path=work_yuval):
import xarray as xr
from aux_gps import save_ncfile
print('reading ERA5 {} field.'.format(era5_da.name))
gps = produce_geo_gnss_solved_stations(plot=False)
era5_pw_list = []
for station in gps.index:
slat = gps.loc[station, 'lat']
slon = gps.loc[station, 'lon']
da = era5_da.sel(latitude=slat, longitude=slon, method='nearest')
da.name = station
da.attrs['era5_lat'] = da.latitude.values.item()
da.attrs['era5_lon'] = da.longitude.values.item()
da = da.reset_coords(drop=True)
era5_pw_list.append(da)
ds = xr.merge(era5_pw_list)
if savepath is not None:
name = era5_da.name
yrmin = era5_da['time'].dt.year.min().item()
yrmax = era5_da['time'].dt.year.max().item()
filename = 'GNSS_ERA5_{}_{}-{}.nc'.format(name, yrmin, yrmax)
save_ncfile(ds, savepath, filename)
return ds
def produce_gnss_pw_from_era5(era5_path=era5_path,
glob_str='era5_TCWV_israel*.nc',
pw_path=work_yuval, savepath=None):
from aux_gps import path_glob
import xarray as xr
from aux_gps import save_ncfile
filepath = path_glob(era5_path, glob_str)[0]
print('opening ERA5 file {}'.format(filepath.as_posix().split('/')[-1]))
era5_pw = xr.open_dataarray(filepath)
era5_pw = era5_pw.sortby('time')
gps = produce_geo_gnss_solved_stations(plot=False)
era5_pw_list = []
for station in gps.index:
slat = gps.loc[station, 'lat']
slon = gps.loc[station, 'lon']
da = era5_pw.sel(lat=slat, lon=slon, method='nearest')
da.name = station
da.attrs['era5_lat'] = da.lat.values.item()
da.attrs['era5_lon'] = da.lon.values.item()
da = da.reset_coords(drop=True)
era5_pw_list.append(da)
ds_hourly = xr.merge(era5_pw_list)
ds_monthly = ds_hourly.resample(time='MS', keep_attrs=True).mean(keep_attrs=True)
if savepath is not None:
filename = 'GNSS_era5_hourly_PW.nc'
save_ncfile(ds_hourly, savepath, filename)
filename = 'GNSS_era5_monthly_PW.nc'
save_ncfile(ds_monthly, savepath, filename)
return ds_hourly
def plug_in_approx_loc_gnss_stations(log_path=logs_path, file_path=cwd):
from aux_gps import path_glob
import pandas as pd
def plug_loc_to_log_file(logfile, loc):
def replace_field(content_list, string, replacment):
pos = [(i, x) for i, x in enumerate(content_list)
if string in x][0][0]
con = content_list[pos].split(':')
con[-1] = ' {}'.format(replacment)
con = ':'.join(con)
content_list[pos] = con
return content_list
with open(logfile) as f:
content = f.read().splitlines()
repl = [
'X coordinate (m)',
'Y coordinate (m)',
'Z coordinate (m)',
'Latitude (deg)',
'Longitude (deg)',
'Elevation (m)']
location = [loc['X'], loc['Y'], loc['Z'], '+' +
str(loc['lat']), '+' + str(loc['lon']), loc['alt']]
for rep, loca in list(zip(repl, location)):
try:
content = replace_field(content, rep, loca)
except IndexError:
print('did not found {} field...'.format(rep))
pass
with open(logfile, 'w') as f:
for item in content:
f.write('{}\n'.format(item))
print('writing {}'.format(logfile))
return
# load gnss accurate loc:
acc_loc_df = pd.read_csv(file_path / 'israeli_gnss_coords.txt',
delim_whitespace=True)
log_files = path_glob(log_path, '*updated_by_shlomi*.log')
for logfile in log_files:
st_log = logfile.as_posix().split('/')[-1].split('_')[0]
try:
loc = acc_loc_df.loc[st_log, :]
except KeyError:
print('station {} not found in accurate location df, skipping'.format(st_log))
continue
plug_loc_to_log_file(logfile, loc)
print('Done!')
return
def build_df_lat_lon_alt_gnss_stations(gnss_path=GNSS, savepath=None):
from aux_gps import path_glob
import pandas as pd
import pyproj
from pathlib import Path
stations_in_gnss = [x.as_posix().split('/')[-1]
for x in path_glob(GNSS, '*')]
dss = [
load_gipsyx_results(
x,
sample_rate='MS',
plot_fields=None) for x in stations_in_gnss]
# stations_not_found = [x for x in dss if isinstance(x, str)]
# [stations_in_gnss.remove(x) for x in stations_in_gnss if x is None]
dss = [x for x in dss if not isinstance(x, str)]
dss = [x for x in dss if x is not None]
lats = [x.dropna('time').lat[0].values.item() for x in dss]
lons = [x.dropna('time').lon[0].values.item() for x in dss]
alts = [x.dropna('time').alt[0].values.item() for x in dss]
df = pd.DataFrame(lats)
df.index = [x.attrs['station'].lower() for x in dss]
df['lon'] = lons
df['alt'] = alts
df.columns = ['lat', 'lon', 'alt']
ecef = pyproj.Proj(proj='geocent', ellps='WGS84', datum='WGS84')
lla = pyproj.Proj(proj='latlong', ellps='WGS84', datum='WGS84')
X, Y, Z = pyproj.transform(lla, ecef, df['lon'].values, df['lat'].values,
df['alt'].values, radians=False)
df['X'] = X
df['Y'] = Y
df['Z'] = Z
# read station names from log files:
stations_approx = pd.read_fwf(Path().cwd()/'stations_approx_loc.txt',
delim_whitespace=False, skiprows=1, header=None)
stations_approx.columns=['index','X','Y','Z','name', 'extra']
stations_approx['name'] = stations_approx['name'].fillna('') +' ' + stations_approx['extra'].fillna('')
stations_approx.drop('extra', axis=1, inplace=True)
stations_approx = stations_approx.set_index('index')
df['name'] = stations_approx['name']
df.sort_index(inplace=True)
if savepath is not None:
filename = 'israeli_gnss_coords.txt'
df.to_csv(savepath/filename, sep=' ')
return df
def produce_homogeniety_results_xr(ds, alpha=0.05, test='snht', sim=20000):
import pyhomogeneity as hg
import xarray as xr
from aux_gps import homogeneity_test_xr
hg_tests_dict = {
'snht': hg.snht_test,
'pett': hg.pettitt_test,
'b_like': hg.buishand_likelihood_ratio_test,
'b_u': hg.buishand_u_test,
'b_q': hg.buishand_q_test,
'b_range': hg.buishand_range_test}
if test == 'all':
tests = [x for x in hg_tests_dict.keys()]
ds_list = []
for t in tests:
print('running {} test...'.format(t))
rds = ds.map(homogeneity_test_xr, hg_test_func=hg_tests_dict[t],
alpha=alpha, sim=sim, verbose=False)
rds = rds.to_array('station').to_dataset('results')
ds_list.append(rds)
rds = xr.concat(ds_list, 'test')
rds['test'] = tests
rds.attrs['alpha'] = alpha
rds.attrs['sim'] = sim
else:
rds = ds.map(homogeneity_test_xr, hg_test_func=hg_tests_dict[test],
alpha=alpha, sim=sim, verbose=False)
rds = rds.to_array('station').to_dataset('results')
rds.attrs['alpha'] = alpha
rds.attrs['sim'] = sim
# df=rds.to_array('st').to_dataset('results').to_dataframe()
print('Done!')
return rds
def run_error_analysis(station='tela', task='edit30hr'):
station_on_geo = geo_path / 'Work_Files/PW_yuval/GNSS_stations'
if task == 'edit30hr':
path = station_on_geo / station / 'rinex/30hr'
err, df = gipsyx_runs_error_analysis(path, glob_str='*.dr.gz')
elif task == 'run':
path = station_on_geo / station / 'rinex/30hr/results'
err, df = gipsyx_runs_error_analysis(path, glob_str='*.tdp')
return err, df
def gipsyx_runs_error_analysis(path, glob_str='*.tdp'):
from collections import Counter
from aux_gps import get_timedate_and_station_code_from_rinex
from aux_gps import path_glob
import pandas as pd
import logging
def find_errors(content_list, name):
keys = [x for x in content_list if 'KeyError' in x]
vals = [x for x in content_list if 'ValueError' in x]
excpt = [x for x in content_list if 'Exception' in x]
err = [x for x in content_list if 'Error' in x]
trouble = [x for x in content_list if 'Trouble' in x]
problem = [x for x in content_list if 'Problem' in x]
fatal = [x for x in content_list if 'FATAL' in x]
timed = [x for x in content_list if 'Timed' in x]
errors = keys + vals + excpt + err + trouble + problem + fatal + timed
if not errors:
dt, _ = get_timedate_and_station_code_from_rinex(name)
logger.warning('found new error on {} ({})'.format(name, dt.strftime('%Y-%m-%d')))
return errors
logger = logging.getLogger('gipsyx_post_proccesser')
rfns = []
files = path_glob(path, glob_str, True)
for file in files:
# first get all the rinex filenames that gipsyx ran successfuly:
rfn = file.as_posix().split('/')[-1][0:12]
rfns.append(rfn)
if files:
logger.info('running error analysis for station {}'.format(rfn[0:4].upper()))
all_errors = []
errors = []
dates = []
rinex = []
files = path_glob(path, '*.err')
for file in files:
rfn = file.as_posix().split('/')[-1][0:12]
# now, filter the error files that were copyed but there is tdp file
# i.e., the gipsyx run was successful:
if rfn in rfns:
continue
else:
dt, _ = get_timedate_and_station_code_from_rinex(rfn)
dates.append(dt)
rinex.append(rfn)
with open(file) as f:
content = f.readlines()
# you may also want to remove whitespace characters like `\n` at
# the end of each line
content = [x.strip() for x in content]
all_errors.append(content)
errors.append(find_errors(content, rfn))
er = [','.join(x) for x in all_errors]
df = pd.DataFrame(data=rinex, index=dates, columns=['rinex'])
df['error'] = er
df = df.sort_index()
total = len(rfns) + len(df)
good = len(rfns)
bad = len(df)
logger.info('total files: {}, successful runs: {}, errornous runs: {}'.format(
total, good, bad))
logger.info('success percent: {0:.1f}%'.format(100.0 * good / total))
logger.info('error percent: {0:.1f}%'.format(100.0 * bad / total))
# now count the similar errors and sort:
flat_list = [item for sublist in errors for item in sublist]
counted_errors = Counter(flat_list)
errors_sorted = sorted(counted_errors.items(), key=lambda x: x[1],
reverse=True)
return errors_sorted, df
def compare_gipsyx_soundings(sound_path=sound_path, gps_station='acor',
times=['1996', '2019'], var='pw'):
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import matplotlib.dates as mdates
import xarray as xr
from aux_gps import path_glob
# sns.set_style('whitegrid')
# ds = mean_zwd_over_sound_time(
# physical_file, ims_path=ims_path, gps_station='tela',
# times=times)
sound_station = gnss_sound_stations_dict.get(gps_station)
gnss = load_gipsyx_results(plot_fields=None, station=gps_station)
sound_file = path_glob(sound_path, 'station_{}_soundings_ts_tm_tpw*.nc'.format(sound_station))[0]
sds = xr.open_dataset(sound_file)
time_dim = list(set(sds.dims))[0]
sds = sds.rename({time_dim: 'time'})
sds[gps_station] = gnss.WetZ
if var == 'zwd':
k = kappa(sds['Tm'], Tm_input=True)
sds['sound'] = sds.Tpw / k
sds[gps_station] = gnss.WetZ
elif var == 'pw':
linear_model = ml_models_T_from_sounding(times=times,
station=sound_station,
plot=False, models=['LR'])
linear_model = linear_model.sel(name='LR').values.item()
k = kappa_ml(sds['Ts'] - 273.15, model=linear_model, no_error=True)
sds[gps_station] = sds[gps_station] * k
sds['sound'] = sds.Tpw
sds = sds.dropna('time')
sds = sds.sel(time=slice(*times))
df = sds[['sound', gps_station]].to_dataframe()
fig, axes = plt.subplots(2, 1, sharex=True, figsize=(12, 8))
    [x.set_xlim([pd.to_datetime(times[0]), pd.to_datetime(times[1])]) for x in axes]
import pandas as pd
import numpy as np
import math
import random
import time
'''
This section is designed to simulate examinees for use in other modules or for any other experimental purpose.
It is possible to simulate IRT parameters for items, ability parameters for examinees or response strings (correct/incorrect)
for each simulated item for each examinee.
'''
def simulate_items(difficulty={'mean': 0, 'sd': 1}, discrimination={'mean': 1, 'sd': 0}, guessing=None, item_count=50):
'''
    Simulates item parameters for the one, two and three parameter models. For the one parameter model, keep the
    discrimination sd equal to 0.
:param difficulty: a dictionary with keys mean and sd
:param discrimination: a dictionary with keys mean and sd
:param guessing: a dictionary with keys mean and sd
:param item_count: an integer for how many items you wish to simulate
:return: A dictionary with keys 'a', 'b' and 'c' each containing a vector of length equal to item count with the corresponding parameters
'''
b = np.random.normal(difficulty['mean'], difficulty['sd'], item_count)
if discrimination['sd'] != 0:
a = np.random.normal(discrimination['mean'], discrimination['sd'], item_count)
else:
a = np.array([discrimination['mean']] * item_count)
a = np.array(a)
if guessing is not None:
c = np.random.normal(guessing['mean'], guessing['sd'], item_count)
else:
c = None
item_dict = {
'a': a,
'b': b,
'c': c
}
return item_dict
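# Illustrative sketch (not part of the original module): a Rasch-style
# one-parameter simulation, keeping the discrimination sd at 0 as the
# docstring suggests, so every item shares the same discrimination.
def _example_simulate_items_1pl():
    items = simulate_items(difficulty={'mean': 0, 'sd': 1},
                           discrimination={'mean': 1, 'sd': 0},
                           guessing=None,
                           item_count=10)
    # items['a'] is constant, items['b'] varies, items['c'] is None
    return items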
def get_probabilities(discrimination, ability, difficulty):
'''
    Estimates the probability of a correct response under the 2 parameter model for an item of a given difficulty and discrimination for an examinee with ability theta
:param discrimination: discrimination parameter for item
:param ability: examinee estimated theta
    :param difficulty: a difficulty parameter for an item
:return: the probability an examinee with a given ability will get the question correct.
'''
probability = math.exp(discrimination*(ability-difficulty))/(1+math.exp(discrimination*(ability-difficulty)))
return probability
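# Quick sanity check (illustrative only): under the two-parameter logistic
# model P(correct) = exp(a*(theta - b)) / (1 + exp(a*(theta - b))), an
# examinee whose ability equals the item difficulty answers correctly with
# probability 0.5, whatever the discrimination.
def _example_get_probabilities():
    p = get_probabilities(discrimination=1.2, ability=0.0, difficulty=0.0)
    assert abs(p - 0.5) < 1e-9
    return p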
def simulate_people(examinee_count, information):
'''
Simulates theta parameters for examinees sampled from a normal distribution.
:param examinee_count: how many examinees you wish to simulate
    :param information: dictionary with keys 'mean' and 'sd' representing the mean and standard deviation.
:return: a list of examinee abilities
'''
examinee_abilities = list(np.random.normal(information['mean'], information['sd'], examinee_count))
return examinee_abilities
def item_vectors(items, abilities):
'''
:param items: a dictionary (usually from the simulate items function) which contains keys 'a', 'b', and 'c' which are vectors containing the parameters of items.
:param abilities: a list of examinee abilities
:return: two dataframes: one containing the probabilities of getting each item correct for each examinee and another containing the correct (1) and incorrect (0) response vectors for each examinee
'''
items = pd.DataFrame(items)
list_of_probabilities = []
list_of_correct = []
for ability in abilities:
person_probabilities = []
person_correct = []
for index, row in items.iterrows():
prob = get_probabilities(discrimination=row['a'], difficulty=row['b'], ability=ability)
rand_num = random.uniform(0, 1)
            if prob >= rand_num:
                # a correct response when the uniform draw falls at or below the probability
                correct = 1
            else:
                correct = 0
person_probabilities.append(prob)
person_correct.append(correct)
list_of_probabilities.append(person_probabilities)
list_of_correct.append(person_correct)
df_probabilities = pd.DataFrame(list_of_probabilities)
    df_correct = pd.DataFrame(list_of_correct)
    return df_probabilities, df_correct
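# End-to-end usage sketch (illustrative, with arbitrary parameter values):
# simulate items and examinees, then build the probability and response
# matrices returned by item_vectors.
def _example_simulation_run():
    items = simulate_items(difficulty={'mean': 0, 'sd': 1},
                           discrimination={'mean': 1, 'sd': 0.2},
                           item_count=20)
    abilities = simulate_people(examinee_count=10,
                                information={'mean': 0, 'sd': 1})
    probabilities, responses = item_vectors(items, abilities)
    # one row per examinee, one column per item
    return probabilities, responses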
'''
create Regional Profile Section data in the column sequence
name, installed_capacity, max_avc, day_max_actual, day_max_actual_time, day_min_actual, day_min_actual_time, sch_mu, act_mu, dev_mu, cuf
'''
import pandas as pd
from data_fetchers.inp_ts_data_store import getPntData
from data_fetchers.remc_data_store import getRemcPntData, FCA_FORECAST_VS_ACTUAL_STORE_NAME
from utils.excel_utils import append_df_to_excel
from utils.printUtils import printWithTs
def populateRegProfSectionData(configFilePath, configSheetName, outputFilePath, outputSheetName, truncateSheet=False):
sectionDataDf = getRegProfSectionDataDf(configFilePath, configSheetName)
# dump data to excel
append_df_to_excel(outputFilePath, sectionDataDf, sheet_name=outputSheetName,
startrow=0, truncate_sheet=truncateSheet, index=False, header=False)
def getRegProfSectionDataDf(configFilePath, configSheetName):
# get conf dataframe
    confDf = pd.read_excel(configFilePath, sheet_name=configSheetName)
import pandas as pd
def generate_demand_csv(input_fn: str, user_data_dir: str):
# Demand
demand = pd.read_excel(input_fn, sheet_name='2.3 EUD', index_col=0, header=1, usecols=range(5))
demand.columns = [x.strip() for x in demand.columns]
demand.index = [x.strip() for x in demand.index]
# Add additional information
demand_aux = pd.read_csv(f"{user_data_dir}/aux_demand.csv", index_col=0)
demand = pd.merge(demand, demand_aux, left_index=True, right_index=True)
# Rename and reorder columns
demand.index.name = 'parameter name'
demand = demand.reset_index()
demand = demand[['Category', 'Subcategory', 'parameter name', 'HOUSEHOLDS',
'SERVICES', 'INDUSTRY', 'TRANSPORTATION', 'Units']]
demand.to_csv(f"{user_data_dir}/Demand.csv", sep=',', index=False)
def generate_resources_csv(input_fn: str, user_data_dir: str):
# Resources
resources = pd.read_excel(input_fn, sheet_name='2.1 RESOURCES', index_col=0, header=1,
usecols=range(5))
resources.index = [x.strip() for x in resources.index]
resources.columns = [x.split(" ")[0] for x in resources.columns]
# Add additional information
resources_aux = pd.read_csv(f"{user_data_dir}/aux_resources.csv", index_col=0)
resources = pd.merge(resources, resources_aux, left_index=True, right_index=True)
# Rename and reorder columns
resources.index.name = 'parameter name'
resources = resources.reset_index()
resources = resources[['Category', 'Subcategory', 'parameter name', 'avail', 'gwp_op', 'c_op', 'einv_op']]
# resources.columns = ['Category', 'Subcategory', 'parameter name', 'Availability', 'Direct and indirect emissions',
# 'Price', 'Direct emissions']
# Add a line with units
units = pd.Series(['', '', 'units', '[GWh/y]', '[ktCO2-eq./GWh]', '[Meuro/GWh]', '[GWh/y]'],
index=resources.columns)
resources = pd.concat((units.to_frame().T, resources), axis=0)
resources.to_csv(f"{user_data_dir}/Resources.csv", sep=',', index=False)
def generate_technologies_csv(input_fn: str, user_data_dir: str):
# Technologies
technologies = pd.read_excel(input_fn, sheet_name='3.2 TECH', index_col=1)
technologies = technologies.drop(technologies.columns[[0]], axis=1)
technologies.index = [x.strip() for x in technologies.index]
# Add additional information
    technologies_aux = pd.read_csv(f"{user_data_dir}/aux_technologies.csv", index_col=0)
from flowsa.common import WITHDRAWN_KEYWORD
from flowsa.flowbyfunctions import assign_fips_location_system
from flowsa.location import US_FIPS
import math
import pandas as pd
import io
from flowsa.settings import log
from string import digits
YEARS_COVERED = {
"asbestos": "2014-2018",
"barite": "2014-2018",
"bauxite": "2013-2017",
"beryllium": "2014-2018",
"boron": "2014-2018",
"chromium": "2014-2018",
"clay": "2015-2016",
"cobalt": "2013-2017",
"copper": "2011-2015",
"diatomite": "2014-2018",
"feldspar": "2013-2017",
"fluorspar": "2013-2017",
"fluorspar_inports": ["2016", "2017"],
"gallium": "2014-2018",
"garnet": "2014-2018",
"gold": "2013-2017",
"graphite": "2013-2017",
"gypsum": "2014-2018",
"iodine": "2014-2018",
"ironore": "2014-2018",
"kyanite": "2014-2018",
"lead": "2012-2018",
"lime": "2014-2018",
"lithium": "2013-2017",
"magnesium": "2013-2017",
"manganese": "2012-2016",
"manufacturedabrasive": "2017-2018",
"mica": "2014-2018",
"molybdenum": "2014-2018",
"nickel": "2012-2016",
"niobium": "2014-2018",
"peat": "2014-2018",
"perlite": "2013-2017",
"phosphate": "2014-2018",
"platinum": "2014-2018",
"potash": "2014-2018",
"pumice": "2014-2018",
"rhenium": "2014-2018",
"salt": "2013-2017",
"sandgravelconstruction": "2013-2017",
"sandgravelindustrial": "2014-2018",
"silver": "2012-2016",
"sodaash": "2010-2017",
"sodaash_t4": ["2016", "2017"],
"stonecrushed": "2013-2017",
"stonedimension": "2013-2017",
"strontium": "2014-2018",
"talc": "2013-2017",
"titanium": "2013-2017",
"tungsten": "2013-2017",
"vermiculite": "2014-2018",
"zeolites": "2014-2018",
"zinc": "2013-2017",
"zirconium": "2013-2017",
}
def usgs_myb_year(years, current_year_str):
"""
    Sets the column label for the given year. Checks that the year
    you picked is within the range covered by the source file.
    :param years: string, year range separated by a hyphen (e.g. "2014-2018")
:param current_year_str: string, year of interest
:return: string, year
"""
years_array = years.split("-")
lower_year = int(years_array[0])
upper_year = int(years_array[1])
current_year = int(current_year_str)
if lower_year <= current_year <= upper_year:
column_val = current_year - lower_year + 1
return "year_" + str(column_val)
else:
log.info("Your year is out of scope. Pick a year between %s and %s",
lower_year, upper_year)
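# Illustrative example (not part of the original module): for a coverage span
# of "2014-2018", the year 2016 is the third year covered, so it maps to the
# column label "year_3".
def _example_usgs_myb_year():
    assert usgs_myb_year("2014-2018", "2016") == "year_3"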
def usgs_myb_name(USGS_Source):
"""
Takes the USGS source name and parses it so it can be used in other parts
of Flow by activity.
:param USGS_Source: string, usgs source name
:return:
"""
source_split = USGS_Source.split("_")
name_cc = str(source_split[2])
name = ""
for char in name_cc:
if char.isupper():
name = name + " " + char
else:
name = name + char
name = name.lower()
name = name.strip()
return name
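# Illustrative example with a hypothetical source string in the
# USGS_MYB_<Commodity> pattern: the third underscore-separated token is taken
# and its camel case is expanded into lowercase words.
def _example_usgs_myb_name():
    assert usgs_myb_name("USGS_MYB_SandGravelConstruction") == "sand gravel construction"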
def usgs_myb_static_variables():
"""
Populates the data values for Flow by activity that are the same
for all of USGS_MYB Files
:return:
"""
data = {}
data["Class"] = "Geological"
data['FlowType'] = "ELEMENTARY_FLOWS"
data["Location"] = US_FIPS
data["Compartment"] = "ground"
data["Context"] = None
data["ActivityConsumedBy"] = None
return data
def usgs_myb_remove_digits(value_string):
"""
Eliminates numbers in a string
:param value_string:
:return:
"""
remove_digits = str.maketrans('', '', digits)
return_string = value_string.translate(remove_digits)
return return_string
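# Illustrative example (not part of the original module): footnote digits are
# stripped from row labels such as the "Imports for consumption, beryl2" label
# that appears in the beryllium table.
def _example_usgs_myb_remove_digits():
    assert usgs_myb_remove_digits("Imports for consumption, beryl2") == "Imports for consumption, beryl"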
def usgs_myb_url_helper(*, build_url, **_):
"""
This helper function uses the "build_url" input from flowbyactivity.py,
which is a base url for data imports that requires parts of the url text
string to be replaced with info specific to the data year. This function
does not parse the data, only modifies the urls from which data is
obtained.
:param build_url: string, base url
:param config: dictionary, items in FBA method yaml
:param args: dictionary, arguments specified when running flowbyactivity.py
flowbyactivity.py ('year' and 'source')
:return: list, urls to call, concat, parse, format into Flow-By-Activity
format
"""
return [build_url]
def usgs_asbestos_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[4:11]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
if len(df_data.columns) > 12:
for x in range(12, len(df_data.columns)):
col_name = "Unnamed: " + str(x)
del df_data[col_name]
if len(df_data. columns) == 12:
df_data.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3",
"year_3", "space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['asbestos'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_asbestos_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity"]
product = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['asbestos'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Imports for consumption:":
product = "imports"
elif df.iloc[index]["Production"].strip() == \
"Exports and reexports:":
product = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['asbestos'], year)
if str(df.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
elif str(df.iloc[index][col_name]) == "nan":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(dataframe,
str(year))
return dataframe
def usgs_barite_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param url: string, url
:param resp: df, response from url call
:param args: dictionary, arguments specified when running
flowbyactivity.py ('year' and 'source')
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(
io.BytesIO(resp.content), sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[7:14]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
if len(df_data. columns) == 11:
df_data.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['barite'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_barite_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param args: dictionary, used to run flowbyactivity.py
('year' and 'source')
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['barite'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Imports for consumption:3":
product = "imports"
elif df.iloc[index]["Production"].strip() == \
"Crude, sold or used by producers:":
product = "production"
elif df.iloc[index]["Production"].strip() == "Exports:2":
product = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['barite'], year)
if str(df.iloc[index][col_name]) == "--" or \
str(df.iloc[index][col_name]) == "(3)":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_bauxite_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data_one.loc[6:14]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
if len(df_data_one. columns) == 11:
df_data_one.columns = ["Production", "space_2", "year_1", "space_3",
"year_2", "space_4", "year_3", "space_5",
"year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['bauxite'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
frames = [df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_bauxite_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param args: dictionary, used to run flowbyactivity.py
('year' and 'source')
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Production", "Total"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['bauxite'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Production":
prod = "production"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption, as shipped:":
prod = "import"
elif df.iloc[index]["Production"].strip() == \
"Exports, as shipped:":
prod = "export"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
flow_amount = str(df.iloc[index][col_name])
if str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = flow_amount
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_beryllium_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param url: string, url
:param resp: df, response from url call
:param args: dictionary, arguments specified when running
flowbyactivity.py ('year' and 'source')
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T4')
df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_1 = pd.DataFrame(df_raw_data_two.loc[6:9]).reindex()
df_data_1 = df_data_1.reset_index()
del df_data_1["index"]
df_data_2 = pd.DataFrame(df_raw_data.loc[12:12]).reindex()
df_data_2 = df_data_2.reset_index()
del df_data_2["index"]
if len(df_data_2.columns) > 11:
for x in range(11, len(df_data_2.columns)):
col_name = "Unnamed: " + str(x)
del df_data_2[col_name]
if len(df_data_1. columns) == 11:
df_data_1.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
if len(df_data_2. columns) == 11:
df_data_2.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['beryllium'], year))
for col in df_data_1.columns:
if col not in col_to_use:
del df_data_1[col]
for col in df_data_2.columns:
if col not in col_to_use:
del df_data_2[col]
frames = [df_data_1, df_data_2]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_beryllium_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param args: dictionary, used to run flowbyactivity.py
('year' and 'source')
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["United States6", "Mine shipments1",
"Imports for consumption, beryl2"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['beryllium'], year)
for df in df_list:
for index, row in df.iterrows():
prod = "production"
if df.iloc[index]["Production"].strip() == \
"Imports for consumption, beryl2":
prod = "imports"
if df.iloc[index]["Production"].strip() in row_to_use:
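                # strip trailing footnote digits from the row label
                # (e.g., "Mine shipments1" -> "Mine shipments")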
remove_digits = str.maketrans('', '', digits)
product = df.iloc[index][
"Production"].strip().translate(remove_digits)
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Thousand Metric Tons"
data["Description"] = name
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_boron_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
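    # three separate row slices are pulled from T1; the hard-coded indices
    # are tied to the layout of the downloaded workbook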
df_data_one = pd.DataFrame(df_raw_data.loc[8:8]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
df_data_two = pd.DataFrame(df_raw_data.loc[21:22]).reindex()
df_data_two = df_data_two.reset_index()
del df_data_two["index"]
df_data_three = pd.DataFrame(df_raw_data.loc[27:28]).reindex()
df_data_three = df_data_three.reset_index()
del df_data_three["index"]
    if len(df_data_one.columns) == 11:
df_data_one.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
df_data_two.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
df_data_three.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['boron'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
del df_data_two[col]
del df_data_three[col]
frames = [df_data_one, df_data_two, df_data_three]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_boron_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["B2O3 content", "Quantity"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['boron'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "B2O3 content" or \
df.iloc[index]["Production"].strip() == "Quantity":
product = "production"
if df.iloc[index]["Production"].strip() == "Colemanite:4":
des = "Colemanite"
elif df.iloc[index]["Production"].strip() == "Ulexite:4":
des = "Ulexite"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
if des == name:
data['FlowName'] = name + " " + product
else:
data['FlowName'] = name + " " + product + " " + des
data["Description"] = des
data["ActivityProducedBy"] = name
if str(df.iloc[index][col_name]) == "--" or \
str(df.iloc[index][col_name]) == "(3)":
data["FlowAmount"] = str(0)
elif str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_chromium_call(*, resp, year, **_):
""""
Convert response for calling url to pandas dataframe,
begin parsing df into FBA format
:param url: string, url
:param resp: df, response from url call
:param args: dictionary, arguments specified when running
flowbyactivity.py ('year' and 'source')
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[4:24]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
    if len(df_data.columns) == 12:
df_data.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
    elif len(df_data.columns) == 13:
df_data.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5", "space_6"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['chromium'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_chromium_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Secondary2", "Total"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['chromium'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Imports:":
product = "imports"
elif df.iloc[index]["Production"].strip() == "Secondary2":
product = "production"
elif df.iloc[index]["Production"].strip() == "Exports:":
product = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['chromium'], year)
if str(df.iloc[index][col_name]) == "--" or \
str(df.iloc[index][col_name]) == "(3)":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_clay_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_ball = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T3')
df_data_ball = pd.DataFrame(df_raw_data_ball.loc[19:19]).reindex()
df_data_ball = df_data_ball.reset_index()
del df_data_ball["index"]
df_raw_data_bentonite = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T4 ')
df_data_bentonite = pd.DataFrame(
df_raw_data_bentonite.loc[28:28]).reindex()
df_data_bentonite = df_data_bentonite.reset_index()
del df_data_bentonite["index"]
df_raw_data_common = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T5 ')
df_data_common = pd.DataFrame(df_raw_data_common.loc[40:40]).reindex()
df_data_common = df_data_common.reset_index()
del df_data_common["index"]
df_raw_data_fire = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T6 ')
df_data_fire = pd.DataFrame(df_raw_data_fire.loc[12:12]).reindex()
df_data_fire = df_data_fire.reset_index()
del df_data_fire["index"]
df_raw_data_fuller = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T7 ')
df_data_fuller = pd.DataFrame(df_raw_data_fuller.loc[17:17]).reindex()
df_data_fuller = df_data_fuller.reset_index()
del df_data_fuller["index"]
df_raw_data_kaolin = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T8 ')
df_data_kaolin = pd.DataFrame(df_raw_data_kaolin.loc[18:18]).reindex()
df_data_kaolin = df_data_kaolin.reset_index()
del df_data_kaolin["index"]
df_raw_data_export = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T13')
df_data_export = pd.DataFrame(df_raw_data_export.loc[6:15]).reindex()
df_data_export = df_data_export.reset_index()
del df_data_export["index"]
df_raw_data_import = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T14')
df_data_import = pd.DataFrame(df_raw_data_import.loc[6:13]).reindex()
df_data_import = df_data_import.reset_index()
del df_data_import["index"]
df_data_ball.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2"]
df_data_bentonite.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2"]
df_data_common.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2"]
df_data_fire.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2"]
df_data_fuller.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2"]
df_data_kaolin.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2"]
df_data_export.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2", "space_5", "extra"]
df_data_import.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2", "space_5", "extra"]
df_data_ball["type"] = "Ball clay"
df_data_bentonite["type"] = "Bentonite"
df_data_common["type"] = "Common clay"
df_data_fire["type"] = "Fire clay"
df_data_fuller["type"] = "Fuller’s earth"
df_data_kaolin["type"] = "Kaolin"
df_data_export["type"] = "export"
df_data_import["type"] = "import"
col_to_use = ["Production", "type"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['clay'], year))
for col in df_data_import.columns:
if col not in col_to_use:
del df_data_import[col]
del df_data_export[col]
for col in df_data_ball.columns:
if col not in col_to_use:
del df_data_ball[col]
del df_data_bentonite[col]
del df_data_common[col]
del df_data_fire[col]
del df_data_fuller[col]
del df_data_kaolin[col]
frames = [df_data_import, df_data_export, df_data_ball, df_data_bentonite,
df_data_common, df_data_fire, df_data_fuller, df_data_kaolin]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_clay_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Ball clay", "Bentonite", "Fire clay", "Kaolin",
"Fuller’s earth", "Total", "Grand total",
"Artificially activated clay and earth",
"Clays, not elsewhere classified",
"Clays, not elsewhere classified"]
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
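            # the "type" column added in usgs_clay_call flags whether a row
            # came from an import/export sheet or a clay-specific sheet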
if df.iloc[index]["type"].strip() == "import":
product = "imports"
elif df.iloc[index]["type"].strip() == "export":
product = "exports"
else:
product = "production"
if str(df.iloc[index]["Production"]).strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
if product == "production":
data['FlowName'] = \
df.iloc[index]["type"].strip() + " " + product
data["Description"] = df.iloc[index]["type"].strip()
data["ActivityProducedBy"] = df.iloc[index]["type"].strip()
else:
data['FlowName'] = \
df.iloc[index]["Production"].strip() + " " + product
data["Description"] = df.iloc[index]["Production"].strip()
data["ActivityProducedBy"] = \
df.iloc[index]["Production"].strip()
col_name = usgs_myb_year(YEARS_COVERED['clay'], year)
if str(df.iloc[index][col_name]) == "--" or \
str(df.iloc[index][col_name]) == "(3)" or \
str(df.iloc[index][col_name]) == "(2)":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_cobalt_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T8')
df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_1 = pd.DataFrame(df_raw_data_two.loc[6:11]).reindex()
df_data_1 = df_data_1.reset_index()
del df_data_1["index"]
df_data_2 = pd.DataFrame(df_raw_data.loc[23:23]).reindex()
df_data_2 = df_data_2.reset_index()
del df_data_2["index"]
if len(df_data_2.columns) > 11:
for x in range(11, len(df_data_2.columns)):
col_name = "Unnamed: " + str(x)
del df_data_2[col_name]
    if len(df_data_1.columns) == 12:
df_data_1.columns = ["Production", "space_6", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
    if len(df_data_2.columns) == 11:
df_data_2.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['cobalt'], year))
for col in df_data_1.columns:
if col not in col_to_use:
del df_data_1[col]
for col in df_data_2.columns:
if col not in col_to_use:
del df_data_2[col]
frames = [df_data_1, df_data_2]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_cobalt_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
name = usgs_myb_name(source)
des = name
row_to_use = ["United Statese, 16, 17", "Mine productione",
"Imports for consumption", "Exports"]
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
prod = "production"
if df.iloc[index]["Production"].strip() == \
"United Statese, 16, 17":
prod = "production"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption":
prod = "imports"
elif df.iloc[index]["Production"].strip() == "Exports":
prod = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
remove_digits = str.maketrans('', '', digits)
product = df.iloc[index][
"Production"].strip().translate(remove_digits)
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Thousand Metric Tons"
col_name = usgs_myb_year(YEARS_COVERED['cobalt'], year)
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
data["FlowAmount"] = str(df.iloc[index][col_name])
remove_rows = ["(18)", "(2)"]
if data["FlowAmount"] not in remove_rows:
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_copper_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin
parsing df into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_1 = pd.DataFrame(df_raw_data.loc[12:12]).reindex()
df_data_1 = df_data_1.reset_index()
del df_data_1["index"]
df_data_2 = pd.DataFrame(df_raw_data.loc[30:31]).reindex()
df_data_2 = df_data_2.reset_index()
del df_data_2["index"]
    if len(df_data_1.columns) == 12:
df_data_1.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
df_data_2.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production", "Unit"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['copper'], year))
for col in df_data_1.columns:
if col not in col_to_use:
del df_data_1[col]
for col in df_data_2.columns:
if col not in col_to_use:
del df_data_2[col]
frames = [df_data_1, df_data_2]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_copper_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
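            # every pre-selected row is kept; the flow direction is inferred
            # from the footnote-stripped row label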
remove_digits = str.maketrans('', '', digits)
product = df.iloc[index][
"Production"].strip().translate(remove_digits)
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
if product == "Total":
prod = "production"
elif product == "Exports, refined":
prod = "exports"
elif product == "Imports, refined":
prod = "imports"
data["ActivityProducedBy"] = "Copper; Mine"
data['FlowName'] = name + " " + prod
data["Unit"] = "Metric Tons"
col_name = usgs_myb_year(YEARS_COVERED['copper'], year)
data["Description"] = "Copper; Mine"
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_diatomite_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
    :param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data_one.loc[7:10]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
if len(df_data_one.columns) == 10:
df_data_one.columns = ["Production", "year_1", "space_2", "year_2",
"space_3", "year_3", "space_4", "year_4",
"space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['diatomite'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
frames = [df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_diatomite_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity", "Exports2", "Imports for consumption2"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Exports2":
prod = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption2":
prod = "imports"
elif df.iloc[index]["Production"].strip() == "Quantity":
prod = "production"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Thousand metric tons"
col_name = usgs_myb_year(YEARS_COVERED['diatomite'], year)
data["FlowAmount"] = str(df.iloc[index][col_name])
data["Description"] = name
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_feldspar_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin
parsing df into FBA format
    :param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_two = pd.DataFrame(df_raw_data_two.loc[4:8]).reindex()
df_data_two = df_data_two.reset_index()
del df_data_two["index"]
df_data_one = pd.DataFrame(df_raw_data_two.loc[10:15]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
    if len(df_data_two.columns) == 13:
df_data_two.columns = ["Production", "space_1", "unit", "space_2",
"year_1", "space_3", "year_2", "space_4",
"year_3", "space_5", "year_4", "space_6",
"year_5"]
df_data_one.columns = ["Production", "space_1", "unit", "space_2",
"year_1", "space_3", "year_2", "space_4",
"year_3", "space_5", "year_4", "space_6",
"year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['feldspar'], year))
for col in df_data_two.columns:
if col not in col_to_use:
del df_data_two[col]
del df_data_one[col]
frames = [df_data_two, df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_feldspar_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity", "Quantity3"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Exports, feldspar:4":
prod = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption:4":
prod = "imports"
elif df.iloc[index]["Production"].strip() == \
"Production, feldspar:e, 2":
prod = "production"
elif df.iloc[index]["Production"].strip() == "Nepheline syenite:":
prod = "production"
des = "Nepheline syenite"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
col_name = usgs_myb_year(YEARS_COVERED['feldspar'], year)
data["FlowAmount"] = str(df.iloc[index][col_name])
data["Description"] = des
data["ActivityProducedBy"] = name
if name == des:
data['FlowName'] = name + " " + prod
else:
data['FlowName'] = name + " " + prod + " " + des
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_fluorspar_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin
parsing df into FBA format
    :param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
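    # the additional worksheets (T2, T7, T8) are only read for the years
    # listed under the 'fluorspar_inports' key in YEARS_COVERED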
if year in YEARS_COVERED['fluorspar_inports']:
df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T2')
df_raw_data_three = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T7')
df_raw_data_four = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T8')
df_data_one = pd.DataFrame(df_raw_data_one.loc[5:15]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
if year in YEARS_COVERED['fluorspar_inports']:
df_data_two = pd.DataFrame(df_raw_data_two.loc[7:8]).reindex()
df_data_three = pd.DataFrame(df_raw_data_three.loc[19:19]).reindex()
df_data_four = pd.DataFrame(df_raw_data_four.loc[11:11]).reindex()
if len(df_data_two.columns) == 13:
df_data_two.columns = ["Production", "space_1", "not_1", "space_2",
"not_2", "space_3", "not_3", "space_4",
"not_4", "space_5", "year_4", "space_6",
"year_5"]
if len(df_data_three.columns) == 9:
df_data_three.columns = ["Production", "space_1", "year_4",
"space_2", "not_1", "space_3", "year_5",
"space_4", "not_2"]
df_data_four.columns = ["Production", "space_1", "year_4",
"space_2", "not_1", "space_3", "year_5",
"space_4", "not_2"]
    if len(df_data_one.columns) == 13:
df_data_one.columns = ["Production", "space_1", "unit", "space_2",
"year_1", "space_3", "year_2", "space_4",
"year_3", "space_5", "year_4", "space_6",
"year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['fluorspar'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
if year in YEARS_COVERED['fluorspar_inports']:
for col in df_data_two.columns:
if col not in col_to_use:
del df_data_two[col]
for col in df_data_three.columns:
if col not in col_to_use:
del df_data_three[col]
for col in df_data_four.columns:
if col not in col_to_use:
del df_data_four[col]
df_data_one["type"] = "data_one"
if year in YEARS_COVERED['fluorspar_inports']:
# aluminum fluoride
# cryolite
df_data_two["type"] = "data_two"
df_data_three["type"] = "Aluminum Fluoride"
df_data_four["type"] = "Cryolite"
frames = [df_data_one, df_data_two, df_data_three, df_data_four]
else:
frames = [df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_fluorspar_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity", "Quantity3", "Total", "Hydrofluoric acid",
"Metallurgical", "Production"]
prod = ""
    name = usgs_myb_name(source)
    des = name
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Exports:3":
prod = "exports"
des = name
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption:3":
prod = "imports"
des = name
elif df.iloc[index]["Production"].strip() == "Fluorosilicic acid:":
prod = "production"
des = "Fluorosilicic acid:"
if str(df.iloc[index]["type"]).strip() == "data_two":
prod = "imports"
des = df.iloc[index]["Production"].strip()
elif str(df.iloc[index]["type"]).strip() == \
"Aluminum Fluoride" or \
str(df.iloc[index]["type"]).strip() == "Cryolite":
prod = "imports"
des = df.iloc[index]["type"].strip()
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
col_name = usgs_myb_year(YEARS_COVERED['fluorspar'], year)
if str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_gallium_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[5:7]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
if len(df_data.columns) > 11:
for x in range(11, len(df_data.columns)):
col_name = "Unnamed: " + str(x)
del df_data[col_name]
if len(df_data.columns) == 11:
df_data.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['gallium'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_gallium_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Production, primary crude", "Metal"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['gallium'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Imports for consumption:":
product = "imports"
elif df.iloc[index]["Production"].strip() == \
"Production, primary crude":
product = "production"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Kilograms"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['gallium'], year)
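                # "--" is recorded as zero; blank cells read in as "nan" and
                # are treated as withheld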
if str(df.iloc[index][col_name]).strip() == "--":
data["FlowAmount"] = str(0)
elif str(df.iloc[index][col_name]) == "nan":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_garnet_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_two = pd.DataFrame(df_raw_data_two.loc[4:5]).reindex()
df_data_two = df_data_two.reset_index()
del df_data_two["index"]
df_data_one = pd.DataFrame(df_raw_data_two.loc[10:14]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
if len(df_data_one.columns) > 13:
for x in range(13, len(df_data_one.columns)):
col_name = "Unnamed: " + str(x)
del df_data_one[col_name]
del df_data_two[col_name]
    if len(df_data_two.columns) == 13:
df_data_two.columns = ["Production", "space_1", "unit", "space_2",
"year_1", "space_3", "year_2", "space_4",
"year_3", "space_5", "year_4", "space_6",
"year_5"]
df_data_one.columns = ["Production", "space_1", "unit", "space_2",
"year_1", "space_3", "year_2", "space_4",
"year_3", "space_5", "year_4", "space_6",
"year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['garnet'], year))
for col in df_data_two.columns:
if col not in col_to_use:
del df_data_two[col]
del df_data_one[col]
frames = [df_data_two, df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_garnet_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Exports:2":
prod = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption: 3":
prod = "imports"
elif df.iloc[index]["Production"].strip() == "Crude production:":
prod = "production"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
col_name = usgs_myb_year(YEARS_COVERED['garnet'], year)
data["FlowAmount"] = str(df.iloc[index][col_name])
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_gold_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[6:14]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
if len(df_data.columns) == 13:
df_data.columns = ["Production", "Space", "Units", "space_1",
"year_1", "space_2", "year_2", "space_3",
"year_3", "space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['gold'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_gold_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity", "Exports, refined bullion",
"Imports for consumption, refined bullion"]
dataframe = pd.DataFrame()
product = "production"
name = usgs_myb_name(source)
des = name
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Quantity":
product = "production"
elif df.iloc[index]["Production"].strip() == \
"Exports, refined bullion":
product = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption, refined bullion":
product = "imports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "kilograms"
data['FlowName'] = name + " " + product
data["Description"] = des
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['gold'], year)
if str(df.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_graphite_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[5:9]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
    if len(df_data.columns) == 13:
df_data.columns = ["Production", "space_1", "Unit", "space_6",
"year_1", "space_2", "year_2", "space_3",
"year_3", "space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['graphite'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_graphite_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
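    # "Quantiy" (sic) is kept alongside "Quantity" so rows still match if the
    # source worksheet uses that misspelling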
row_to_use = ["Quantiy", "Quantity"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['graphite'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Imports for consumption:":
product = "imports"
elif df.iloc[index]["Production"].strip() == "Exports:":
product = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['graphite'], year)
if str(df.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
elif str(df.iloc[index][col_name]) == "nan":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_gypsum_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin
parsing df into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data_one.loc[7:10]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
if len(df_data_one.columns) > 11:
for x in range(11, len(df_data_one.columns)):
col_name = "Unnamed: " + str(x)
del df_data_one[col_name]
if len(df_data_one.columns) == 11:
df_data_one.columns = ["Production", "space_1", "year_1", "space_3",
"year_2", "space_4", "year_3", "space_5",
"year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['gypsum'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
frames = [df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_gypsum_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity", "Imports for consumption"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['gypsum'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Imports for consumption":
prod = "imports"
elif df.iloc[index]["Production"].strip() == "Quantity":
prod = "production"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data["FlowAmount"] = str(df.iloc[index][col_name])
if str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_iodine_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[6:10]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
    if len(df_data.columns) == 11:
df_data.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
    elif len(df_data.columns) == 13:
df_data.columns = ["Production", "unit", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5", "space_6"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['iodine'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_iodine_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Production", "Quantity, for consumption", "Exports2"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['iodine'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Imports:2":
product = "imports"
elif df.iloc[index]["Production"].strip() == "Production":
product = "production"
elif df.iloc[index]["Production"].strip() == "Exports2":
product = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['iodine'], year)
if str(df.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
elif str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_iron_ore_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[7:25]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
    if len(df_data.columns) == 12:
df_data.columns = ["Production", "Units", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production", "Units"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['ironore'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_iron_ore_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
name = usgs_myb_name(source)
des = name
row_to_use = ["Gross weight", "Quantity"]
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Production:":
product = "production"
elif df.iloc[index]["Production"].strip() == "Exports:":
product = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption:":
product = "imports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Thousand Metric Tons"
data['FlowName'] = "Iron Ore " + product
data["Description"] = "Iron Ore"
data["ActivityProducedBy"] = "Iron Ore"
col_name = usgs_myb_year(YEARS_COVERED['ironore'], year)
if str(df.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_kyanite_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
    :param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data_one.loc[4:13]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
    if len(df_data_one.columns) == 12:
df_data_one.columns = ["Production", "unit", "space_2", "year_1",
"space_3", "year_2", "space_4", "year_3",
"space_5", "year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['kyanite'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
frames = [df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_kyanite_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity", "Quantity2"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['kyanite'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Exports of kyanite concentrate:3":
prod = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption, all kyanite minerals:3":
prod = "imports"
elif df.iloc[index]["Production"].strip() == "Production:":
prod = "production"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data["FlowAmount"] = str(df.iloc[index][col_name])
if str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_lead_url_helper(*, year, **_):
"""
    This helper function selects the url to pull based on the requested data
    year, since different Minerals Yearbook editions cover different years.
    This function does not parse the data, only determines the urls from
    which data is obtained.
    :param year: year of data to pull
:return: list, urls to call, concat, parse, format into Flow-By-Activity
format
"""
if int(year) < 2013:
build_url = ('https://d9-wret.s3.us-west-2.amazonaws.com/assets/'
'palladium/production/atoms/files/myb1-2016-lead.xls')
elif int(year) < 2014:
build_url = ('https://d9-wret.s3.us-west-2.amazonaws.com/assets/'
'palladium/production/atoms/files/myb1-2017-lead.xls')
else:
build_url = ('https://d9-wret.s3.us-west-2.amazonaws.com/assets/'
'palladium/production/s3fs-public/media/files/myb1-2018-lead-advrel.xlsx')
url = build_url
return [url]
def usgs_lead_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
    :param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[8:15]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
if len(df_data.columns) > 12:
for x in range(12, len(df_data.columns)):
col_name = "Unnamed: " + str(x)
del df_data[col_name]
    if len(df_data.columns) == 12:
df_data.columns = ["Production", "Units", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production", "Units"]
if int(year) == 2013:
modified_sy = "2013-2018"
col_to_use.append(usgs_myb_year(modified_sy, year))
elif int(year) > 2013:
modified_sy = "2014-2018"
col_to_use.append(usgs_myb_year(modified_sy, year))
else:
col_to_use.append(usgs_myb_year(YEARS_COVERED['lead'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_lead_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
name = usgs_myb_name(source)
des = name
row_to_use = ["Primary lead, refined content, "
"domestic ores and base bullion",
"Secondary lead, lead content",
"Lead ore and concentrates", "Lead in base bullion"]
import_export = ["Exports, lead content:",
"Imports for consumption, lead content:"]
dataframe = pd.DataFrame()
product = "production"
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() in import_export:
if df.iloc[index]["Production"].strip() == \
"Exports, lead content:":
product = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption, lead content:":
product = "imports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["ActivityProducedBy"] = df.iloc[index]["Production"]
if int(year) == 2013:
modified_sy = "2013-2018"
col_name = usgs_myb_year(modified_sy, year)
elif int(year) > 2013:
modified_sy = "2014-2018"
col_name = usgs_myb_year(modified_sy, year)
else:
col_name = usgs_myb_year(YEARS_COVERED['lead'], year)
if str(df.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_lime_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
    :param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_1 = pd.DataFrame(df_raw_data_two.loc[16:16]).reindex()
df_data_1 = df_data_1.reset_index()
del df_data_1["index"]
df_data_2 = pd.DataFrame(df_raw_data_two.loc[28:32]).reindex()
df_data_2 = df_data_2.reset_index()
del df_data_2["index"]
if len(df_data_1.columns) > 12:
for x in range(12, len(df_data_1.columns)):
col_name = "Unnamed: " + str(x)
del df_data_1[col_name]
del df_data_2[col_name]
    if len(df_data_1.columns) == 12:
df_data_1.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
df_data_2.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['lime'], year))
for col in df_data_1.columns:
if col not in col_to_use:
del df_data_1[col]
for col in df_data_2.columns:
if col not in col_to_use:
del df_data_2[col]
frames = [df_data_1, df_data_2]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_lime_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Total", "Quantity"]
import_export = ["Exports:7", "Imports for consumption:7"]
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
for df in df_list:
prod = "production"
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Exports:7":
prod = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption:7":
prod = "imports"
if df.iloc[index]["Production"].strip() in row_to_use:
remove_digits = str.maketrans('', '', digits)
product = df.iloc[index][
"Production"].strip().translate(remove_digits)
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Thousand Metric Tons"
col_name = usgs_myb_year(YEARS_COVERED['lime'], year)
data["Description"] = des
data["ActivityProducedBy"] = name
if product.strip() == "Total":
data['FlowName'] = name + " " + prod
elif product.strip() == "Quantity":
data['FlowName'] = name + " " + prod
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_lithium_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
    :param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data_one.loc[6:8]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
if len(df_data_one.columns) > 11:
for x in range(11, len(df_data_one.columns)):
col_name = "Unnamed: " + str(x)
del df_data_one[col_name]
    if len(df_data_one.columns) == 11:
df_data_one.columns = ["Production", "space_2", "year_1", "space_3",
"year_2", "space_4", "year_3", "space_5",
"year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['lithium'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
frames = [df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_lithium_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Exports3", "Imports3", "Production"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['lithium'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Exports3":
prod = "exports"
elif df.iloc[index]["Production"].strip() == "Imports3":
prod = "imports"
elif df.iloc[index]["Production"].strip() == "Production":
prod = "production"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data["FlowAmount"] = str(df.iloc[index][col_name])
if str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_magnesium_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
    :param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[7:15]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
    if len(df_data.columns) == 12:
df_data.columns = ["Production", "Units", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['magnesium'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_magnesium_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Secondary", "Primary", "Exports", "Imports for consumption"]
dataframe = pd.DataFrame()
name = usgs_myb_name(source)
des = name
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Exports":
product = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption":
product = "imports"
elif df.iloc[index]["Production"].strip() == "Secondary" or \
df.iloc[index]["Production"].strip() == "Primary":
product = "production" + " " + \
df.iloc[index]["Production"].strip()
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['magnesium'], year)
if str(df.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
elif str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
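# --- Illustrative sketch (not part of the original module) ---
# How a call/parse pair such as the magnesium functions above is typically
# chained. Fetching the workbook directly with requests.get and the
# "USGS_MYB_Magnesium" source string are placeholders for illustration only;
# in practice flowbyactivity.py drives the pair and supplies `resp`, `source`,
# and `year`.
def _magnesium_call_parse_sketch(url, year, source="USGS_MYB_Magnesium"):
    import requests
    resp = requests.get(url)  # Excel workbook holding the Minerals Yearbook table
    df_list = [usgs_magnesium_call(resp=resp, year=year)]
    return usgs_magnesium_parse(df_list=df_list, source=source, year=year)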
def usgs_manganese_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[7:9]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
if len(df_data.columns) > 12:
for x in range(12, len(df_data.columns)):
col_name = "Unnamed: " + str(x)
del df_data[col_name]
    if len(df_data.columns) == 12:
df_data.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3",
"year_3", "space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['manganese'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_manganese_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Production", "Exports", "Imports for consumption"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['manganese'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Imports for consumption":
product = "imports"
elif df.iloc[index]["Production"].strip() == "Production":
product = "production"
elif df.iloc[index]["Production"].strip() == "Exports":
product = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['manganese'], year)
if str(df.iloc[index][col_name]) == "--" or \
str(df.iloc[index][col_name]) == "(3)":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_ma_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T2')
df_data = pd.DataFrame(df_raw_data.loc[6:7]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
if len(df_data.columns) > 9:
for x in range(9, len(df_data.columns)):
col_name = "Unnamed: " + str(x)
del df_data[col_name]
    if len(df_data.columns) == 9:
        df_data.columns = ["Product", "space_1", "quality_year_1", "space_2",
                           "value_year_1", "space_3",
                           "quality_year_2", "space_4", "value_year_2"]
col_to_use = ["Product"]
col_to_use.append("quality_"
+ usgs_myb_year(YEARS_COVERED['manufacturedabrasive'],
year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_ma_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Silicon carbide"]
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
remove_digits = str.maketrans('', '', digits)
product = df.iloc[index][
"Product"].strip().translate(remove_digits)
if product in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data['FlowName'] = "Silicon carbide"
data["ActivityProducedBy"] = "Silicon carbide"
data["Unit"] = "Metric Tons"
col_name = ("quality_"
+ usgs_myb_year(
YEARS_COVERED['manufacturedabrasive'], year))
col_name_array = col_name.split("_")
data["Description"] = product + " " + col_name_array[0]
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
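# --- Illustrative note (not part of the original module) ---
# usgs_ma_parse strips footnote digits from row labels before matching them
# (the same trick the lithium parser relies on for labels such as "Exports3");
# a standalone sketch of that technique:
def _strip_footnote_digits(label):
    from string import digits
    return label.strip().translate(str.maketrans('', '', digits))
# _strip_footnote_digits("Exports3") == "Exports"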
def usgs_mica_call(*, resp, source, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
    :param resp: df, response from url call
    :param source: source
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data_one.loc[4:6]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
name = usgs_myb_name(source)
des = name
    if len(df_data_one.columns) == 12:
df_data_one.columns = ["Production", "Unit", "space_2", "year_1",
"space_3", "year_2", "space_4", "year_3",
"space_5", "year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['mica'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
frames = [df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_mica_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['mica'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Production, sold or used by producers:":
prod = "production"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data["FlowAmount"] = str(df.iloc[index][col_name])
if str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_molybdenum_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[7:11]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
    if len(df_data.columns) == 11:
df_data.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['molybdenum'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_molybdenum_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Production", "Imports for consumption", "Exports"]
dataframe = pd.DataFrame()
name = usgs_myb_name(source)
des = name
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Exports":
product = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption":
product = "imports"
elif df.iloc[index]["Production"].strip() == "Production":
product = "production"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = des
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['molybdenum'], year)
if str(df.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_nickel_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T10')
df_data_1 = pd.DataFrame(df_raw_data.loc[36:36]).reindex()
df_data_1 = df_data_1.reset_index()
del df_data_1["index"]
df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_2 = pd.DataFrame(df_raw_data_two.loc[11:16]).reindex()
df_data_2 = df_data_2.reset_index()
del df_data_2["index"]
if len(df_data_1.columns) > 11:
for x in range(11, len(df_data_1.columns)):
col_name = "Unnamed: " + str(x)
del df_data_1[col_name]
    if len(df_data_1.columns) == 11:
df_data_1.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
if len(df_data_2.columns) == 12:
df_data_2.columns = ["Production", "space_1", "space_2", "year_1",
"space_3", "year_2", "space_4", "year_3",
"space_5", "year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['nickel'], year))
for col in df_data_1.columns:
if col not in col_to_use:
del df_data_1[col]
for col in df_data_2.columns:
if col not in col_to_use:
del df_data_2[col]
frames = [df_data_1, df_data_2]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_nickel_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Ores and concentrates3",
"United States, sulfide ore, concentrate"]
import_export = ["Exports:", "Imports for consumption:"]
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
for df in df_list:
prod = "production"
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Exports:":
prod = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption:":
prod = "imports"
if df.iloc[index]["Production"].strip() in row_to_use:
remove_digits = str.maketrans('', '', digits)
product = df.iloc[index][
"Production"].strip().translate(remove_digits)
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
col_name = usgs_myb_year(YEARS_COVERED['nickel'], year)
if product.strip() == \
"United States, sulfide ore, concentrate":
data["Description"] = \
"United States, sulfide ore, concentrate Nickel"
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
elif product.strip() == "Ores and concentrates":
data["Description"] = "Ores and concentrates Nickel"
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
if str(df.iloc[index][col_name]) == "--" or \
str(df.iloc[index][col_name]) == "(4)":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
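# --- Illustrative note (not part of the original module) ---
# usgs_nickel_parse scans the sheet top-down: header rows such as "Exports:"
# and "Imports for consumption:" flip `prod` for the data rows that follow,
# while each sheet starts back at "production". A toy version of that
# stateful scan:
def _section_scan_sketch(labels):
    prod, out = "production", []
    for label in labels:
        if label == "Exports:":
            prod = "exports"
        elif label == "Imports for consumption:":
            prod = "imports"
        else:
            out.append((label, prod))
    return out
# _section_scan_sketch(["Ores and concentrates3", "Exports:",
#                       "Ores and concentrates3"])
# -> [('Ores and concentrates3', 'production'),
#     ('Ores and concentrates3', 'exports')]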
def usgs_niobium_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[4:19]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
if len(df_data.columns) > 13:
for x in range(13, len(df_data.columns)):
col_name = "Unnamed: " + str(x)
del df_data[col_name]
    if len(df_data.columns) == 13:
df_data.columns = ["Production", "space_1", "Unit_1", "space_2",
"year_1", "space_3", "year_2", "space_4",
"year_3", "space_5", "year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['niobium'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_niobium_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Total imports, Nb content", "Total exports, Nb content"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['niobium'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Imports for consumption:":
product = "imports"
elif df.iloc[index]["Production"].strip() == "Exports:":
product = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['niobium'], year)
if str(df.iloc[index][col_name]) == "--" or \
str(df.iloc[index][col_name]) == "(3)":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_peat_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
"""Calls the excel sheet for nickel and removes extra columns"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = | pd.DataFrame(df_raw_data_one.loc[7:18]) | pandas.DataFrame |
#
# Analysis of the hvorg_movies
#
import os
import pickle
from copy import deepcopy
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import astropy.units as u
from sunpy.time import parse_time
import hvorg_style as hvos
plt.rc('text', usetex=True)
plt.rc('font', size=14)
figsize = (10, 5)
topicality_calculated_using = 'movie_end_time'
# Read in the data
directory = os.path.expanduser('~/Data/hvanalysis/derived')
# Event annotation style
edit = 1
# application
application = 'JHelioviewer'
application_short = 'jhv'
# data product
data_product = 'movies'
# Image output location
img = os.path.join(os.path.expanduser(hvos.img), application)
# Type of data we are looking at
data_analyzed = '{:s} {:s}'.format(application, data_product)
data_type = '{:s}'.format(data_analyzed)
# Movie request times
f = os.path.join(directory, "jhv_movie_request_timestamps_only.pkl")
movie_request_time = pickle.load(open(f, 'rb'))
# Number of movies
nmovies = len(movie_request_time)
# Figure 6
# Number of requests as a function of time
title = '{:s} movies per quarter'.format(application)
df = pd.DataFrame(movie_request_time, columns=['date'])
# Set the date as the index since TimeGrouper works on the index; keep the date
# column (drop=False) so the rows can still be counted.
df.set_index('date', drop=False, inplace=True)
plt.close('all')
fig = plt.figure()
ax = fig.add_subplot(111)
h = df.groupby( | pd.TimeGrouper(freq='Q') | pandas.TimeGrouper |
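# pd.TimeGrouper was deprecated in pandas 0.21 and has since been removed; a
# modern equivalent of the quarterly grouping above (a sketch, assuming the
# same per-quarter count is intended) is:
#   h = df.groupby(pd.Grouper(freq='Q')).count()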
# -*- coding: utf-8 -*-
'''
Copyright 2018, University of Freiburg.
Chair of Algorithms and Data Structures.
<NAME> <<EMAIL>>
'''
'''
Simple script for looping through many csv Files and concatenating their
columns.
'''
import os
import sys
import glob
import argparse
import pandas as pd
def combine_csvs(input_dir, outname):
    '''
    Loop over all CSV files in `input_dir` and concatenate their columns into
    a single dataframe written to `outname`.
    '''
    csv_files = glob.glob(os.path.join(input_dir, '*.csv'))
dff = pd.DataFrame()
for file in csv_files:
# Fetch the content
temp = | pd.read_csv(file, sep='\t', encoding='utf-8') | pandas.read_csv |
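# --- Illustrative sketch (not part of the original script) ---
# A self-contained version of the column-wise combine described in the module
# docstring above; the output step (to_csv with index=False) is an assumption,
# not the author's code.
def combine_csvs_sketch(input_dir, outname, sep='\t'):
    frames = [pd.read_csv(f, sep=sep, encoding='utf-8')
              for f in sorted(glob.glob(os.path.join(input_dir, '*.csv')))]
    combined = pd.concat(frames, axis=1)  # concatenate the columns side by side
    combined.to_csv(outname, sep=sep, encoding='utf-8', index=False)
    return combined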
from unittest import mock
import pandas as pd
import pytest
import ray
from ray.air.checkpoint import Checkpoint
from ray.data import Preprocessor
from ray.train.predictor import Predictor, PredictorNotSerializableException
class DummyPreprocessor(Preprocessor):
def transform_batch(self, df):
return df * 2
class DummyPredictor(Predictor):
def __init__(self, factor: float = 1.0):
self.factor = factor
self.preprocessor = DummyPreprocessor()
@classmethod
def from_checkpoint(cls, checkpoint: Checkpoint, **kwargs) -> "DummyPredictor":
checkpoint_data = checkpoint.to_dict()
return DummyPredictor(**checkpoint_data)
def _predict_pandas(self, data: pd.DataFrame, **kwargs) -> pd.DataFrame:
return data * self.factor
def test_serialization():
"""Tests that Predictor instances are not serializable."""
# Class is serializable.
ray.put(DummyPredictor)
# Instance is not serializable.
predictor = DummyPredictor()
with pytest.raises(PredictorNotSerializableException):
ray.put(predictor)
def test_from_checkpoint():
checkpoint = Checkpoint.from_dict({"factor": 2.0})
assert DummyPredictor.from_checkpoint(checkpoint).factor == 2.0
@mock.patch(
"ray.train.predictor.convert_batch_type_to_pandas",
return_value=mock.DEFAULT,
)
@mock.patch(
"ray.train.predictor.convert_pandas_to_batch_type",
return_value=mock.DEFAULT,
)
def test_predict(convert_from_pandas_mock, convert_to_pandas_mock):
checkpoint = Checkpoint.from_dict({"factor": 2.0})
predictor = DummyPredictor.from_checkpoint(checkpoint)
input = pd.DataFrame({"x": [1, 2, 3]})
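    # factor=2.0 from the checkpoint combined with DummyPreprocessor doubling
    # the batch means predict() should return the input scaled by 4.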
expected_output = input * 4.0
actual_output = predictor.predict(input)
assert actual_output.equals(expected_output)
# Ensure the proper conversion functions are called.
convert_to_pandas_mock.assert_called_once()
convert_from_pandas_mock.assert_called_once()
@mock.patch.object(DummyPredictor, "_predict_pandas", return_value=mock.DEFAULT)
def test_kwargs(predict_pandas_mock):
checkpoint = Checkpoint.from_dict({"factor": 2.0})
predictor = DummyPredictor.from_checkpoint(checkpoint)
input = | pd.DataFrame({"x": [1, 2, 3]}) | pandas.DataFrame |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 29 10:24:14 2019
@author: nmei
"""
import pandas as pd
import numpy as np
from glob import glob
import os
import statsmodels.api as sm
from statsmodels.formula.api import ols
from matplotlib.ticker import FormatStrFormatter
import seaborn as sns
from matplotlib import pyplot as plt
sns.set_style('whitegrid')
sns.set_context('poster')
from shutil import copyfile
copyfile('../../../utils.py', 'utils.py')
import utils
model = 'Image2vec Encoding Models'
experiment = 'metasema'
alpha = int(1e2)
here = '7 rois image2vec'
model_name = 'Ridge Regression'
cv = 'Random Partition 100 folds'
img_dir = '../../../../results/{}/RP/{}'.format(experiment,here)
here = '7 rois word2vec'
word_dir = '../../../../results/{}/RP/{}'.format(experiment,here)
here = 'compare word2vec and image2vec'
figure_dir = '../../../../figures/{}/RP/{}'.format(experiment,here)
if not os.path.exists(figure_dir):
os.mkdir(figure_dir)
df_img = pd.concat([ | pd.read_csv(f) | pandas.read_csv |
"""
Provide a generic structure to support window functions,
similar to how we have a Groupby object.
"""
from collections import defaultdict
from datetime import timedelta
from textwrap import dedent
from typing import List, Optional, Set
import warnings
import numpy as np
import pandas._libs.window as libwindow
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution, cache_readonly
from pandas.core.dtypes.common import (
ensure_float64,
is_bool,
is_float_dtype,
is_integer,
is_integer_dtype,
is_list_like,
is_scalar,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDateOffset,
ABCDatetimeIndex,
ABCPeriodIndex,
ABCSeries,
ABCTimedeltaIndex,
)
from pandas._typing import Axis, FrameOrSeries
from pandas.core.base import DataError, PandasObject, SelectionMixin
import pandas.core.common as com
from pandas.core.generic import _shared_docs
from pandas.core.groupby.base import GroupByMixin
_shared_docs = dict(**_shared_docs)
_doc_template = """
Returns
-------
Series or DataFrame
Return type is determined by the caller.
See Also
--------
Series.%(name)s : Series %(name)s.
DataFrame.%(name)s : DataFrame %(name)s.
"""
class _Window(PandasObject, SelectionMixin):
_attributes = [
"window",
"min_periods",
"center",
"win_type",
"axis",
"on",
"closed",
] # type: List[str]
exclusions = set() # type: Set[str]
def __init__(
self,
obj,
window=None,
min_periods: Optional[int] = None,
center: Optional[bool] = False,
win_type: Optional[str] = None,
axis: Axis = 0,
on: Optional[str] = None,
closed: Optional[str] = None,
**kwargs
):
self.__dict__.update(kwargs)
self.obj = obj
self.on = on
self.closed = closed
self.window = window
self.min_periods = min_periods
self.center = center
self.win_type = win_type
self.win_freq = None
self.axis = obj._get_axis_number(axis) if axis is not None else None
self.validate()
@property
def _constructor(self):
return Window
@property
def is_datetimelike(self) -> Optional[bool]:
return None
@property
def _on(self):
return None
@property
def is_freq_type(self) -> bool:
return self.win_type == "freq"
def validate(self):
if self.center is not None and not is_bool(self.center):
raise ValueError("center must be a boolean")
if self.min_periods is not None and not is_integer(self.min_periods):
raise ValueError("min_periods must be an integer")
if self.closed is not None and self.closed not in [
"right",
"both",
"left",
"neither",
]:
raise ValueError("closed must be 'right', 'left', 'both' or " "'neither'")
def _create_blocks(self):
"""
Split data into blocks & return conformed data.
"""
obj = self._selected_obj
# filter out the on from the object
if self.on is not None:
if obj.ndim == 2:
obj = obj.reindex(columns=obj.columns.difference([self.on]), copy=False)
blocks = obj._to_dict_of_blocks(copy=False).values()
return blocks, obj
def _gotitem(self, key, ndim, subset=None):
"""
Sub-classes to define. Return a sliced object.
Parameters
----------
key : str / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
# create a new object to prevent aliasing
if subset is None:
subset = self.obj
self = self._shallow_copy(subset)
self._reset_cache()
if subset.ndim == 2:
if is_scalar(key) and key in subset or is_list_like(key):
self._selection = key
return self
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
raise AttributeError(
"%r object has no attribute %r" % (type(self).__name__, attr)
)
def _dir_additions(self):
return self.obj._dir_additions()
def _get_window(self, other=None):
return self.window
@property
def _window_type(self) -> str:
return self.__class__.__name__
def __repr__(self) -> str:
"""
Provide a nice str repr of our rolling object.
"""
attrs = (
"{k}={v}".format(k=k, v=getattr(self, k))
for k in self._attributes
if getattr(self, k, None) is not None
)
return "{klass} [{attrs}]".format(
klass=self._window_type, attrs=",".join(attrs)
)
def __iter__(self):
url = "https://github.com/pandas-dev/pandas/issues/11704"
raise NotImplementedError("See issue #11704 {url}".format(url=url))
def _get_index(self) -> Optional[np.ndarray]:
"""
Return index as an ndarray.
Returns
-------
None or ndarray
"""
if self.is_freq_type:
return self._on.asi8
return None
def _prep_values(self, values: Optional[np.ndarray] = None) -> np.ndarray:
"""Convert input to numpy arrays for Cython routines"""
if values is None:
values = getattr(self._selected_obj, "values", self._selected_obj)
# GH #12373 : rolling functions error on float32 data
# make sure the data is coerced to float64
if is_float_dtype(values.dtype):
values = ensure_float64(values)
elif is_integer_dtype(values.dtype):
values = ensure_float64(values)
elif needs_i8_conversion(values.dtype):
raise NotImplementedError(
"ops for {action} for this "
"dtype {dtype} are not "
"implemented".format(action=self._window_type, dtype=values.dtype)
)
else:
try:
values = ensure_float64(values)
except (ValueError, TypeError):
raise TypeError(
"cannot handle this type -> {0}" "".format(values.dtype)
)
# Always convert inf to nan
values[np.isinf(values)] = np.NaN
return values
def _wrap_result(self, result, block=None, obj=None) -> FrameOrSeries:
"""
Wrap a single result.
"""
if obj is None:
obj = self._selected_obj
index = obj.index
if isinstance(result, np.ndarray):
# coerce if necessary
if block is not None:
if is_timedelta64_dtype(block.values.dtype):
from pandas import to_timedelta
result = to_timedelta(result.ravel(), unit="ns").values.reshape(
result.shape
)
if result.ndim == 1:
from pandas import Series
return Series(result, index, name=obj.name)
return type(obj)(result, index=index, columns=block.columns)
return result
def _wrap_results(self, results, blocks, obj, exclude=None) -> FrameOrSeries:
"""
Wrap the results.
Parameters
----------
results : list of ndarrays
blocks : list of blocks
obj : conformed data (may be resampled)
exclude: list of columns to exclude, default to None
"""
from pandas import Series, concat
from pandas.core.index import ensure_index
final = []
for result, block in zip(results, blocks):
result = self._wrap_result(result, block=block, obj=obj)
if result.ndim == 1:
return result
final.append(result)
# if we have an 'on' column
# we want to put it back into the results
# in the same location
columns = self._selected_obj.columns
if self.on is not None and not self._on.equals(obj.index):
name = self._on.name
final.append(Series(self._on, index=obj.index, name=name))
if self._selection is not None:
selection = ensure_index(self._selection)
# need to reorder to include original location of
# the on column (if its not already there)
if name not in selection:
columns = self.obj.columns
indexer = columns.get_indexer(selection.tolist() + [name])
columns = columns.take(sorted(indexer))
# exclude nuisance columns so that they are not reindexed
if exclude is not None and exclude:
columns = [c for c in columns if c not in exclude]
if not columns:
raise DataError("No numeric types to aggregate")
if not len(final):
return obj.astype("float64")
return concat(final, axis=1).reindex(columns=columns, copy=False)
def _center_window(self, result, window) -> np.ndarray:
"""
Center the result in the window.
"""
if self.axis > result.ndim - 1:
raise ValueError(
"Requested axis is larger then no. of argument " "dimensions"
)
offset = _offset(window, True)
if offset > 0:
if isinstance(result, (ABCSeries, ABCDataFrame)):
result = result.slice_shift(-offset, axis=self.axis)
else:
lead_indexer = [slice(None)] * result.ndim
lead_indexer[self.axis] = slice(offset, None)
result = np.copy(result[tuple(lead_indexer)])
return result
def aggregate(self, func, *args, **kwargs):
result, how = self._aggregate(func, *args, **kwargs)
if result is None:
return self.apply(func, raw=False, args=args, kwargs=kwargs)
return result
agg = aggregate
_shared_docs["sum"] = dedent(
"""
Calculate %(name)s sum of given DataFrame or Series.
Parameters
----------
*args, **kwargs
For compatibility with other %(name)s methods. Has no effect
on the computed value.
Returns
-------
Series or DataFrame
Same type as the input, with the same index, containing the
%(name)s sum.
See Also
--------
Series.sum : Reducing sum for Series.
DataFrame.sum : Reducing sum for DataFrame.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4, 5])
>>> s
0 1
1 2
2 3
3 4
4 5
dtype: int64
>>> s.rolling(3).sum()
0 NaN
1 NaN
2 6.0
3 9.0
4 12.0
dtype: float64
>>> s.expanding(3).sum()
0 NaN
1 NaN
2 6.0
3 10.0
4 15.0
dtype: float64
>>> s.rolling(3, center=True).sum()
0 NaN
1 6.0
2 9.0
3 12.0
4 NaN
dtype: float64
For DataFrame, each %(name)s sum is computed column-wise.
>>> df = pd.DataFrame({"A": s, "B": s ** 2})
>>> df
A B
0 1 1
1 2 4
2 3 9
3 4 16
4 5 25
>>> df.rolling(3).sum()
A B
0 NaN NaN
1 NaN NaN
2 6.0 14.0
3 9.0 29.0
4 12.0 50.0
"""
)
_shared_docs["mean"] = dedent(
"""
Calculate the %(name)s mean of the values.
Parameters
----------
*args
Under Review.
**kwargs
Under Review.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.mean : Equivalent method for Series.
DataFrame.mean : Equivalent method for DataFrame.
Examples
--------
The below examples will show rolling mean calculations with window sizes of
two and three, respectively.
>>> s = pd.Series([1, 2, 3, 4])
>>> s.rolling(2).mean()
0 NaN
1 1.5
2 2.5
3 3.5
dtype: float64
>>> s.rolling(3).mean()
0 NaN
1 NaN
2 2.0
3 3.0
dtype: float64
"""
)
class Window(_Window):
"""
Provide rolling window calculations.
.. versionadded:: 0.18.0
Parameters
----------
window : int, or offset
Size of the moving window. This is the number of observations used for
calculating the statistic. Each window will be a fixed size.
If its an offset then this will be the time period of each window. Each
window will be a variable sized based on the observations included in
the time-period. This is only valid for datetimelike indexes. This is
new in 0.19.0
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA). For a window that is specified by an offset,
`min_periods` will default to 1. Otherwise, `min_periods` will default
to the size of the window.
center : bool, default False
Set the labels at the center of the window.
win_type : str, default None
Provide a window type. If ``None``, all points are evenly weighted.
See the notes below for further information.
on : str, optional
For a DataFrame, a datetime-like column on which to calculate the rolling
window, rather than the DataFrame's index. Provided integer column is
ignored and excluded from result since an integer index is not used to
calculate the rolling window.
axis : int or str, default 0
closed : str, default None
Make the interval closed on the 'right', 'left', 'both' or
'neither' endpoints.
For offset-based windows, it defaults to 'right'.
For fixed windows, defaults to 'both'. Remaining cases not implemented
for fixed windows.
.. versionadded:: 0.20.0
Returns
-------
a Window or Rolling sub-classed for the particular operation
See Also
--------
expanding : Provides expanding transformations.
ewm : Provides exponential weighted functions.
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
To learn more about the offsets & frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
The recognized win_types are:
* ``boxcar``
* ``triang``
* ``blackman``
* ``hamming``
* ``bartlett``
* ``parzen``
* ``bohman``
* ``blackmanharris``
* ``nuttall``
* ``barthann``
* ``kaiser`` (needs beta)
* ``gaussian`` (needs std)
* ``general_gaussian`` (needs power, width)
* ``slepian`` (needs width)
* ``exponential`` (needs tau), center is set to None.
If ``win_type=None`` all points are evenly weighted. To learn more about
different window types see `scipy.signal window functions
<https://docs.scipy.org/doc/scipy/reference/signal.html#window-functions>`__.
Examples
--------
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
>>> df
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
Rolling sum with a window length of 2, using the 'triang'
window type.
>>> df.rolling(2, win_type='triang').sum()
B
0 NaN
1 0.5
2 1.5
3 NaN
4 NaN
Rolling sum with a window length of 2, min_periods defaults
to the window length.
>>> df.rolling(2).sum()
B
0 NaN
1 1.0
2 3.0
3 NaN
4 NaN
Same as above, but explicitly set the min_periods
>>> df.rolling(2, min_periods=1).sum()
B
0 0.0
1 1.0
2 3.0
3 2.0
4 4.0
A ragged (meaning not-a-regular frequency), time-indexed DataFrame
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},
... index = [pd.Timestamp('20130101 09:00:00'),
... pd.Timestamp('20130101 09:00:02'),
... pd.Timestamp('20130101 09:00:03'),
... pd.Timestamp('20130101 09:00:05'),
... pd.Timestamp('20130101 09:00:06')])
>>> df
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 2.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
Contrasting to an integer rolling window, this will roll a variable
length window corresponding to the time period.
The default for min_periods is 1.
>>> df.rolling('2s').sum()
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 3.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
"""
def validate(self):
super().validate()
window = self.window
if isinstance(window, (list, tuple, np.ndarray)):
pass
elif is_integer(window):
if window <= 0:
raise ValueError("window must be > 0 ")
import_optional_dependency(
"scipy", extra="Scipy is required to generate window weight."
)
import scipy.signal as sig
if not isinstance(self.win_type, str):
raise ValueError("Invalid win_type {0}".format(self.win_type))
if getattr(sig, self.win_type, None) is None:
raise ValueError("Invalid win_type {0}".format(self.win_type))
else:
raise ValueError("Invalid window {0}".format(window))
def _prep_window(self, **kwargs):
"""
Provide validation for our window type, return the window
we have already been validated.
"""
window = self._get_window()
if isinstance(window, (list, tuple, np.ndarray)):
return com.asarray_tuplesafe(window).astype(float)
elif is_integer(window):
import scipy.signal as sig
# the below may pop from kwargs
def _validate_win_type(win_type, kwargs):
arg_map = {
"kaiser": ["beta"],
"gaussian": ["std"],
"general_gaussian": ["power", "width"],
"slepian": ["width"],
"exponential": ["tau"],
}
if win_type in arg_map:
win_args = _pop_args(win_type, arg_map[win_type], kwargs)
if win_type == "exponential":
# exponential window requires the first arg (center)
# to be set to None (necessary for symmetric window)
win_args.insert(0, None)
return tuple([win_type] + win_args)
return win_type
def _pop_args(win_type, arg_names, kwargs):
msg = "%s window requires %%s" % win_type
all_args = []
for n in arg_names:
if n not in kwargs:
raise ValueError(msg % n)
all_args.append(kwargs.pop(n))
return all_args
win_type = _validate_win_type(self.win_type, kwargs)
# GH #15662. `False` makes symmetric window, rather than periodic.
return sig.get_window(win_type, window, False).astype(float)
def _apply_window(self, mean=True, **kwargs):
"""
Applies a moving window of type ``window_type`` on the data.
Parameters
----------
mean : bool, default True
If True computes weighted mean, else weighted sum
Returns
-------
y : same type as input argument
"""
window = self._prep_window(**kwargs)
center = self.center
blocks, obj = self._create_blocks()
block_list = list(blocks)
results = []
exclude = []
for i, b in enumerate(blocks):
try:
values = self._prep_values(b.values)
except (TypeError, NotImplementedError):
if isinstance(obj, ABCDataFrame):
exclude.extend(b.columns)
del block_list[i]
continue
else:
raise DataError("No numeric types to aggregate")
if values.size == 0:
results.append(values.copy())
continue
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, len(window))
return libwindow.roll_window(
np.concatenate((arg, additional_nans)) if center else arg,
window,
minp,
avg=mean,
)
result = np.apply_along_axis(f, self.axis, values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, block_list, obj, exclude)
_agg_see_also_doc = dedent(
"""
See Also
--------
pandas.DataFrame.rolling.aggregate
pandas.DataFrame.aggregate
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.rolling(3, win_type='boxcar').agg('mean')
A B C
0 NaN NaN NaN
1 NaN NaN NaN
2 -0.885035 0.212600 -0.711689
3 -0.323928 -0.200122 -1.093408
4 -0.071445 -0.431533 -1.075833
5 0.504739 0.676083 -0.996353
6 0.358206 1.903256 -0.774200
7 0.906020 1.283573 0.085482
8 -0.096361 0.818139 0.472290
9 0.070889 0.134399 -0.031308
"""
)
@Substitution(
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
versionadded="",
klass="Series/DataFrame",
axis="",
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, arg, *args, **kwargs):
result, how = self._aggregate(arg, *args, **kwargs)
if result is None:
# these must apply directly
result = arg(self)
return result
agg = aggregate
@Substitution(name="window")
@Appender(_shared_docs["sum"])
def sum(self, *args, **kwargs):
nv.validate_window_func("sum", args, kwargs)
return self._apply_window(mean=False, **kwargs)
@Substitution(name="window")
@Appender(_shared_docs["mean"])
def mean(self, *args, **kwargs):
nv.validate_window_func("mean", args, kwargs)
return self._apply_window(mean=True, **kwargs)
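# --- Illustrative note (not part of the pandas source) ---
# Window forwards the extra keyword of a parameterised win_type (see
# _validate_win_type above) to scipy.signal.get_window, so a Gaussian-weighted
# rolling mean looks like (requires scipy):
#
#   >>> s = pd.Series([0.0, 1.0, 2.0, 3.0, 4.0])
#   >>> s.rolling(3, win_type="gaussian").mean(std=1)
#
# with ``std`` consumed by _pop_args and turned into ("gaussian", 1) weights.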
class _GroupByMixin(GroupByMixin):
"""
Provide the groupby facilities.
"""
def __init__(self, obj, *args, **kwargs):
parent = kwargs.pop("parent", None) # noqa
groupby = kwargs.pop("groupby", None)
if groupby is None:
groupby, obj = obj, obj.obj
self._groupby = groupby
self._groupby.mutated = True
self._groupby.grouper.mutated = True
super().__init__(obj, *args, **kwargs)
count = GroupByMixin._dispatch("count")
corr = GroupByMixin._dispatch("corr", other=None, pairwise=None)
cov = GroupByMixin._dispatch("cov", other=None, pairwise=None)
def _apply(
self, func, name=None, window=None, center=None, check_minp=None, **kwargs
):
"""
Dispatch to apply; we are stripping all of the _apply kwargs and
performing the original function call on the grouped object.
"""
def f(x, name=name, *args):
x = self._shallow_copy(x)
if isinstance(name, str):
return getattr(x, name)(*args, **kwargs)
return x.apply(name, *args, **kwargs)
return self._groupby.apply(f)
class _Rolling(_Window):
@property
def _constructor(self):
return Rolling
def _apply(
self, func, name=None, window=None, center=None, check_minp=None, **kwargs
):
"""
Rolling statistical measure using supplied function.
Designed to be used with passed-in Cython array-based functions.
Parameters
----------
func : str/callable to apply
name : str, optional
name of this function
window : int/array, default to _get_window()
center : bool, default to self.center
check_minp : function, default to _use_window
Returns
-------
y : type of input
"""
if center is None:
center = self.center
if window is None:
window = self._get_window()
if check_minp is None:
check_minp = _use_window
blocks, obj = self._create_blocks()
block_list = list(blocks)
index_as_array = self._get_index()
results = []
exclude = []
for i, b in enumerate(blocks):
try:
values = self._prep_values(b.values)
except (TypeError, NotImplementedError):
if isinstance(obj, ABCDataFrame):
exclude.extend(b.columns)
del block_list[i]
continue
else:
raise DataError("No numeric types to aggregate")
if values.size == 0:
results.append(values.copy())
continue
# if we have a string function name, wrap it
if isinstance(func, str):
cfunc = getattr(libwindow, func, None)
if cfunc is None:
raise ValueError(
"we do not support this function "
"in libwindow.{func}".format(func=func)
)
def func(arg, window, min_periods=None, closed=None):
minp = check_minp(min_periods, window)
# ensure we are only rolling on floats
arg = ensure_float64(arg)
return cfunc(arg, window, minp, index_as_array, closed, **kwargs)
# calculation function
if center:
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def calc(x):
return func(
np.concatenate((x, additional_nans)),
window,
min_periods=self.min_periods,
closed=self.closed,
)
else:
def calc(x):
return func(
x, window, min_periods=self.min_periods, closed=self.closed
)
with np.errstate(all="ignore"):
if values.ndim > 1:
result = np.apply_along_axis(calc, self.axis, values)
else:
result = calc(values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, block_list, obj, exclude)
class _Rolling_and_Expanding(_Rolling):
_shared_docs["count"] = dedent(
r"""
The %(name)s count of any non-NaN observations inside the window.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
DataFrame.count : Count of the full DataFrame.
Examples
--------
>>> s = pd.Series([2, 3, np.nan, 10])
>>> s.rolling(2).count()
0 1.0
1 2.0
2 1.0
3 1.0
dtype: float64
>>> s.rolling(3).count()
0 1.0
1 2.0
2 2.0
3 2.0
dtype: float64
>>> s.rolling(4).count()
0 1.0
1 2.0
2 2.0
3 3.0
dtype: float64
"""
)
def count(self):
blocks, obj = self._create_blocks()
# Validate the index
self._get_index()
window = self._get_window()
window = min(window, len(obj)) if not self.center else window
results = []
for b in blocks:
result = b.notna().astype(int)
result = self._constructor(
result,
window=window,
min_periods=0,
center=self.center,
axis=self.axis,
closed=self.closed,
).sum()
results.append(result)
return self._wrap_results(results, blocks, obj)
_shared_docs["apply"] = dedent(
r"""
The %(name)s function's apply function.
Parameters
----------
func : function
Must produce a single value from an ndarray input if ``raw=True``
or a single value from a Series if ``raw=False``.
raw : bool, default None
* ``False`` : passes each row or column as a Series to the
function.
* ``True`` or ``None`` : the passed function will receive ndarray
objects instead.
If you are just applying a NumPy reduction function this will
achieve much better performance.
The `raw` parameter is required and will show a FutureWarning if
not passed. In the future `raw` will default to False.
.. versionadded:: 0.23.0
*args, **kwargs
Arguments and keyword arguments to be passed into func.
Returns
-------
Series or DataFrame
Return type is determined by the caller.
See Also
--------
Series.%(name)s : Series %(name)s.
DataFrame.%(name)s : DataFrame %(name)s.
"""
)
def apply(self, func, raw=None, args=(), kwargs={}):
from pandas import Series
kwargs.pop("_level", None)
window = self._get_window()
offset = _offset(window, self.center)
index_as_array = self._get_index()
# TODO: default is for backward compat
# change to False in the future
if raw is None:
warnings.warn(
"Currently, 'apply' passes the values as ndarrays to the "
"applied function. In the future, this will change to passing "
"it as Series objects. You need to specify 'raw=True' to keep "
"the current behaviour, and you can pass 'raw=False' to "
"silence this warning",
FutureWarning,
stacklevel=3,
)
raw = True
def f(arg, window, min_periods, closed):
minp = _use_window(min_periods, window)
if not raw:
arg = Series(arg, index=self.obj.index)
return libwindow.roll_generic(
arg,
window,
minp,
index_as_array,
closed,
offset,
func,
raw,
args,
kwargs,
)
return self._apply(f, func, args=args, kwargs=kwargs, center=False, raw=raw)
def sum(self, *args, **kwargs):
nv.validate_window_func("sum", args, kwargs)
return self._apply("roll_sum", "sum", **kwargs)
_shared_docs["max"] = dedent(
"""
Calculate the %(name)s maximum.
Parameters
----------
*args, **kwargs
Arguments and keyword arguments to be passed into func.
"""
)
def max(self, *args, **kwargs):
nv.validate_window_func("max", args, kwargs)
return self._apply("roll_max", "max", **kwargs)
_shared_docs["min"] = dedent(
"""
Calculate the %(name)s minimum.
Parameters
----------
**kwargs
Under Review.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with a Series.
DataFrame.%(name)s : Calling object with a DataFrame.
Series.min : Similar method for Series.
DataFrame.min : Similar method for DataFrame.
Examples
--------
Performing a rolling minimum with a window size of 3.
>>> s = pd.Series([4, 3, 5, 2, 6])
>>> s.rolling(3).min()
0 NaN
1 NaN
2 3.0
3 2.0
4 2.0
dtype: float64
"""
)
def min(self, *args, **kwargs):
nv.validate_window_func("min", args, kwargs)
return self._apply("roll_min", "min", **kwargs)
def mean(self, *args, **kwargs):
nv.validate_window_func("mean", args, kwargs)
return self._apply("roll_mean", "mean", **kwargs)
_shared_docs["median"] = dedent(
"""
Calculate the %(name)s median.
Parameters
----------
**kwargs
For compatibility with other %(name)s methods. Has no effect
on the computed median.
Returns
-------
Series or DataFrame
Returned type is the same as the original object.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.median : Equivalent method for Series.
DataFrame.median : Equivalent method for DataFrame.
Examples
--------
Compute the rolling median of a series with a window size of 3.
>>> s = pd.Series([0, 1, 2, 3, 4])
>>> s.rolling(3).median()
0 NaN
1 NaN
2 1.0
3 2.0
4 3.0
dtype: float64
"""
)
def median(self, **kwargs):
return self._apply("roll_median_c", "median", **kwargs)
_shared_docs["std"] = dedent(
"""
Calculate %(name)s standard deviation.
Normalized by N-1 by default. This can be changed using the `ddof`
argument.
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
*args, **kwargs
For NumPy compatibility. No additional arguments are used.
Returns
-------
Series or DataFrame
Returns the same object type as the caller of the %(name)s calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.std : Equivalent method for Series.
DataFrame.std : Equivalent method for DataFrame.
numpy.std : Equivalent method for Numpy array.
Notes
-----
The default `ddof` of 1 used in Series.std is different than the default
`ddof` of 0 in numpy.std.
A minimum of one period is required for the rolling calculation.
Examples
--------
>>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])
>>> s.rolling(3).std()
0 NaN
1 NaN
2 0.577350
3 1.000000
4 1.000000
5 1.154701
6 0.000000
dtype: float64
>>> s.expanding(3).std()
0 NaN
1 NaN
2 0.577350
3 0.957427
4 0.894427
5 0.836660
6 0.786796
dtype: float64
"""
)
def std(self, ddof=1, *args, **kwargs):
nv.validate_window_func("std", args, kwargs)
window = self._get_window()
index_as_array = self._get_index()
def f(arg, *args, **kwargs):
minp = _require_min_periods(1)(self.min_periods, window)
return _zsqrt(
libwindow.roll_var(arg, window, minp, index_as_array, self.closed, ddof)
)
return self._apply(
f, "std", check_minp=_require_min_periods(1), ddof=ddof, **kwargs
)
_shared_docs["var"] = dedent(
"""
Calculate unbiased %(name)s variance.
Normalized by N-1 by default. This can be changed using the `ddof`
argument.
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
*args, **kwargs
For NumPy compatibility. No additional arguments are used.
Returns
-------
Series or DataFrame
Returns the same object type as the caller of the %(name)s calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.var : Equivalent method for Series.
DataFrame.var : Equivalent method for DataFrame.
numpy.var : Equivalent method for Numpy array.
Notes
-----
The default `ddof` of 1 used in :meth:`Series.var` is different than the
default `ddof` of 0 in :func:`numpy.var`.
A minimum of 1 period is required for the rolling calculation.
Examples
--------
>>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])
>>> s.rolling(3).var()
0 NaN
1 NaN
2 0.333333
3 1.000000
4 1.000000
5 1.333333
6 0.000000
dtype: float64
>>> s.expanding(3).var()
0 NaN
1 NaN
2 0.333333
3 0.916667
4 0.800000
5 0.700000
6 0.619048
dtype: float64
"""
)
def var(self, ddof=1, *args, **kwargs):
nv.validate_window_func("var", args, kwargs)
return self._apply(
"roll_var", "var", check_minp=_require_min_periods(1), ddof=ddof, **kwargs
)
_shared_docs[
"skew"
] = """
Unbiased %(name)s skewness.
Parameters
----------
**kwargs
Keyword arguments to be passed into func.
"""
def skew(self, **kwargs):
return self._apply(
"roll_skew", "skew", check_minp=_require_min_periods(3), **kwargs
)
_shared_docs["kurt"] = dedent(
"""
Calculate unbiased %(name)s kurtosis.
This function uses Fisher's definition of kurtosis without bias.
Parameters
----------
**kwargs
Under Review.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.kurt : Equivalent method for Series.
DataFrame.kurt : Equivalent method for DataFrame.
scipy.stats.skew : Third moment of a probability density.
scipy.stats.kurtosis : Reference SciPy method.
Notes
-----
A minimum of 4 periods is required for the %(name)s calculation.
"""
)
def kurt(self, **kwargs):
return self._apply(
"roll_kurt", "kurt", check_minp=_require_min_periods(4), **kwargs
)
_shared_docs["quantile"] = dedent(
"""
Calculate the %(name)s quantile.
Parameters
----------
quantile : float
Quantile to compute. 0 <= quantile <= 1.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
.. versionadded:: 0.23.0
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
**kwargs:
For compatibility with other %(name)s methods. Has no effect on
the result.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.quantile : Computes value at the given quantile over all data
in Series.
DataFrame.quantile : Computes values at the given quantile over
requested axis in DataFrame.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s.rolling(2).quantile(.4, interpolation='lower')
0 NaN
1 1.0
2 2.0
3 3.0
dtype: float64
>>> s.rolling(2).quantile(.4, interpolation='midpoint')
0 NaN
1 1.5
2 2.5
3 3.5
dtype: float64
"""
)
def quantile(self, quantile, interpolation="linear", **kwargs):
window = self._get_window()
index_as_array = self._get_index()
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, window)
if quantile == 1.0:
return libwindow.roll_max(
arg, window, minp, index_as_array, self.closed
)
elif quantile == 0.0:
return libwindow.roll_min(
arg, window, minp, index_as_array, self.closed
)
else:
return libwindow.roll_quantile(
arg,
window,
minp,
index_as_array,
self.closed,
quantile,
interpolation,
)
return self._apply(f, "quantile", quantile=quantile, **kwargs)
_shared_docs[
"cov"
] = """
Calculate the %(name)s sample covariance.
Parameters
----------
other : Series, DataFrame, or ndarray, optional
If not supplied then will default to self and produce pairwise
output.
pairwise : bool, default None
If False then only matching columns between self and other will be
used and the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the
output will be a MultiIndexed DataFrame in the case of DataFrame
inputs. In the case of missing elements, only complete pairwise
observations will be used.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
**kwargs
Keyword arguments to be passed into func.
"""
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
# GH 16058: offset window
if self.is_freq_type:
window = self.win_freq
else:
window = self._get_window(other)
def _get_cov(X, Y):
# GH #12373 : rolling functions error on float32 data
# to avoid potential overflow, cast the data to float64
X = X.astype("float64")
Y = Y.astype("float64")
mean = lambda x: x.rolling(
window, self.min_periods, center=self.center
).mean(**kwargs)
count = (X + Y).rolling(window=window, center=self.center).count(**kwargs)
bias_adj = count / (count - ddof)
return (mean(X * Y) - mean(X) * mean(Y)) * bias_adj
return _flex_binary_moment(
self._selected_obj, other._selected_obj, _get_cov, pairwise=bool(pairwise)
)
_shared_docs["corr"] = dedent(
"""
Calculate %(name)s correlation.
Parameters
----------
other : Series, DataFrame, or ndarray, optional
If not supplied then will default to self.
pairwise : bool, default None
Calculate pairwise combinations of columns within a
DataFrame. If `other` is not specified, defaults to `True`,
otherwise defaults to `False`.
Not relevant for :class:`~pandas.Series`.
**kwargs
Unused.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the
%(name)s calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.corr : Equivalent method for Series.
DataFrame.corr : Equivalent method for DataFrame.
%(name)s.cov : Similar method to calculate covariance.
numpy.corrcoef : NumPy Pearson's correlation calculation.
Notes
-----
This function uses Pearson's definition of correlation
(https://en.wikipedia.org/wiki/Pearson_correlation_coefficient).
When `other` is not specified, the output will be self correlation (e.g.
all 1's), except for :class:`~pandas.DataFrame` inputs with `pairwise`
set to `True`.
Function will return ``NaN`` for correlations of equal valued sequences;
this is the result of a 0/0 division error.
When `pairwise` is set to `False`, only matching columns between `self` and
`other` will be used.
When `pairwise` is set to `True`, the output will be a MultiIndex DataFrame
with the original index on the first level, and the `other` DataFrame
columns on the second level.
In the case of missing elements, only complete pairwise observations
will be used.
Examples
--------
The below example shows a rolling calculation with a window size of
four matching the equivalent function call using :meth:`numpy.corrcoef`.
>>> v1 = [3, 3, 3, 5, 8]
>>> v2 = [3, 4, 4, 4, 8]
>>> fmt = "{0:.6f}" # limit the printed precision to 6 digits
>>> # numpy returns a 2x2 array, the correlation coefficient
>>> # is the number at entry [0][1]
>>> print(fmt.format(np.corrcoef(v1[:-1], v2[:-1])[0][1]))
0.333333
>>> print(fmt.format(np.corrcoef(v1[1:], v2[1:])[0][1]))
0.916949
>>> s1 = pd.Series(v1)
>>> s2 = pd.Series(v2)
>>> s1.rolling(4).corr(s2)
0 NaN
1 NaN
2 NaN
3 0.333333
4 0.916949
dtype: float64
The below example shows a similar rolling calculation on a
DataFrame using the pairwise option.
>>> matrix = np.array([[51., 35.], [49., 30.], [47., 32.],\
[46., 31.], [50., 36.]])
>>> print(np.corrcoef(matrix[:-1,0], matrix[:-1,1]).round(7))
[[1. 0.6263001]
[0.6263001 1. ]]
>>> print(np.corrcoef(matrix[1:,0], matrix[1:,1]).round(7))
[[1. 0.5553681]
[0.5553681 1. ]]
>>> df = pd.DataFrame(matrix, columns=['X','Y'])
>>> df
X Y
0 51.0 35.0
1 49.0 30.0
2 47.0 32.0
3 46.0 31.0
4 50.0 36.0
>>> df.rolling(4).corr(pairwise=True)
X Y
0 X NaN NaN
Y NaN NaN
1 X NaN NaN
Y NaN NaN
2 X NaN NaN
Y NaN NaN
3 X 1.000000 0.626300
Y 0.626300 1.000000
4 X 1.000000 0.555368
Y 0.555368 1.000000
"""
)
def corr(self, other=None, pairwise=None, **kwargs):
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
window = self._get_window(other)
def _get_corr(a, b):
a = a.rolling(
window=window, min_periods=self.min_periods, center=self.center
)
b = b.rolling(
window=window, min_periods=self.min_periods, center=self.center
)
return a.cov(b, **kwargs) / (a.std(**kwargs) * b.std(**kwargs))
return _flex_binary_moment(
self._selected_obj, other._selected_obj, _get_corr, pairwise=bool(pairwise)
)
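# Illustrative sketch (comments only, not part of the original source): as
# implemented in _get_corr above, rolling correlation is rolling covariance
# divided by the product of the rolling standard deviations.
# >>> import pandas as pd
# >>> x = pd.Series([3.0, 3.0, 3.0, 5.0, 8.0])
# >>> y = pd.Series([3.0, 4.0, 4.0, 4.0, 8.0])
# >>> lhs = x.rolling(4).corr(y)
# >>> rhs = x.rolling(4).cov(y) / (x.rolling(4).std() * y.rolling(4).std())
# >>> bool((lhs - rhs).abs().max() < 1e-12)
# True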
class Rolling(_Rolling_and_Expanding):
@cache_readonly
def is_datetimelike(self):
return isinstance(
self._on, (ABCDatetimeIndex, ABCTimedeltaIndex, ABCPeriodIndex)
)
@cache_readonly
def _on(self):
if self.on is None:
return self.obj.index
elif isinstance(self.obj, ABCDataFrame) and self.on in self.obj.columns:
from pandas import Index
return Index(self.obj[self.on])
else:
raise ValueError(
"invalid on specified as {0}, "
"must be a column (if DataFrame) "
"or None".format(self.on)
)
def validate(self):
super().validate()
# we allow rolling on a datetimelike index
if (self.obj.empty or self.is_datetimelike) and isinstance(
self.window, (str, ABCDateOffset, timedelta)
):
self._validate_monotonic()
freq = self._validate_freq()
# we don't allow center
if self.center:
raise NotImplementedError(
"center is not implemented "
"for datetimelike and offset "
"based windows"
)
# this will raise ValueError on non-fixed freqs
self.win_freq = self.window
self.window = freq.nanos
self.win_type = "freq"
# min_periods must be an integer
if self.min_periods is None:
self.min_periods = 1
elif not is_integer(self.window):
raise ValueError("window must be an integer")
elif self.window < 0:
raise ValueError("window must be non-negative")
if not self.is_datetimelike and self.closed is not None:
raise ValueError(
"closed only implemented for datetimelike " "and offset based windows"
)
def _validate_monotonic(self):
"""
Validate on is_monotonic.
"""
if not self._on.is_monotonic:
formatted = self.on or "index"
raise ValueError("{0} must be " "monotonic".format(formatted))
def _validate_freq(self):
"""
Validate & return window frequency.
"""
from pandas.tseries.frequencies import to_offset
try:
return to_offset(self.window)
except (TypeError, ValueError):
raise ValueError(
"passed window {0} is not "
"compatible with a datetimelike "
"index".format(self.window)
)
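# Illustrative sketch (comments only, not part of the original source): with a
# datetime-like index the window may be an offset string; validate() converts
# it to nanoseconds via to_offset() and min_periods defaults to 1.
# >>> import pandas as pd
# >>> idx = pd.date_range("2020-01-01", periods=5, freq="S")
# >>> s = pd.Series(range(5), index=idx, dtype="float64")
# >>> s.rolling("2s").sum()
# 2020-01-01 00:00:00    0.0
# 2020-01-01 00:00:01    1.0
# 2020-01-01 00:00:02    3.0
# 2020-01-01 00:00:03    5.0
# 2020-01-01 00:00:04    7.0
# Freq: S, dtype: float64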
_agg_see_also_doc = dedent(
"""
See Also
--------
Series.rolling
DataFrame.rolling
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.rolling(3).sum()
A B C
0 NaN NaN NaN
1 NaN NaN NaN
2 -2.655105 0.637799 -2.135068
3 -0.971785 -0.600366 -3.280224
4 -0.214334 -1.294599 -3.227500
5 1.514216 2.028250 -2.989060
6 1.074618 5.709767 -2.322600
7 2.718061 3.850718 0.256446
8 -0.289082 2.454418 1.416871
9 0.212668 0.403198 -0.093924
>>> df.rolling(3).agg({'A':'sum', 'B':'min'})
A B
0 NaN NaN
1 NaN NaN
2 -2.655105 -0.165272
3 -0.971785 -1.340923
4 -0.214334 -1.340923
5 1.514216 -1.340923
6 1.074618 0.211596
7 2.718061 -1.647453
8 -0.289082 -1.647453
9 0.212668 -1.647453
"""
)
@Substitution(
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
versionadded="",
klass="Series/Dataframe",
axis="",
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, arg, *args, **kwargs):
return super().aggregate(arg, *args, **kwargs)
agg = aggregate
@Substitution(name="rolling")
@Appender(_shared_docs["count"])
def count(self):
# different impl for freq counting
if self.is_freq_type:
return self._apply("roll_count", "count")
return super().count()
@Substitution(name="rolling")
@Appender(_shared_docs["apply"])
def apply(self, func, raw=None, args=(), kwargs={}):
return super().apply(func, raw=raw, args=args, kwargs=kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["sum"])
def sum(self, *args, **kwargs):
nv.validate_rolling_func("sum", args, kwargs)
return super().sum(*args, **kwargs)
@Substitution(name="rolling")
@Appender(_doc_template)
@Appender(_shared_docs["max"])
def max(self, *args, **kwargs):
nv.validate_rolling_func("max", args, kwargs)
return super().max(*args, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["min"])
def min(self, *args, **kwargs):
nv.validate_rolling_func("min", args, kwargs)
return super().min(*args, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["mean"])
def mean(self, *args, **kwargs):
nv.validate_rolling_func("mean", args, kwargs)
return super().mean(*args, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["median"])
def median(self, **kwargs):
return super().median(**kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["std"])
def std(self, ddof=1, *args, **kwargs):
nv.validate_rolling_func("std", args, kwargs)
return super().std(ddof=ddof, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["var"])
def var(self, ddof=1, *args, **kwargs):
nv.validate_rolling_func("var", args, kwargs)
return super().var(ddof=ddof, **kwargs)
@Substitution(name="rolling")
@Appender(_doc_template)
@Appender(_shared_docs["skew"])
def skew(self, **kwargs):
return super().skew(**kwargs)
_agg_doc = dedent(
"""
Examples
--------
The example below will show a rolling calculation with a window size of
four matching the equivalent function call using `scipy.stats`.
>>> arr = [1, 2, 3, 4, 999]
>>> fmt = "{0:.6f}" # limit the printed precision to 6 digits
>>> import scipy.stats
>>> print(fmt.format(scipy.stats.kurtosis(arr[:-1], bias=False)))
-1.200000
>>> print(fmt.format(scipy.stats.kurtosis(arr[1:], bias=False)))
3.999946
>>> s = pd.Series(arr)
>>> s.rolling(4).kurt()
0 NaN
1 NaN
2 NaN
3 -1.200000
4 3.999946
dtype: float64
"""
)
@Appender(_agg_doc)
@Substitution(name="rolling")
@Appender(_shared_docs["kurt"])
def kurt(self, **kwargs):
return super().kurt(**kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["quantile"])
def quantile(self, quantile, interpolation="linear", **kwargs):
return super().quantile(
quantile=quantile, interpolation=interpolation, **kwargs
)
@Substitution(name="rolling")
@Appender(_doc_template)
@Appender(_shared_docs["cov"])
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
return super().cov(other=other, pairwise=pairwise, ddof=ddof, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["corr"])
def corr(self, other=None, pairwise=None, **kwargs):
return super().corr(other=other, pairwise=pairwise, **kwargs)
class RollingGroupby(_GroupByMixin, Rolling):
"""
Provide a rolling groupby implementation.
.. versionadded:: 0.18.1
"""
@property
def _constructor(self):
return Rolling
def _gotitem(self, key, ndim, subset=None):
# we are setting the index on the actual object
# here so our index is carried thru to the selected obj
# when we do the splitting for the groupby
if self.on is not None:
self._groupby.obj = self._groupby.obj.set_index(self._on)
self.on = None
return super()._gotitem(key, ndim, subset=subset)
def _validate_monotonic(self):
"""
Validate that on is monotonic;
we don't care for groupby.rolling
because we have already validated at a higher
level.
"""
pass
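# Illustrative usage sketch (comments only, not part of the original source):
# RollingGroupby is normally reached via groupby(...).rolling(...), which
# applies the rolling window separately inside each group.
# >>> import pandas as pd
# >>> df = pd.DataFrame({"g": ["a", "a", "a", "b", "b"],
# ...                    "v": [1.0, 2.0, 3.0, 4.0, 5.0]})
# >>> df.groupby("g").rolling(2)["v"].sum()
# g
# a  0    NaN
#    1    3.0
#    2    5.0
# b  3    NaN
#    4    9.0
# Name: v, dtype: float64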
class Expanding(_Rolling_and_Expanding):
"""
Provide expanding transformations.
.. versionadded:: 0.18.0
Parameters
----------
min_periods : int, default 1
Minimum number of observations in window required to have a value
(otherwise result is NA).
center : bool, default False
Set the labels at the center of the window.
axis : int or str, default 0
Returns
-------
a Window sub-classed for the particular operation
See Also
--------
rolling : Provides rolling window calculations.
ewm : Provides exponential weighted functions.
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
Examples
--------
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
>>> df
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
>>> df.expanding(2).sum()
B
0 NaN
1 1.0
2 3.0
3 3.0
4 7.0
"""
_attributes = ["min_periods", "center", "axis"]
def __init__(self, obj, min_periods=1, center=False, axis=0, **kwargs):
super().__init__(obj=obj, min_periods=min_periods, center=center, axis=axis)
@property
def _constructor(self):
return Expanding
def _get_window(self, other=None):
"""
Get the window length over which to perform some operation.
Parameters
----------
other : object, default None
The other object that is involved in the operation.
Such an object is involved for operations like covariance.
Returns
-------
window : int
The window length.
"""
axis = self.obj._get_axis(self.axis)
length = len(axis) + (other is not None) * len(axis)
other = self.min_periods or -1
return max(length, other)
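# Illustrative sketch (comments only, not part of the original source): the
# expanding "window" is simply the length of the axis (doubled when a second
# object is involved, e.g. for cov/corr), so every point aggregates all prior
# observations.
# >>> import pandas as pd
# >>> s = pd.Series([1.0, 2.0, 3.0])
# >>> s.expanding(min_periods=1).sum()
# 0    1.0
# 1    3.0
# 2    6.0
# dtype: float64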
_agg_see_also_doc = dedent(
"""
See Also
--------
DataFrame.expanding.aggregate
DataFrame.rolling.aggregate
DataFrame.aggregate
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.ewm(alpha=0.5).mean()
A B C
0 -2.385977 -0.102758 0.438822
1 -1.464856 0.569633 -0.490089
2 -0.207700 0.149687 -1.135379
3 -0.471677 -0.645305 -0.906555
4 -0.355635 -0.203033 -0.904111
5 1.076417 1.503943 -1.146293
6 -0.041654 1.925562 -0.588728
7 0.680292 0.132049 0.548693
8 0.067236 0.948257 0.163353
9 -0.286980 0.618493 -0.694496
"""
)
@Substitution(
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
versionadded="",
klass="Series/Dataframe",
axis="",
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, arg, *args, **kwargs):
return super().aggregate(arg, *args, **kwargs)
agg = aggregate
@Substitution(name="expanding")
@Appender(_shared_docs["count"])
def count(self, **kwargs):
return super().count(**kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["apply"])
def apply(self, func, raw=None, args=(), kwargs={}):
return super().apply(func, raw=raw, args=args, kwargs=kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["sum"])
def sum(self, *args, **kwargs):
nv.validate_expanding_func("sum", args, kwargs)
return super().sum(*args, **kwargs)
@Substitution(name="expanding")
@Appender(_doc_template)
@Appender(_shared_docs["max"])
def max(self, *args, **kwargs):
nv.validate_expanding_func("max", args, kwargs)
return super().max(*args, **kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["min"])
def min(self, *args, **kwargs):
nv.validate_expanding_func("min", args, kwargs)
return super().min(*args, **kwargs)
@ | Substitution(name="expanding") | pandas.util._decorators.Substitution |
"""
Provide the groupby split-apply-combine paradigm. Define the GroupBy
class providing the base-class of operations.
The SeriesGroupBy and DataFrameGroupBy sub-class
(defined in pandas.core.groupby.generic)
expose these user-facing objects to provide specific functionality.
"""
from contextlib import contextmanager
import datetime
from functools import partial, wraps
import inspect
import re
import types
from typing import (
Callable,
Dict,
FrozenSet,
Generic,
Hashable,
Iterable,
List,
Mapping,
Optional,
Tuple,
Type,
TypeVar,
Union,
)
import numpy as np
from pandas._config.config import option_context
from pandas._libs import Timestamp
import pandas._libs.groupby as libgroupby
from pandas._typing import FrameOrSeries, Scalar
from pandas.compat import set_function_name
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import Appender, Substitution, cache_readonly, doc
from pandas.core.dtypes.cast import maybe_cast_result
from pandas.core.dtypes.common import (
ensure_float,
is_bool_dtype,
is_datetime64_dtype,
is_extension_array_dtype,
is_integer_dtype,
is_numeric_dtype,
is_object_dtype,
is_scalar,
)
from pandas.core.dtypes.missing import isna, notna
from pandas.core import nanops
import pandas.core.algorithms as algorithms
from pandas.core.arrays import Categorical, DatetimeArray
from pandas.core.base import DataError, PandasObject, SelectionMixin
import pandas.core.common as com
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.groupby import base, ops
from pandas.core.indexes.api import CategoricalIndex, Index, MultiIndex
from pandas.core.series import Series
from pandas.core.sorting import get_group_index_sorter
_common_see_also = """
See Also
--------
Series.%(name)s
DataFrame.%(name)s
"""
_apply_docs = dict(
template="""
Apply function `func` group-wise and combine the results together.
The function passed to `apply` must take a {input} as its first
argument and return a DataFrame, Series or scalar. `apply` will
then take care of combining the results back together into a single
dataframe or series. `apply` is therefore a highly flexible
grouping method.
While `apply` is a very flexible method, its downside is that
using it can be quite a bit slower than using more specific methods
like `agg` or `transform`. Pandas offers a wide range of methods that will
be much faster than using `apply` for their specific purposes, so try to
use them before reaching for `apply`.
Parameters
----------
func : callable
A callable that takes a {input} as its first argument, and
returns a dataframe, a series or a scalar. In addition the
callable may take positional and keyword arguments.
args, kwargs : tuple and dict
Optional positional and keyword arguments to pass to `func`.
Returns
-------
applied : Series or DataFrame
See Also
--------
pipe : Apply function to the full GroupBy object instead of to each
group.
aggregate : Apply aggregate function to the GroupBy object.
transform : Apply function column-by-column to the GroupBy object.
Series.apply : Apply a function to a Series.
DataFrame.apply : Apply a function to each row or column of a DataFrame.
""",
dataframe_examples="""
>>> df = pd.DataFrame({'A': 'a a b'.split(),
...                    'B': [1, 2, 3],
...                    'C': [4, 6, 5]})
>>> g = df.groupby('A')
Notice that ``g`` has two groups, ``a`` and ``b``.
Calling `apply` in various ways, we can get different grouping results:
Example 1: below the function passed to `apply` takes a DataFrame as
its argument and returns a DataFrame. `apply` combines the result for
each group together into a new DataFrame:
>>> g[['B', 'C']].apply(lambda x: x / x.sum())
B C
0 0.333333 0.4
1 0.666667 0.6
2 1.000000 1.0
Example 2: The function passed to `apply` takes a DataFrame as
its argument and returns a Series. `apply` combines the result for
each group together into a new DataFrame:
>>> g[['B', 'C']].apply(lambda x: x.max() - x.min())
B C
A
a 1 2
b 0 0
Example 3: The function passed to `apply` takes a DataFrame as
its argument and returns a scalar. `apply` combines the result for
each group together into a Series, including setting the index as
appropriate:
>>> g.apply(lambda x: x.C.max() - x.B.min())
A
a 5
b 2
dtype: int64
""",
series_examples="""
>>> s = pd.Series([0, 1, 2], index='a a b'.split())
>>> g = s.groupby(s.index)
From ``s`` above we can see that ``g`` has two groups, ``a`` and ``b``.
Calling `apply` in various ways, we can get different grouping results:
Example 1: The function passed to `apply` takes a Series as
its argument and returns a Series. `apply` combines the result for
each group together into a new Series:
>>> g.apply(lambda x: x*2 if x.name == 'b' else x/2)
0 0.0
1 0.5
2 4.0
dtype: float64
Example 2: The function passed to `apply` takes a Series as
its argument and returns a scalar. `apply` combines the result for
each group together into a Series, including setting the index as
appropriate:
>>> g.apply(lambda x: x.max() - x.min())
a 1
b 0
dtype: int64
Notes
-----
In the current implementation `apply` calls `func` twice on the
first group to decide whether it can take a fast or slow code
path. This can lead to unexpected behavior if `func` has
side-effects, as they will take effect twice for the first
group.
Examples
--------
{examples}
""",
)
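# Illustrative sketch (comments only, not part of the original source): on the
# same grouped data, `agg` reduces each group to a single row, `transform`
# returns an object aligned with the original index, and `apply` can do either
# depending on what the passed function returns.
# >>> import pandas as pd
# >>> df = pd.DataFrame({"A": ["a", "a", "b"], "B": [1, 2, 3]})
# >>> g = df.groupby("A")["B"]
# >>> g.agg("sum")
# A
# a    3
# b    3
# Name: B, dtype: int64
# >>> g.transform("sum")
# 0    3
# 1    3
# 2    3
# Name: B, dtype: int64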
_pipe_template = """
Apply a function `func` with arguments to this %(klass)s object and return
the function's result.
%(versionadded)s
Use `.pipe` when you want to improve readability by chaining together
functions that expect Series, DataFrames, GroupBy or Resampler objects.
Instead of writing
>>> h(g(f(df.groupby('group')), arg1=a), arg2=b, arg3=c) # doctest: +SKIP
You can write
>>> (df.groupby('group')
... .pipe(f)
... .pipe(g, arg1=a)
... .pipe(h, arg2=b, arg3=c)) # doctest: +SKIP
which is much more readable.
Parameters
----------
func : callable or tuple of (callable, str)
Function to apply to this %(klass)s object or, alternatively,
a `(callable, data_keyword)` tuple where `data_keyword` is a
string indicating the keyword of `callable` that expects the
%(klass)s object.
args : iterable, optional
Positional arguments passed into `func`.
kwargs : dict, optional
A dictionary of keyword arguments passed into `func`.
Returns
-------
object : the return type of `func`.
See Also
--------
Series.pipe : Apply a function with arguments to a series.
DataFrame.pipe: Apply a function with arguments to a dataframe.
apply : Apply function to each group instead of to the
full %(klass)s object.
Notes
-----
See more `here
<https://pandas.pydata.org/pandas-docs/stable/user_guide/groupby.html#piping-function-calls>`_
Examples
--------
%(examples)s
"""
_transform_template = """
Call function producing a like-indexed %(klass)s on each group and
return a %(klass)s having the same indexes as the original object
filled with the transformed values.
Parameters
----------
f : function
Function to apply to each group.
Can also accept a Numba JIT function with
``engine='numba'`` specified.
If the ``'numba'`` engine is chosen, the function must be
a user defined function with ``values`` and ``index`` as the
first and second arguments respectively in the function signature.
Each group's index will be passed to the user defined function
and optionally available for use.
.. versionchanged:: 1.1.0
*args
Positional arguments to pass to func.
engine : str, default 'cython'
* ``'cython'`` : Runs the function through C-extensions from cython.
* ``'numba'`` : Runs the function through JIT compiled code from numba.
.. versionadded:: 1.1.0
engine_kwargs : dict, default None
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{'nopython': True, 'nogil': False, 'parallel': False}`` and will be
applied to the function
.. versionadded:: 1.1.0
**kwargs
Keyword arguments to be passed into func.
Returns
-------
%(klass)s
See Also
--------
%(klass)s.groupby.apply
%(klass)s.groupby.aggregate
%(klass)s.transform
Notes
-----
Each group is endowed the attribute 'name' in case you need to know
which group you are working on.
The current implementation imposes three requirements on f:
* f must return a value that either has the same shape as the input
subframe or can be broadcast to the shape of the input subframe.
For example, if `f` returns a scalar it will be broadcast to have the
same shape as the input subframe.
* if this is a DataFrame, f must support application column-by-column
in the subframe. If f also supports application to the entire subframe,
then a fast path is used starting from the second chunk.
* f must not mutate groups. Mutation is not supported and may
produce unexpected results.
When using ``engine='numba'``, there will be no "fall back" behavior internally.
The group data and group index will be passed as numpy arrays to the JITed
user defined function, and no alternative execution attempts will be tried.
Examples
--------
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : ['one', 'one', 'two', 'three',
... 'two', 'two'],
... 'C' : [1, 5, 5, 2, 5, 5],
... 'D' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = df.groupby('A')
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
C D
0 -1.154701 -0.577350
1 0.577350 0.000000
2 0.577350 1.154701
3 -1.154701 -1.000000
4 0.577350 -0.577350
5 0.577350 1.000000
Broadcast result of the transformation
>>> grouped.transform(lambda x: x.max() - x.min())
C D
0 4 6.0
1 3 8.0
2 4 6.0
3 3 8.0
4 4 6.0
5 3 8.0
"""
_agg_template = """
Aggregate using one or more operations over the specified axis.
Parameters
----------
func : function, str, list or dict
Function to use for aggregating the data. If a function, must either
work when passed a %(klass)s or when passed to %(klass)s.apply.
Accepted combinations are:
- function
- string function name
- list of functions and/or function names, e.g. ``[np.sum, 'mean']``
- dict of axis labels -> functions, function names or list of such.
Can also accept a Numba JIT function with
``engine='numba'`` specified.
If the ``'numba'`` engine is chosen, the function must be
a user defined function with ``values`` and ``index`` as the
first and second arguments respectively in the function signature.
Each group's index will be passed to the user defined function
and optionally available for use.
.. versionchanged:: 1.1.0
*args
Positional arguments to pass to func.
engine : str, default 'cython'
* ``'cython'`` : Runs the function through C-extensions from cython.
* ``'numba'`` : Runs the function through JIT compiled code from numba.
.. versionadded:: 1.1.0
engine_kwargs : dict, default None
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{'nopython': True, 'nogil': False, 'parallel': False}`` and will be
applied to the function
.. versionadded:: 1.1.0
**kwargs
Keyword arguments to be passed into func.
Returns
-------
%(klass)s
See Also
--------
%(klass)s.groupby.apply
%(klass)s.groupby.transform
%(klass)s.aggregate
Notes
-----
When using ``engine='numba'``, there will be no "fall back" behavior internally.
The group data and group index will be passed as numpy arrays to the JITed
user defined function, and no alternative execution attempts will be tried.
%(examples)s
"""
class GroupByPlot(PandasObject):
"""
Class implementing the .plot attribute for groupby objects.
"""
def __init__(self, groupby):
self._groupby = groupby
def __call__(self, *args, **kwargs):
def f(self):
return self.plot(*args, **kwargs)
f.__name__ = "plot"
return self._groupby.apply(f)
def __getattr__(self, name: str):
def attr(*args, **kwargs):
def f(self):
return getattr(self.plot, name)(*args, **kwargs)
return self._groupby.apply(f)
return attr
@contextmanager
def _group_selection_context(groupby):
"""
Set / reset the _group_selection_context.
"""
groupby._set_group_selection()
yield groupby
groupby._reset_group_selection()
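# Internal-usage sketch (comments only, not part of the original source):
# methods that must see every non-grouping column wrap their body in this
# context manager so the selection is set on entry and reset on exit, e.g.
#
#     with _group_selection_context(grouped):
#         result = grouped.apply(some_func)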
_KeysArgType = Union[
Hashable,
List[Hashable],
Callable[[Hashable], Hashable],
List[Callable[[Hashable], Hashable]],
Mapping[Hashable, Hashable],
]
class _GroupBy(PandasObject, SelectionMixin, Generic[FrameOrSeries]):
_group_selection = None
_apply_whitelist: FrozenSet[str] = frozenset()
def __init__(
self,
obj: FrameOrSeries,
keys: Optional[_KeysArgType] = None,
axis: int = 0,
level=None,
grouper: "Optional[ops.BaseGrouper]" = None,
exclusions=None,
selection=None,
as_index: bool = True,
sort: bool = True,
group_keys: bool = True,
squeeze: bool = False,
observed: bool = False,
mutated: bool = False,
dropna: bool = True,
):
self._selection = selection
assert isinstance(obj, NDFrame), type(obj)
obj._consolidate_inplace()
self.level = level
if not as_index:
if not isinstance(obj, DataFrame):
raise TypeError("as_index=False only valid with DataFrame")
if axis != 0:
raise ValueError("as_index=False only valid for axis=0")
self.as_index = as_index
self.keys = keys
self.sort = sort
self.group_keys = group_keys
self.squeeze = squeeze
self.observed = observed
self.mutated = mutated
self.dropna = dropna
if grouper is None:
from pandas.core.groupby.grouper import get_grouper
grouper, exclusions, obj = get_grouper(
obj,
keys,
axis=axis,
level=level,
sort=sort,
observed=observed,
mutated=self.mutated,
dropna=self.dropna,
)
self.obj = obj
self.axis = obj._get_axis_number(axis)
self.grouper = grouper
self.exclusions = set(exclusions) if exclusions else set()
def __len__(self) -> int:
return len(self.groups)
def __repr__(self) -> str:
# TODO: Better repr for GroupBy object
return object.__repr__(self)
def _assure_grouper(self):
"""
We create the grouper on instantiation; sub-classes may have a
different policy.
"""
pass
@property
def groups(self):
"""
Dict {group name -> group labels}.
"""
self._assure_grouper()
return self.grouper.groups
@property
def ngroups(self):
self._assure_grouper()
return self.grouper.ngroups
@property
def indices(self):
"""
Dict {group name -> group indices}.
"""
self._assure_grouper()
return self.grouper.indices
def _get_indices(self, names):
"""
Safe get multiple indices, translate keys for
datelike to underlying repr.
"""
def get_converter(s):
# possibly convert to the actual key types
# in the indices, could be a Timestamp or a np.datetime64
if isinstance(s, datetime.datetime):
return lambda key: Timestamp(key)
elif isinstance(s, np.datetime64):
return lambda key: Timestamp(key).asm8
else:
return lambda key: key
if len(names) == 0:
return []
if len(self.indices) > 0:
index_sample = next(iter(self.indices))
else:
index_sample = None # Dummy sample
name_sample = names[0]
if isinstance(index_sample, tuple):
if not isinstance(name_sample, tuple):
msg = "must supply a tuple to get_group with multiple grouping keys"
raise ValueError(msg)
if not len(name_sample) == len(index_sample):
try:
# If the original grouper was a tuple
return [self.indices[name] for name in names]
except KeyError as err:
# turns out it wasn't a tuple
msg = (
"must supply a same-length tuple to get_group "
"with multiple grouping keys"
)
raise ValueError(msg) from err
converters = [get_converter(s) for s in index_sample]
names = (tuple(f(n) for f, n in zip(converters, name)) for name in names)
else:
converter = get_converter(index_sample)
names = (converter(name) for name in names)
return [self.indices.get(name, []) for name in names]
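# Illustrative sketch (comments only, not part of the original source):
# `groups` maps each group name to its index labels, while `indices` maps it
# to integer positions; _get_indices above normalises datetime-like keys
# before the lookup.
# >>> import pandas as pd
# >>> df = pd.DataFrame({"A": ["a", "b", "a"], "B": [1, 2, 3]})
# >>> df.groupby("A").indices
# {'a': array([0, 2]), 'b': array([1])}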
def _get_index(self, name):
"""
Safe get index, translate keys for datelike to underlying repr.
"""
return self._get_indices([name])[0]
@cache_readonly
def _selected_obj(self):
# Note: _selected_obj is always just `self.obj` for SeriesGroupBy
if self._selection is None or isinstance(self.obj, Series):
if self._group_selection is not None:
return self.obj[self._group_selection]
return self.obj
else:
return self.obj[self._selection]
def _reset_group_selection(self):
"""
Clear group based selection.
Used for methods needing to return info on each group regardless of
whether a group selection was previously set.
"""
if self._group_selection is not None:
# GH12839 clear cached selection too when changing group selection
self._group_selection = None
self._reset_cache("_selected_obj")
def _set_group_selection(self):
"""
Create group based selection.
Used when selection is not passed directly but instead via a grouper.
NOTE: this should be paired with a call to _reset_group_selection
"""
grp = self.grouper
if not (
self.as_index
and getattr(grp, "groupings", None) is not None
and self.obj.ndim > 1
and self._group_selection is None
):
return
ax = self.obj._info_axis
groupers = [g.name for g in grp.groupings if g.level is None and g.in_axis]
if len(groupers):
# GH12839 clear selected obj cache when group selection changes
self._group_selection = ax.difference(Index(groupers), sort=False).tolist()
self._reset_cache("_selected_obj")
def _set_result_index_ordered(self, result):
# set the result index on the passed values object and
# return the new object, xref 8046
# the values/counts are repeated according to the group index
# shortcut if we have an already ordered grouper
if not self.grouper.is_monotonic:
index = Index(np.concatenate(self._get_indices(self.grouper.result_index)))
result.set_axis(index, axis=self.axis, inplace=True)
result = result.sort_index(axis=self.axis)
result.set_axis(self.obj._get_axis(self.axis), axis=self.axis, inplace=True)
return result
def _dir_additions(self):
return self.obj._dir_additions() | self._apply_whitelist
def __getattr__(self, attr: str):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
raise AttributeError(
f"'{type(self).__name__}' object has no attribute '{attr}'"
)
@Substitution(
klass="GroupBy",
versionadded=".. versionadded:: 0.21.0",
examples="""\
>>> df = pd.DataFrame({'A': 'a b a b'.split(), 'B': [1, 2, 3, 4]})
>>> df
A B
0 a 1
1 b 2
2 a 3
3 b 4
To get the difference between each group's maximum and minimum value in one
pass, you can do
>>> df.groupby('A').pipe(lambda x: x.max() - x.min())
B
A
a 2
b 2""",
)
@Appender(_pipe_template)
def pipe(self, func, *args, **kwargs):
return com.pipe(self, func, *args, **kwargs)
plot = property(GroupByPlot)
def _make_wrapper(self, name):
assert name in self._apply_whitelist
self._set_group_selection()
# need to set up the selection,
# as it is not passed directly but comes in via the grouper
f = getattr(self._selected_obj, name)
if not isinstance(f, types.MethodType):
return self.apply(lambda self: getattr(self, name))
f = getattr(type(self._selected_obj), name)
sig = inspect.signature(f)
def wrapper(*args, **kwargs):
# a little trickery for aggregation functions that need an axis
# argument
if "axis" in sig.parameters:
if kwargs.get("axis", None) is None:
kwargs["axis"] = self.axis
def curried(x):
return f(x, *args, **kwargs)
# preserve the name so we can detect it when calling plot methods,
# to avoid duplicates
curried.__name__ = name
# special case otherwise extra plots are created when catching the
# exception below
if name in base.plotting_methods:
return self.apply(curried)
try:
return self.apply(curried)
except TypeError as err:
if not re.search(
"reduction operation '.*' not allowed for this dtype", str(err)
):
# We don't have a cython implementation
# TODO: is the above comment accurate?
raise
if self.obj.ndim == 1:
# this can be called recursively, so need to raise ValueError
raise ValueError
# GH#3688 try to operate item-by-item
result = self._aggregate_item_by_item(name, *args, **kwargs)
return result
wrapper.__name__ = name
return wrapper
def get_group(self, name, obj=None):
"""
Construct DataFrame from group with provided name.
Parameters
----------
name : object
The name of the group to get as a DataFrame.
obj : DataFrame, default None
The DataFrame to take the DataFrame out of. If
it is None, the object groupby was called on will
be used.
Returns
-------
group : same type as obj
"""
if obj is None:
obj = self._selected_obj
inds = self._get_index(name)
if not len(inds):
raise KeyError(name)
return obj._take_with_is_copy(inds, axis=self.axis)
def __iter__(self):
"""
Groupby iterator.
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
return self.grouper.get_iterator(self.obj, axis=self.axis)
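# Illustrative sketch (comments only, not part of the original source):
# iteration yields (name, subset) pairs, chunking the data by group.
# >>> import pandas as pd
# >>> df = pd.DataFrame({"A": ["a", "b", "a"], "B": [1, 2, 3]})
# >>> for name, group in df.groupby("A"):
# ...     print(name, list(group["B"]))
# a [1, 3]
# b [2]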
@Appender(
_apply_docs["template"].format(
input="dataframe", examples=_apply_docs["dataframe_examples"]
)
)
def apply(self, func, *args, **kwargs):
func = self._is_builtin_func(func)
# this is needed so we don't try and wrap strings. If we could
# resolve functions to their callable functions prior, this
# wouldn't be needed
if args or kwargs:
if callable(func):
@wraps(func)
def f(g):
with np.errstate(all="ignore"):
return func(g, *args, **kwargs)
elif hasattr(nanops, "nan" + func):
# TODO: should we wrap this in to e.g. _is_builtin_func?
f = getattr(nanops, "nan" + func)
else:
raise ValueError(
"func must be a callable if args or kwargs are supplied"
)
else:
f = func
# ignore SettingWithCopy here in case the user mutates
with option_context("mode.chained_assignment", None):
try:
result = self._python_apply_general(f)
except TypeError:
# gh-20949
# try again, with .apply acting as a filtering
# operation, by excluding the grouping column
# This would normally not be triggered
# except if the udf is trying an operation that
# fails on *some* columns, e.g. a numeric operation
# on a string grouper column
with _group_selection_context(self):
return self._python_apply_general(f)
return result
def _python_apply_general(self, f):
keys, values, mutated = self.grouper.apply(f, self._selected_obj, self.axis)
return self._wrap_applied_output(
keys, values, not_indexed_same=mutated or self.mutated
)
def _iterate_slices(self) -> Iterable[Series]:
raise AbstractMethodError(self)
def transform(self, func, *args, **kwargs):
raise AbstractMethodError(self)
def _cumcount_array(self, ascending: bool = True):
"""
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Notes
-----
this is currently implementing sort=False
(though the default is sort=True) for groupby in general
"""
ids, _, ngroups = self.grouper.group_info
sorter = get_group_index_sorter(ids, ngroups)
ids, count = ids[sorter], len(ids)
if count == 0:
return np.empty(0, dtype=np.int64)
run = np.r_[True, ids[:-1] != ids[1:]]
rep = np.diff(np.r_[np.nonzero(run)[0], count])
out = (~run).cumsum()
if ascending:
out -= np.repeat(out[run], rep)
else:
out = np.repeat(out[np.r_[run[1:], True]], rep) - out
rev = np.empty(count, dtype=np.intp)
rev[sorter] = np.arange(count, dtype=np.intp)
return out[rev].astype(np.int64, copy=False)
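# Illustrative sketch (comments only, not part of the original source):
# _cumcount_array is the engine behind GroupBy.cumcount, numbering the rows
# within each group either forwards or backwards.
# >>> import pandas as pd
# >>> df = pd.DataFrame({"A": ["a", "a", "b", "a"]})
# >>> df.groupby("A").cumcount()
# 0    0
# 1    1
# 2    0
# 3    2
# dtype: int64
# >>> df.groupby("A").cumcount(ascending=False)
# 0    2
# 1    1
# 2    0
# 3    0
# dtype: int64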
def _transform_should_cast(self, func_nm: str) -> bool:
"""
Parameters
----------
func_nm: str
The name of the aggregation function being performed
Returns
-------
bool
Whether transform should attempt to cast the result of aggregation
"""
return (self.size().fillna(0) > 0).any() and (
func_nm not in base.cython_cast_blacklist
)
def _cython_transform(self, how: str, numeric_only: bool = True, **kwargs):
output: Dict[base.OutputKey, np.ndarray] = {}
for idx, obj in enumerate(self._iterate_slices()):
name = obj.name
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, _ = self.grouper.transform(obj.values, how, **kwargs)
except NotImplementedError:
continue
if self._transform_should_cast(how):
result = maybe_cast_result(result, obj, how=how)
key = base.OutputKey(label=name, position=idx)
output[key] = result
if len(output) == 0:
raise DataError("No numeric types to aggregate")
return self._wrap_transformed_output(output)
def _wrap_aggregated_output(self, output: Mapping[base.OutputKey, np.ndarray]):
raise AbstractMethodError(self)
def _wrap_transformed_output(self, output: Mapping[base.OutputKey, np.ndarray]):
raise AbstractMethodError(self)
def _wrap_applied_output(self, keys, values, not_indexed_same: bool = False):
raise AbstractMethodError(self)
def _cython_agg_general(
self, how: str, alt=None, numeric_only: bool = True, min_count: int = -1
):
output: Dict[base.OutputKey, Union[np.ndarray, DatetimeArray]] = {}
# Ideally we would be able to enumerate self._iterate_slices and use
# the index from enumeration as the key of output, but ohlc in particular
# returns a (n x 4) array. Output requires 1D ndarrays as values, so we
# need to slice that up into 1D arrays
idx = 0
for obj in self._iterate_slices():
name = obj.name
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
result, agg_names = self.grouper.aggregate(
obj._values, how, min_count=min_count
)
if agg_names:
# e.g. ohlc
assert len(agg_names) == result.shape[1]
for result_column, result_name in zip(result.T, agg_names):
key = base.OutputKey(label=result_name, position=idx)
output[key] = maybe_cast_result(result_column, obj, how=how)
idx += 1
else:
assert result.ndim == 1
key = base.OutputKey(label=name, position=idx)
output[key] = maybe_cast_result(result, obj, how=how)
idx += 1
if len(output) == 0:
raise DataError("No numeric types to aggregate")
return self._wrap_aggregated_output(output)
def _python_agg_general(
self, func, *args, engine="cython", engine_kwargs=None, **kwargs
):
func = self._is_builtin_func(func)
if engine != "numba":
f = lambda x: func(x, *args, **kwargs)
# iterate through "columns" ex exclusions to populate output dict
output: Dict[base.OutputKey, np.ndarray] = {}
for idx, obj in enumerate(self._iterate_slices()):
name = obj.name
if self.grouper.ngroups == 0:
# agg_series below assumes ngroups > 0
continue
if engine == "numba":
result, counts = self.grouper.agg_series(
obj,
func,
*args,
engine=engine,
engine_kwargs=engine_kwargs,
**kwargs,
)
else:
try:
# if this function is invalid for this dtype, we will ignore it.
result, counts = self.grouper.agg_series(obj, f)
except TypeError:
continue
assert result is not None
key = base.OutputKey(label=name, position=idx)
output[key] = maybe_cast_result(result, obj, numeric_only=True)
if len(output) == 0:
return self._python_apply_general(f)
if self.grouper._filter_empty_groups:
mask = counts.ravel() > 0
for key, result in output.items():
# since we are masking, make sure that we have a float object
values = result
if is_numeric_dtype(values.dtype):
values = ensure_float(values)
output[key] = maybe_cast_result(values[mask], result)
return self._wrap_aggregated_output(output)
def _concat_objects(self, keys, values, not_indexed_same: bool = False):
from pandas.core.reshape.concat import concat
def reset_identity(values):
# reset the identities of the components
# of the values to prevent aliasing
for v in com.not_none(*values):
ax = v._get_axis(self.axis)
ax._reset_identity()
return values
if not not_indexed_same:
result = concat(values, axis=self.axis)
ax = self._selected_obj._get_axis(self.axis)
# this is a very unfortunate situation
# we can't use reindex to restore the original order
# when the ax has duplicates
# so we resort to this
# GH 14776, 30667
if ax.has_duplicates:
indexer, _ = result.index.get_indexer_non_unique(ax.values)
indexer = algorithms.unique1d(indexer)
result = result.take(indexer, axis=self.axis)
else:
result = result.reindex(ax, axis=self.axis)
elif self.group_keys:
values = reset_identity(values)
if self.as_index:
# possible MI return case
group_keys = keys
group_levels = self.grouper.levels
group_names = self.grouper.names
result = concat(
values,
axis=self.axis,
keys=group_keys,
levels=group_levels,
names=group_names,
sort=False,
)
else:
# GH5610, returns a MI, with the first level being a
# range index
keys = list(range(len(values)))
result = concat(values, axis=self.axis, keys=keys)
else:
values = reset_identity(values)
result = concat(values, axis=self.axis)
if isinstance(result, Series) and self._selection_name is not None:
result.name = self._selection_name
return result
def _apply_filter(self, indices, dropna):
if len(indices) == 0:
indices = np.array([], dtype="int64")
else:
indices = np.sort(np.concatenate(indices))
if dropna:
filtered = self._selected_obj.take(indices, axis=self.axis)
else:
mask = np.empty(len(self._selected_obj.index), dtype=bool)
mask.fill(False)
mask[indices.astype(int)] = True
# mask fails to broadcast when passed to where; broadcast manually.
mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
filtered = self._selected_obj.where(mask) # Fill with NaNs.
return filtered
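# Illustrative sketch (comments only, not part of the original source):
# _apply_filter backs DataFrameGroupBy.filter / SeriesGroupBy.filter; with
# dropna=False the rows of rejected groups are kept but masked to NaN.
# >>> import pandas as pd
# >>> df = pd.DataFrame({"A": ["a", "a", "b"], "B": [1, 2, 3]})
# >>> df.groupby("A").filter(lambda g: len(g) > 1)
#    A  B
# 0  a  1
# 1  a  2
# >>> df.groupby("A").filter(lambda g: len(g) > 1, dropna=False)
#      A    B
# 0    a  1.0
# 1    a  2.0
# 2  NaN  NaN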
# To track operations that expand dimensions, like ohlc
OutputFrameOrSeries = TypeVar("OutputFrameOrSeries", bound=NDFrame)
class GroupBy(_GroupBy[FrameOrSeries]):
"""
Class for grouping and aggregating relational data.
See aggregate, transform, and apply functions on this object.
It's easiest to use obj.groupby(...) to use GroupBy, but you can also do:
::
grouped = groupby(obj, ...)
Parameters
----------
obj : pandas object
axis : int, default 0
level : int, default None
Level of MultiIndex
groupings : list of Grouping objects
Most users should ignore this
exclusions : array-like, optional
List of columns to exclude
name : str
Most users should ignore this
Returns
-------
**Attributes**
groups : dict
{group name -> group labels}
len(grouped) : int
Number of groups
Notes
-----
After grouping, see aggregate, apply, and transform functions. Here are
some other brief notes about usage. When grouping by multiple keys, the
result index will be a MultiIndex (hierarchical) by default.
Iteration produces (key, group) tuples, i.e. chunking the data by group. So
you can write code like:
::
grouped = obj.groupby(keys, axis=axis)
for key, group in grouped:
# do something with the data
Function calls on GroupBy, if not specially implemented, "dispatch" to the
grouped data. So if you group a DataFrame and wish to invoke the std()
method on each group, you can simply do:
::
df.groupby(mapper).std()
rather than
::
df.groupby(mapper).aggregate(np.std)
You can pass arguments to these "wrapped" functions, too.
See the online documentation for full exposition on these topics and much
more
"""
@property
def _obj_1d_constructor(self) -> Type["Series"]:
# GH28330 preserve subclassed Series/DataFrames
if isinstance(self.obj, DataFrame):
return self.obj._constructor_sliced
assert isinstance(self.obj, Series)
return self.obj._constructor
def _bool_agg(self, val_test, skipna):
"""
Shared func to call any / all Cython GroupBy implementations.
"""
def objs_to_bool(vals: np.ndarray) -> Tuple[np.ndarray, Type]:
if is_object_dtype(vals):
vals = np.array([bool(x) for x in vals])
else:
vals = vals.astype(np.bool)
return vals.view(np.uint8), np.bool
def result_to_bool(result: np.ndarray, inference: Type) -> np.ndarray:
return result.astype(inference, copy=False)
return self._get_cythonized_result(
"group_any_all",
aggregate=True,
cython_dtype=np.dtype(np.uint8),
needs_values=True,
needs_mask=True,
pre_processing=objs_to_bool,
post_processing=result_to_bool,
val_test=val_test,
skipna=skipna,
)
@Substitution(name="groupby")
@Appender(_common_see_also)
def any(self, skipna: bool = True):
"""
Return True if any value in the group is truthful, else False.
Parameters
----------
skipna : bool, default True
Flag to ignore nan values during truth testing.
Returns
-------
Series or DataFrame
"""
return self._bool_agg("any", skipna)
@Substitution(name="groupby")
@Appender(_common_see_also)
def all(self, skipna: bool = True):
"""
Return True if all values in the group are truthful, else False.
Parameters
----------
skipna : bool, default True
Flag to ignore nan values during truth testing.
Returns
-------
Series or DataFrame
"""
return self._bool_agg("all", skipna)
@Substitution(name="groupby")
@Appender(_common_see_also)
def count(self):
"""
Compute count of group, excluding missing values.
Returns
-------
Series or DataFrame
Count of values within each group.
"""
# defined here for API doc
raise NotImplementedError
@Substitution(name="groupby")
@Substitution(see_also=_common_see_also)
def mean(self, numeric_only: bool = True):
"""
Compute mean of groups, excluding missing values.
Parameters
----------
numeric_only : bool, default True
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data.
Returns
-------
pandas.Series or pandas.DataFrame
%(see_also)s
Examples
--------
>>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],
... 'B': [np.nan, 2, 3, 4, 5],
... 'C': [1, 2, 1, 1, 2]}, columns=['A', 'B', 'C'])
Groupby one column and return the mean of the remaining columns in
each group.
>>> df.groupby('A').mean()
B C
A
1 3.0 1.333333
2 4.0 1.500000
Groupby two columns and return the mean of the remaining column.
>>> df.groupby(['A', 'B']).mean()
C
A B
1 2.0 2
4.0 1
2 3.0 1
5.0 2
Groupby one column and return the mean of only a particular column in
the group.
>>> df.groupby('A')['B'].mean()
A
1 3.0
2 4.0
Name: B, dtype: float64
"""
return self._cython_agg_general(
"mean",
alt=lambda x, axis: Series(x).mean(numeric_only=numeric_only),
numeric_only=numeric_only,
)
@Substitution(name="groupby")
@Appender(_common_see_also)
def median(self, numeric_only=True):
"""
Compute median of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex
Parameters
----------
numeric_only : bool, default True
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data.
Returns
-------
Series or DataFrame
Median of values within each group.
"""
return self._cython_agg_general(
"median",
alt=lambda x, axis: Series(x).median(axis=axis, numeric_only=numeric_only),
numeric_only=numeric_only,
)
@Substitution(name="groupby")
@Appender(_common_see_also)
def std(self, ddof: int = 1):
"""
Compute standard deviation of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
Parameters
----------
ddof : int, default 1
Degrees of freedom.
Returns
-------
Series or DataFrame
Standard deviation of values within each group.
"""
# TODO: implement at Cython level?
return np.sqrt(self.var(ddof=ddof))
@Substitution(name="groupby")
@Appender(_common_see_also)
def var(self, ddof: int = 1):
"""
Compute variance of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
Parameters
----------
ddof : int, default 1
Degrees of freedom.
Returns
-------
Series or DataFrame
Variance of values within each group.
"""
if ddof == 1:
return self._cython_agg_general(
"var", alt=lambda x, axis: Series(x).var(ddof=ddof)
)
else:
func = lambda x: x.var(ddof=ddof)
with _group_selection_context(self):
return self._python_agg_general(func)
@Substitution(name="groupby")
@Appender(_common_see_also)
def sem(self, ddof: int = 1):
"""
Compute standard error of the mean of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
Parameters
----------
ddof : int, default 1
Degrees of freedom.
Returns
-------
Series or DataFrame
Standard error of the mean of values within each group.
"""
return self.std(ddof=ddof) / np.sqrt(self.count())
@Substitution(name="groupby")
@Appender(_common_see_also)
def size(self):
"""
Compute group sizes.
Returns
-------
Series
Number of rows in each group.
"""
result = self.grouper.size()
# GH28330 preserve subclassed Series/DataFrames through calls
if issubclass(self.obj._constructor, Series):
result = self._obj_1d_constructor(result, name=self.obj.name)
else:
result = self._obj_1d_constructor(result)
return self._reindex_output(result, fill_value=0)
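# Illustrative sketch (comments only, not part of the original source): size
# counts all rows per group, including missing values, while count excludes
# them.
# >>> import pandas as pd
# >>> import numpy as np
# >>> df = pd.DataFrame({"A": ["a", "a", "b"], "B": [1.0, np.nan, 3.0]})
# >>> df.groupby("A").size()
# A
# a    2
# b    1
# dtype: int64
# >>> df.groupby("A")["B"].count()
# A
# a    1
# b    1
# Name: B, dtype: int64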
@classmethod
def _add_numeric_operations(cls):
"""
Add numeric operations to the GroupBy generically.
"""
def groupby_function(
name: str,
alias: str,
npfunc,
numeric_only: bool = True,
min_count: int = -1,
):
_local_template = """
Compute %(f)s of group values.
Parameters
----------
numeric_only : bool, default %(no)s
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data.
min_count : int, default %(mc)s
The required number of valid values to perform the operation. If fewer
than ``min_count`` non-NA values are present the result will be NA.
Returns
-------
Series or DataFrame
Computed %(f)s of values within each group.
"""
@Substitution(name="groupby", f=name, no=numeric_only, mc=min_count)
@Appender(_common_see_also)
@Appender(_local_template)
def func(self, numeric_only=numeric_only, min_count=min_count):
self._set_group_selection()
# try a cython aggregation if we can
try:
return self._cython_agg_general(
how=alias,
alt=npfunc,
numeric_only=numeric_only,
min_count=min_count,
)
except DataError:
pass
except NotImplementedError as err:
if "function is not implemented for this dtype" in str(
err
) or "category dtype not supported" in str(err):
# raised in _get_cython_function, in some cases can
# be trimmed by implementing cython funcs for more dtypes
pass
else:
raise
# apply a non-cython aggregation
result = self.aggregate(lambda x: npfunc(x, axis=self.axis))
return result
set_function_name(func, name, cls)
return func
def first_compat(obj: FrameOrSeries, axis: int = 0):
def first(x: Series):
x = x.array[notna(x.array)]
if len(x) == 0:
return np.nan
return x[0]
if isinstance(obj, DataFrame):
return obj.apply(first, axis=axis)
elif isinstance(obj, Series):
return first(obj)
else:
raise TypeError(type(obj))
def last_compat(obj: FrameOrSeries, axis: int = 0):
def last(x: Series):
x = x.array[notna(x.array)]
if len(x) == 0:
return np.nan
return x[-1]
if isinstance(obj, DataFrame):
return obj.apply(last, axis=axis)
elif isinstance(obj, Series):
return last(obj)
else:
raise TypeError(type(obj))
cls.sum = groupby_function("sum", "add", np.sum, min_count=0)
cls.prod = groupby_function("prod", "prod", np.prod, min_count=0)
cls.min = groupby_function("min", "min", np.min, numeric_only=False)
cls.max = groupby_function("max", "max", np.max, numeric_only=False)
cls.first = groupby_function("first", "first", first_compat, numeric_only=False)
cls.last = groupby_function("last", "last", last_compat, numeric_only=False)
@Substitution(name="groupby")
@Appender(_common_see_also)
def ohlc(self) -> DataFrame:
"""
Compute open, high, low and close values of a group, excluding missing values.
For multiple groupings, the result index will be a MultiIndex
Returns
-------
DataFrame
Open, high, low and close values within each group.
"""
return self._apply_to_column_groupbys(lambda x: x._cython_agg_general("ohlc"))
@doc(DataFrame.describe)
def describe(self, **kwargs):
with _group_selection_context(self):
result = self.apply(lambda x: x.describe(**kwargs))
if self.axis == 1:
return result.T
return result.unstack()
def resample(self, rule, *args, **kwargs):
"""
Provide resampling when using a TimeGrouper.
Given a grouper, the function resamples it according to a
frequency string or offset alias.
See the :ref:`frequency aliases <timeseries.offset_aliases>`
documentation for more details.
Parameters
----------
rule : str or DateOffset
The offset string or object representing target grouper conversion.
*args, **kwargs
Possible arguments are `how`, `fill_method`, `limit`, `kind` and
`on`, and other arguments of `TimeGrouper`.
Returns
-------
Grouper
Return a new grouper with our resampler appended.
See Also
--------
Grouper : Specify a frequency to resample with when
grouping by a key.
DatetimeIndex.resample : Frequency conversion and resampling of
time series.
Examples
--------
>>> idx = pd.date_range('1/1/2000', periods=4, freq='T')
>>> df = pd.DataFrame(data=4 * [range(2)],
... index=idx,
... columns=['a', 'b'])
>>> df.iloc[2, 0] = 5
>>> df
a b
2000-01-01 00:00:00 0 1
2000-01-01 00:01:00 0 1
2000-01-01 00:02:00 5 1
2000-01-01 00:03:00 0 1
Downsample the DataFrame into 3 minute bins and sum the values of
the timestamps falling into a bin.
>>> df.groupby('a').resample('3T').sum()
a b
a
0 2000-01-01 00:00:00 0 2
2000-01-01 00:03:00 0 1
5 2000-01-01 00:00:00 5 1
Upsample the series into 30 second bins.
>>> df.groupby('a').resample('30S').sum()
a b
a
0 2000-01-01 00:00:00 0 1
2000-01-01 00:00:30 0 0
2000-01-01 00:01:00 0 1
2000-01-01 00:01:30 0 0
2000-01-01 00:02:00 0 0
2000-01-01 00:02:30 0 0
2000-01-01 00:03:00 0 1
5 2000-01-01 00:02:00 5 1
Resample by month. Values are assigned to the month of the period.
>>> df.groupby('a').resample('M').sum()
a b
a
0 2000-01-31 0 3
5 2000-01-31 5 1
Downsample the series into 3 minute bins as above, but close the right
side of the bin interval.
>>> df.groupby('a').resample('3T', closed='right').sum()
a b
a
0 1999-12-31 23:57:00 0 1
2000-01-01 00:00:00 0 2
5 2000-01-01 00:00:00 5 1
Downsample the series into 3 minute bins and close the right side of
the bin interval, but label each bin using the right edge instead of
the left.
>>> df.groupby('a').resample('3T', closed='right', label='right').sum()
a b
a
0 2000-01-01 00:00:00 0 1
2000-01-01 00:03:00 0 2
5 2000-01-01 00:03:00 5 1
"""
from pandas.core.resample import get_resampler_for_grouping
return get_resampler_for_grouping(self, rule, *args, **kwargs)
@Substitution(name="groupby")
@Appender(_common_see_also)
def rolling(self, *args, **kwargs):
"""
Return a rolling grouper, providing rolling functionality per group.
"""
from pandas.core.window import RollingGroupby
return RollingGroupby(self, *args, **kwargs)
@Substitution(name="groupby")
@Appender(_common_see_also)
def expanding(self, *args, **kwargs):
"""
Return an expanding grouper, providing expanding
functionality per group.
"""
from pandas.core.window import ExpandingGroupby
return ExpandingGroupby(self, *args, **kwargs)
def _fill(self, direction, limit=None):
"""
Shared function for `pad` and `backfill` to call Cython method.
Parameters
----------
direction : {'ffill', 'bfill'}
Direction passed to underlying Cython function. `bfill` will cause
values to be filled backwards. `ffill` and any other values will
default to a forward fill
limit : int, default None
Maximum number of consecutive values to fill. If `None`, this
method will convert to -1 prior to passing to Cython
Returns
-------
`Series` or `DataFrame` with filled values
See Also
--------
pad
backfill
"""
# Need int value for Cython
if limit is None:
limit = -1
return self._get_cythonized_result(
"group_fillna_indexer",
needs_mask=True,
cython_dtype=np.dtype(np.int64),
result_is_index=True,
direction=direction,
limit=limit,
)
@Substitution(name="groupby")
def pad(self, limit=None):
"""
Forward fill the values.
Parameters
----------
limit : int, optional
Limit of how many values to fill.
Returns
-------
Series or DataFrame
Object with missing values filled.
See Also
--------
Series.pad
DataFrame.pad
Series.fillna
DataFrame.fillna
"""
return self._fill("ffill", limit=limit)
ffill = pad
@Substitution(name="groupby")
def backfill(self, limit=None):
"""
Backward fill the values.
Parameters
----------
limit : int, optional
Limit of how many values to fill.
Returns
-------
Series or DataFrame
Object with missing values filled.
See Also
--------
Series.backfill
DataFrame.backfill
Series.fillna
DataFrame.fillna
"""
return self._fill("bfill", limit=limit)
bfill = backfill
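# Illustrative sketch (comments only, not part of the original source): the
# fills operate within each group, so values never propagate across group
# boundaries.
# >>> import pandas as pd
# >>> import numpy as np
# >>> df = pd.DataFrame({"A": ["a", "a", "b", "b"],
# ...                    "B": [1.0, np.nan, np.nan, 4.0]})
# >>> df.groupby("A")["B"].ffill()
# 0    1.0
# 1    1.0
# 2    NaN
# 3    4.0
# Name: B, dtype: float64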
@Substitution(name="groupby")
@Substitution(see_also=_common_see_also)
def nth(self, n: Union[int, List[int]], dropna: Optional[str] = None) -> DataFrame:
"""
Take the nth row from each group if n is an int, or a subset of rows
if n is a list of ints.
If dropna, will take the nth non-null row, dropna is either
'all' or 'any'; this is equivalent to calling dropna(how=dropna)
before the groupby.
Parameters
----------
n : int or list of ints
A single nth value for the row or a list of nth values.
dropna : None or str, optional
Apply the specified dropna operation before counting which row is
the nth row. Needs to be None, 'any' or 'all'.
Returns
-------
Series or DataFrame
N-th value within each group.
%(see_also)s
Examples
--------
>>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],
... 'B': [np.nan, 2, 3, 4, 5]}, columns=['A', 'B'])
>>> g = df.groupby('A')
>>> g.nth(0)
B
A
1 NaN
2 3.0
>>> g.nth(1)
B
A
1 2.0
2 5.0
>>> g.nth(-1)
B
A
1 4.0
2 5.0
>>> g.nth([0, 1])
B
A
1 NaN
1 2.0
2 3.0
2 5.0
Specifying `dropna` allows counting while ignoring ``NaN``
>>> g.nth(0, dropna='any')
B
A
1 2.0
2 3.0
NaNs denote group exhausted when using dropna
>>> g.nth(3, dropna='any')
B
A
1 NaN
2 NaN
Specifying `as_index=False` in `groupby` keeps the original index.
>>> df.groupby('A', as_index=False).nth(1)
A B
1 1 2.0
4 2 5.0
"""
valid_containers = (set, list, tuple)
if not isinstance(n, (valid_containers, int)):
raise TypeError("n needs to be an int or a list/set/tuple of ints")
if not dropna:
if isinstance(n, int):
nth_values = [n]
elif isinstance(n, valid_containers):
nth_values = list(set(n))
nth_array = np.array(nth_values, dtype=np.intp)
self._set_group_selection()
mask_left = np.in1d(self._cumcount_array(), nth_array)
mask_right = np.in1d(self._cumcount_array(ascending=False) + 1, -nth_array)
mask = mask_left | mask_right
ids, _, _ = self.grouper.group_info
# Drop NA values in grouping
mask = mask & (ids != -1)
out = self._selected_obj[mask]
if not self.as_index:
return out
result_index = self.grouper.result_index
out.index = result_index[ids[mask]]
if not self.observed and isinstance(result_index, CategoricalIndex):
out = out.reindex(result_index)
out = self._reindex_output(out)
return out.sort_index() if self.sort else out
# dropna is truthy
if isinstance(n, valid_containers):
raise ValueError("dropna option with a list of nth values is not supported")
if dropna not in ["any", "all"]:
# Note: when agg-ing picker doesn't raise this, just returns NaN
raise ValueError(
"For a DataFrame groupby, dropna must be "
"either None, 'any' or 'all', "
f"(was passed {dropna})."
)
# old behaviour, but with all and any support for DataFrames.
# modified in GH 7559 to have better perf
max_len = n if n >= 0 else -1 - n
dropped = self.obj.dropna(how=dropna, axis=self.axis)
# get a new grouper for our dropped obj
if self.keys is None and self.level is None:
# we don't have the grouper info available
# (e.g. we have selected out
# a column that is not in the current object)
axis = self.grouper.axis
grouper = axis[axis.isin(dropped.index)]
else:
# create a grouper with the original parameters, but on dropped
# object
from pandas.core.groupby.grouper import get_grouper
grouper, _, _ = get_grouper(
dropped,
key=self.keys,
axis=self.axis,
level=self.level,
sort=self.sort,
mutated=self.mutated,
)
grb = dropped.groupby(grouper, as_index=self.as_index, sort=self.sort)
sizes, result = grb.size(), grb.nth(n)
mask = (sizes < max_len).values
# set the results which don't meet the criteria
if len(result) and mask.any():
result.loc[mask] = np.nan
# reset/reindex to the original groups
if len(self.obj) == len(dropped) or len(result) == len(
self.grouper.result_index
):
result.index = self.grouper.result_index
else:
result = result.reindex(self.grouper.result_index)
return result
def quantile(self, q=0.5, interpolation: str = "linear"):
"""
Return group values at the given quantile, a la numpy.percentile.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
Value(s) between 0 and 1 providing the quantile(s) to compute.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
Method to use when the desired quantile falls between two points.
Returns
-------
Series or DataFrame
Return type determined by caller of GroupBy object.
See Also
--------
Series.quantile : Similar method for Series.
DataFrame.quantile : Similar method for DataFrame.
numpy.percentile : NumPy method to compute qth percentile.
Examples
--------
>>> df = pd.DataFrame([
... ['a', 1], ['a', 2], ['a', 3],
... ['b', 1], ['b', 3], ['b', 5]
... ], columns=['key', 'val'])
>>> df.groupby('key').quantile()
val
key
a 2.0
b 3.0
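
        Passing a list-like ``q`` computes several quantiles at once; the
        quantiles end up on the inner index level (illustrative output):

        >>> df.groupby('key').quantile([0.25, 0.75])
                  val
        key
        a   0.25  1.5
            0.75  2.5
        b   0.25  2.0
            0.75  4.0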
"""
from pandas import concat
def pre_processor(vals: np.ndarray) -> Tuple[np.ndarray, Optional[Type]]:
if is_object_dtype(vals):
raise TypeError(
"'quantile' cannot be performed against 'object' dtypes!"
)
inference = None
if is_integer_dtype(vals.dtype):
if is_extension_array_dtype(vals.dtype):
vals = vals.to_numpy(dtype=float, na_value=np.nan)
inference = np.int64
elif is_bool_dtype(vals.dtype) and is_extension_array_dtype(vals.dtype):
vals = vals.to_numpy(dtype=float, na_value=np.nan)
elif is_datetime64_dtype(vals.dtype):
inference = "datetime64[ns]"
vals = np.asarray(vals).astype(np.float)
return vals, inference
def post_processor(vals: np.ndarray, inference: Optional[Type]) -> np.ndarray:
if inference:
# Check for edge case
if not (
is_integer_dtype(inference)
and interpolation in {"linear", "midpoint"}
):
vals = vals.astype(inference)
return vals
if is_scalar(q):
return self._get_cythonized_result(
"group_quantile",
aggregate=True,
needs_values=True,
needs_mask=True,
cython_dtype=np.dtype(np.float64),
pre_processing=pre_processor,
post_processing=post_processor,
q=q,
interpolation=interpolation,
)
else:
results = [
self._get_cythonized_result(
"group_quantile",
aggregate=True,
needs_values=True,
needs_mask=True,
cython_dtype=np.dtype(np.float64),
pre_processing=pre_processor,
post_processing=post_processor,
q=qi,
interpolation=interpolation,
)
for qi in q
]
result = concat(results, axis=0, keys=q)
# fix levels to place quantiles on the inside
# TODO(GH-10710): Ideally, we could write this as
# >>> result.stack(0).loc[pd.IndexSlice[:, ..., q], :]
# but this hits https://github.com/pandas-dev/pandas/issues/10710
# which doesn't reorder the list-like `q` on the inner level.
order = list(range(1, result.index.nlevels)) + [0]
# temporarily saves the index names
index_names = np.array(result.index.names)
# set index names to positions to avoid confusion
result.index.names = np.arange(len(index_names))
# place quantiles on the inside
result = result.reorder_levels(order)
# restore the index names in order
result.index.names = index_names[order]
# reorder rows to keep things sorted
indices = np.arange(len(result)).reshape([len(q), self.ngroups]).T.flatten()
return result.take(indices)
@Substitution(name="groupby")
def ngroup(self, ascending: bool = True):
"""
Number each group from 0 to the number of groups - 1.
This is the enumerative complement of cumcount. Note that the
numbers given to the groups match the order in which the groups
would be seen when iterating over the groupby object, not the
order they are first observed.
Parameters
----------
ascending : bool, default True
If False, number in reverse, from number of group - 1 to 0.
Returns
-------
Series
Unique numbers for each group.
See Also
--------
.cumcount : Number the rows in each group.
Examples
--------
>>> df = pd.DataFrame({"A": list("aaabba")})
>>> df
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> df.groupby('A').ngroup()
0 0
1 0
2 0
3 1
4 1
5 0
dtype: int64
>>> df.groupby('A').ngroup(ascending=False)
0 1
1 1
2 1
3 0
4 0
5 1
dtype: int64
>>> df.groupby(["A", [1,1,2,3,2,1]]).ngroup()
0 0
1 0
2 1
3 3
4 2
5 0
dtype: int64
"""
with _group_selection_context(self):
index = self._selected_obj.index
result = self._obj_1d_constructor(self.grouper.group_info[0], index)
if not ascending:
result = self.ngroups - 1 - result
return result
@Substitution(name="groupby")
def cumcount(self, ascending: bool = True):
"""
Number each item in each group from 0 to the length of that group - 1.
Essentially this is equivalent to
.. code-block:: python
self.apply(lambda x: pd.Series(np.arange(len(x)), x.index))
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Returns
-------
Series
Sequence number of each element within each group.
See Also
--------
.ngroup : Number the groups themselves.
Examples
--------
>>> df = pd.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']],
... columns=['A'])
>>> df
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> df.groupby('A').cumcount()
0 0
1 1
2 2
3 0
4 1
5 3
dtype: int64
>>> df.groupby('A').cumcount(ascending=False)
0 3
1 2
2 1
3 1
4 0
5 0
dtype: int64
"""
with _group_selection_context(self):
index = self._selected_obj.index
cumcounts = self._cumcount_array(ascending=ascending)
return self._obj_1d_constructor(cumcounts, index)
@Substitution(name="groupby")
@Appender(_common_see_also)
def rank(
self,
method: str = "average",
ascending: bool = True,
na_option: str = "keep",
pct: bool = False,
axis: int = 0,
):
"""
Provide the rank of values within each group.
Parameters
----------
method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
* average: average rank of group.
* min: lowest rank in group.
* max: highest rank in group.
* first: ranks assigned in order they appear in the array.
* dense: like 'min', but rank always increases by 1 between groups.
ascending : bool, default True
False for ranks by high (1) to low (N).
na_option : {'keep', 'top', 'bottom'}, default 'keep'
* keep: leave NA values where they are.
* top: smallest rank if ascending.
* bottom: smallest rank if descending.
pct : bool, default False
Compute percentage rank of data within each group.
axis : int, default 0
The axis of the object over which to compute the rank.
Returns
-------
DataFrame with ranking of values within each group
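        Examples
        --------
        A minimal sketch (made-up frame; output shown is indicative):

        >>> df = pd.DataFrame({'group': ['a', 'a', 'a', 'b', 'b'],
        ...                    'value': [2, 4, 2, 3, 5]})
        >>> df.groupby('group')['value'].rank(method='min')
        0    1.0
        1    3.0
        2    1.0
        3    1.0
        4    2.0
        Name: value, dtype: float64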
"""
if na_option not in {"keep", "top", "bottom"}:
msg = "na_option must be one of 'keep', 'top', or 'bottom'"
raise ValueError(msg)
return self._cython_transform(
"rank",
numeric_only=False,
ties_method=method,
ascending=ascending,
na_option=na_option,
pct=pct,
axis=axis,
)
@Substitution(name="groupby")
@Appender(_common_see_also)
def cumprod(self, axis=0, *args, **kwargs):
"""
Cumulative product for each group.
Returns
-------
Series or DataFrame
"""
nv.validate_groupby_func("cumprod", args, kwargs, ["numeric_only", "skipna"])
if axis != 0:
return self.apply(lambda x: x.cumprod(axis=axis, **kwargs))
return self._cython_transform("cumprod", **kwargs)
@Substitution(name="groupby")
@Appender(_common_see_also)
def cumsum(self, axis=0, *args, **kwargs):
"""
Cumulative sum for each group.
Returns
-------
Series or DataFrame
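        Examples
        --------
        A minimal sketch (made-up frame; output shown is indicative):

        >>> df = pd.DataFrame({'A': ['a', 'a', 'b', 'b'], 'B': [1, 2, 3, 4]})
        >>> df.groupby('A').cumsum()
           B
        0  1
        1  3
        2  3
        3  7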
"""
nv.validate_groupby_func("cumsum", args, kwargs, ["numeric_only", "skipna"])
if axis != 0:
return self.apply(lambda x: x.cumsum(axis=axis, **kwargs))
return self._cython_transform("cumsum", **kwargs)
@Substitution(name="groupby")
@Appender(_common_see_also)
def cummin(self, axis=0, **kwargs):
"""
Cumulative min for each group.
Returns
-------
Series or DataFrame
"""
if axis != 0:
return self.apply(lambda x: np.minimum.accumulate(x, axis))
return self._cython_transform("cummin", numeric_only=False)
@Substitution(name="groupby")
@Appender(_common_see_also)
def cummax(self, axis=0, **kwargs):
"""
Cumulative max for each group.
Returns
-------
Series or DataFrame
"""
if axis != 0:
return self.apply(lambda x: np.maximum.accumulate(x, axis))
return self._cython_transform("cummax", numeric_only=False)
def _get_cythonized_result(
self,
how: str,
cython_dtype: np.dtype,
aggregate: bool = False,
needs_values: bool = False,
needs_mask: bool = False,
needs_ngroups: bool = False,
result_is_index: bool = False,
pre_processing=None,
post_processing=None,
**kwargs,
):
"""
Get result for Cythonized functions.
Parameters
----------
how : str, Cythonized function name to be called
cython_dtype : np.dtype
Type of the array that will be modified by the Cython call.
aggregate : bool, default False
Whether the result should be aggregated to match the number of
groups
needs_values : bool, default False
Whether the values should be a part of the Cython call
signature
needs_mask : bool, default False
Whether boolean mask needs to be part of the Cython call
signature
needs_ngroups : bool, default False
Whether number of groups is part of the Cython call signature
result_is_index : bool, default False
Whether the result of the Cython operation is an index of
values to be retrieved, instead of the actual values themselves
pre_processing : function, default None
Function to be applied to `values` prior to passing to Cython.
Function should return a tuple where the first element is the
values to be passed to Cython and the second element is an optional
type which the values should be converted to after being returned
by the Cython operation. Raises if `needs_values` is False.
post_processing : function, default None
Function to be applied to result of Cython function. Should accept
an array of values as the first argument and type inferences as its
second argument, i.e. the signature should be
(ndarray, Type).
**kwargs : dict
Extra arguments to be passed back to Cython funcs
Returns
-------
`Series` or `DataFrame` with filled values
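        Notes
        -----
        Internal helper; as a concrete illustration, ``_fill`` above
        dispatches to it roughly as::

            self._get_cythonized_result(
                "group_fillna_indexer",
                needs_mask=True,
                cython_dtype=np.dtype(np.int64),
                result_is_index=True,
                direction=direction,
                limit=limit,
            )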
"""
if result_is_index and aggregate:
raise ValueError("'result_is_index' and 'aggregate' cannot both be True!")
if post_processing:
            if not callable(post_processing):
raise ValueError("'post_processing' must be a callable!")
if pre_processing:
if not callable(pre_processing):
raise ValueError("'pre_processing' must be a callable!")
if not needs_values:
raise ValueError(
"Cannot use 'pre_processing' without specifying 'needs_values'!"
)
grouper = self.grouper
labels, _, ngroups = grouper.group_info
output: Dict[base.OutputKey, np.ndarray] = {}
base_func = getattr(libgroupby, how)
for idx, obj in enumerate(self._iterate_slices()):
name = obj.name
values = obj._values
if aggregate:
result_sz = ngroups
else:
result_sz = len(values)
result = np.zeros(result_sz, dtype=cython_dtype)
func = partial(base_func, result, labels)
inferences = None
if needs_values:
vals = values
if pre_processing:
vals, inferences = pre_processing(vals)
func = partial(func, vals)
if needs_mask:
                mask = isna(values)
import pandas as pd
import numpy as np
from tqdm import tqdm
from Bio.PDB import Selection, PDBParser
"""
This script is to extract beads from the predicted structures in CASP13 and CASP14 after the competitions.
"""
def extract_beads(pdb_path):
amino_acids = pd.read_csv('/home/hyang/bio/erf/data/amino_acids.csv')
vocab_aa = [x.upper() for x in amino_acids.AA3C]
vocab_dict = {x.upper(): y for x, y in zip(amino_acids.AA3C, amino_acids.AA)}
p = PDBParser()
structure = p.get_structure('X', pdb_path)
residue_list = Selection.unfold_entities(structure, 'R')
ca_center_list = []
cb_center_list = []
res_name_list = []
res_num_list = []
chain_list = []
for res in residue_list:
if res.get_resname() not in vocab_aa:
# raise ValueError('protein has non natural amino acids')
continue
chain_list.append(res.parent.id)
res_name_list.append(vocab_dict[res.get_resname()])
res_num_list.append(res.id[1])
try:
ca_center_list.append(res['CA'].get_coord())
except KeyError:
return 0
if res.get_resname() != 'GLY':
try:
cb_center_list.append(res['CB'].get_coord())
except KeyError:
return 0
else:
cb_center_list.append(res['CA'].get_coord())
ca_center = np.vstack(ca_center_list)
cb_center = np.vstack(cb_center_list)
df = pd.DataFrame({'chain_id': chain_list,
'group_num': res_num_list,
'group_name': res_name_list,
'x': ca_center[:, 0],
'y': ca_center[:, 1],
'z': ca_center[:, 2],
'xcb': cb_center[:, 0],
'ycb': cb_center[:, 1],
'zcb': cb_center[:, 2]})
df.to_csv(f'{pdb_path}_bead.csv', index=False)
return 1
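# Illustrative usage (the path below is hypothetical):
#   status = extract_beads('/some/dir/T1024/model1.pdb')
# On success this writes '/some/dir/T1024/model1.pdb_bead.csv' and returns 1;
# it returns 0 if any residue is missing its CA (or CB) coordinates.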
def extract_casp13_14():
# casp_id = 'casp13'
casp_id = 'casp14'
root_dir = f'/home/hyang/bio/erf/data/decoys/{casp_id}/'
casp = pd.read_csv(f'{root_dir}/pdb_list.txt')['pdb'].values
modified_casp_id = []
for casp_id in tqdm(casp):
pdb_list = pd.read_csv(f'{root_dir}/{casp_id}/flist.txt')['pdb'].values
ca_only_list = []
for i, pdb_id in enumerate(pdb_list):
pdb_path = f'{root_dir}/{casp_id}/{pdb_id}'
result = extract_beads(pdb_path)
if result == 0:
# some structure prediction only has CA.
ca_only_list.append(pdb_id)
pdb_list[i] = '0'
if len(ca_only_list) > 0:
pdb_list = pdb_list[pdb_list != '0']
            df = pd.DataFrame({'pdb': pdb_list})
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import geoutilities.ut_basic as gut_b
import geoutilities.ut_data_loading as gut_dl
import geoutilities.ut_distributions as gut_dist
import geoutilities.ut_taskmanager as gut_tm
import geoutilities.ut_graphing as gut_gr
from constants import Constants
class LTFile:
def __init__(self, name, filename, sheetname, time_range):
self.name = name
self.filename = filename
self.sheetname = sheetname
self.time_range = time_range
class TaskManager (gut_tm.TaskManager):
lt_file2 = 'Qs_t2990-3110.xlsx'
lt_2870_meta = LTFile(
name = 't2870',
filename = 'Qs_t2870-2990.xlsx',
sheetname = 'Qs_t2870 LT',
time_range = '2870-2990')
lt_2990_meta = LTFile(
        name = 't2990',
filename = 'Qs_t2990-3110.xlsx',
sheetname = 'Qs_t2990-3110',
time_range = '2990-3110')
def __init__(self):
self.grapher = gut_gr.Grapher()
self.plots_pending = False
self.all_plots = False
gut_b.VERBOSE = Constants.verbose
options = [
('--plot-all', self.plot_all, 'Plot distributions.'),
('--c-distr', self.compare_gsd, 'Perform grain size distribution comparisons.'),
]
gut_tm.TaskManager.__init__(self, options)
if self.plots_pending:
self.grapher.show_plots()
def load_data(self, reload=False):
self.loader = gut_dl.DataLoader(Constants.data_path)
names = Constants.pickle_names
if Constants.reload_data or reload or not self.loader.is_pickled(names.values()):
gut_b.printer("Pickles do not exist (or forced reloading). Loading excel files...")
self.load_peak_data()
else:
gut_b.printer(" Pickles present! Unpacking pickles...")
self.hs_data = self.loader.load_pickle(names['hs'])
self.lt_data = self.loader.load_pickle(names['lt'])
gut_b.printer(" Pickles unpacked!")
def load_peak_data(self):
loader = gut_dl.DataLoader(data_path = Constants.data_path)
gut_b.printer("Loading data...")
self.load_helly_smith(loader)
self.load_light_table(loader)
gut_b.printer(" Done loading files!")
prepickles = {Constants.pickle_names['hs'] : self.hs_data,
Constants.pickle_names['lt'] : self.lt_data}
self.loader.produce_pickles(prepickles)
def load_helly_smith(self, loader):
## <NAME> Data
gut_b.printer(" Loading <NAME> data...")
hs_kwargs = {
'sheetname' : 'Sheet1',
'header' : 0,
'skiprows' : 1,
'index_col' : [2, 3],
'parse_cols' : 19,
'na_values' : 'ALL PAINT',
}
hs_data = loader.load_xlsx('helly_smith_data.xlsx', hs_kwargs)
# Clean data
gut_b.printer(" Cleaning Helly Smith data...")
# Sort data so indexing works
hs_data.sort_index(axis=0, inplace=True)
gut_b.printer(" Dropping unnecessary columns...")
hs_data.drop(['Date', 'ID', 32], axis=1, inplace=True)
# Select rows 't2890', 't2950', 't3010', 't3070' for my analysis
gut_b.printer(" Selecting times between 2870 to 3110...")
hs_data = hs_data.loc[pd.IndexSlice['t2890':'t3070',:],:]
gut_b.printer(" Reformatting labels...")
index_labels = hs_data.index.values
hs_data.index = pd.MultiIndex.from_tuples(index_labels)
hs_data= hs_data.T.iloc[::-1] # flip order
self.hs_data = hs_data
gut_b.printer(" Done with <NAME> data!")
def load_light_table(self, loader):
## Light table data
# To prepare the light table files for this program:
# - -
printer = gut_b.printer
printer(" Loading light table data...")
# Load files
lt_kwargs = {
'sheetname' : None,
'skiprows' : 2,
'header' : 0,
'skip_footer': 4,
'index_col' : 0,
'parse_cols' : 44,
'na_values' : 'ALL PAINT',
}
files = [TaskManager.lt_2870_meta,
TaskManager.lt_2990_meta,
]
lt_partials = []
first = True
last_index_max = 0
for meta in files:
name = meta.name
filename = meta.filename
sheetname = meta.sheetname
time_range = meta.time_range
printer(" Loading light table {} data...".format(time_range))
lt_kwargs['sheetname'] = sheetname
lt_partial = loader.load_xlsx(filename, lt_kwargs)
# Clean data
printer(" Cleaning light table {} data...".format(time_range))
# Sort data so indexing works
lt_partial.sort_index(axis=0, inplace=True)
#lt_partial.sort_index(axis=1, inplace=True)
printer(" Dropping unnecessary columns ({})...".format(name))
# Note that with pandas.load_xlsx, repeated column names get a
# ".{x}" added to the end, where x is the number of repetitions. So
            # if the excel file has two columns named "0.71", then the first
# column is "0.71" and the second one is "0.71.1" and third would
# be "0.71.2" and so on.
drop_list = ['time sec', 'missing ratio', 'vel', 'sd vel',
'number vel', 'count stones'] + ["{}.1".format(gs) for gs in [
"0.5", "0.71", "1", "1.4", "2", "2.8", "4", "5.6",
"8", "11.2", "16", "22", "32", "45"]]
lt_partial.drop(drop_list, axis=1, inplace=True)
printer(" Reformatting labels...")
lt_partial.rename(columns={'Bedload transport':'Total (g)'}, inplace=True)
print(lt_partial.columns)
if first:
first = False
else:
# Drop the first row of data if not the first dataset; to
# prevent overlapping rows.
lt_partial.drop(0, axis=0, inplace=True)
printer(" Resetting index values...")
partial_times = lt_partial.index.values + last_index_max
last_index_max = np.max(partial_times)
lt_partial.index = pd.Index(partial_times)
# Save the partial data to a list
lt_partials.append(lt_partial)
printer(" Combining data into one structure...")
lt_combined = pd.concat(lt_partials)
self.lt_data = lt_combined
printer(" Done with light table data!")
def plot_all(self):
self.all_plots = True
def compare_gsd(self):
# Things to do:
# Generate 5 minute windows of average LT data
# compare HS to moving LT window
# | start at HS time? or calc estimated lag time?
# | match both distribution and total mass
# pick "best fit" times
# generate graphs for human review
# repeat for each HS time step
#
# Note: plots will not be shown unless --plot-all option given
self.plot_lt_totals()
# Set up HS mather
self.hs_mather = gut_dist.PDDistributions(self.hs_data, Constants.hs_max_size)
# Average the 5 HS data for each time step
time_sums, time_cumsums = self.calc_overall_sampler()
self.plot_HS_averages(time_cumsums)
# Prepare rolling LT data
self.gen_lt_windows()
self.plot_windowed_lt()
# Compare HS to traveling window
lt_windows = self.windowed_lt.T
hs_time_sums = time_sums
compare = gut_dist.PDDistributions.compare_distributions
max_size = max(Constants.hs_max_size, np.amax(Constants.lt_size_classes))
self.difference = compare(lt_windows, hs_time_sums, 0, max_size)
# Remove values that occur before the sampling time
hs_sample_times_str = self.difference.columns.values
hs_sample_times = Constants.hs_sample_times
for str, time in zip(hs_sample_times_str, hs_sample_times):
self.difference.loc[:time,str] = np.NAN
self.plot_difference(force=True)
def gen_lt_windows(self):
# Generate 5 minute LT windows
#
# Can skip up to tolerance blank rows
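        # (for example, if window_duration were 300 s and window_tolerance
        #  were 0.1, up to 30 missing rows would be tolerated per window)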
n_sec = Constants.window_duration
tolerance = int(n_sec * Constants.window_tolerance)
lt_data = self.lt_data.loc[1:, Constants.lt_size_classes]
self.window_roll = lt_data.rolling(window=n_sec, min_periods=n_sec-tolerance)
self.windowed_lt = self.window_roll.sum().loc[n_sec:]
def calc_overall_sampler(self):
# Calculate the overall distribution from the selected columns
#
# Sum the values in each grain size class then calc new cumsum
# distributions
#
# Raw values are summed b/c normalized distributions would need
# weights, which are based on the raw values anyway.
data = self.hs_mather.data
cumsum = self.hs_mather.cumsum
sizes = data.index
times = cumsum.columns.levels[0]
        time_sums = pd.DataFrame(index=sizes, columns=times)
import pandas as pd
from pyspark import SparkConf
from pyspark.sql import SparkSession
from pyspark.sql import functions as F
from statistics import mean
from pyspark.sql.types import StringType
from sklearn.exceptions import ConvergenceWarning
import argparse, sys, flexmatcher, warnings, time
warnings.filterwarnings("ignore", category=FutureWarning)
warnings.filterwarnings("ignore", category=ConvergenceWarning)
class FlexMatcher_benchmark:
def __init__(self):
self.timeReadingT = {}
self.timeTraining = {}
self.timePredicting = {}
self.datasets = {}
self.schemas = {}
self.attributesNumber = 0
def readDataset(self, file, sep, multi, nullVal, trailing):
return spark.read \
.option("header", "true") \
.option("inferSchema", "true") \
.option("delimiter", sep) \
.option("multiline", multi) \
.option("quote", "\"") \
.option("escape", "\"") \
.option("nullValue", nullVal) \
.option("maxCharsPerColumn", 1000001) \
.option("ignoreTrailingWhiteSpace", trailing) \
.csv(file)
    def getDatasets(self, pathInfo, pathDatasets, testbed):
        dsInfo = self.readDataset(pathInfo, ",", "false", "", "true")
        print("Testbed {} has {} datasets".format(testbed, dsInfo.count()))
for row in dsInfo.select("filename", "delimiter", "multiline", "nullVal", "file_size",
"ignoreTrailing").distinct().collect():
# hail-2015.csv does not work with FlexMatcher profile.
if row['filename'] != "hail-2015.csv":
print("reading: {}".format(row['filename']))
startReading = time.time()
ds = self.readDataset(pathDatasets + "/"+row['filename'], row['delimiter'],
row['multiline'], row['nullVal'],row['ignoreTrailing'])
endReading = time.time()
totalTimeReading = abs(endReading - startReading)
self.timeReadingT[row['filename']] = totalTimeReading/60
# variable to keep string attributes names
attStr = []
for f in ds.schema.fields:
if isinstance(f.dataType, StringType):
attStr.append(f.name)
else:
ds = ds.drop(f.name)
self.datasets[row['filename']] = ds.toPandas()
self.schemas[row['filename']] = attStr
def start(self,pathInfo,pathDatasets,testbed, pathOutput):
self.getDatasets(pathInfo,pathDatasets,testbed)
columnsDF = ["query dataset", "query attribute", "candidate dataset", "candidate attribute"]
data = []
self.attributesNumber = 0
datasetsN = self.datasets.keys()
for queryDataset in datasetsN:
print("Using querydataset {}".format(queryDataset))
mapping = {}
self.attributesNumber += len(self.schemas[queryDataset])
for q in self.schemas[queryDataset]:
mapping[q] = q
if len(self.schemas[queryDataset]) == 1:
                # If the query dataset has only one column,
                # we create an extra column to act as the negative column for training
mapping["negativeColumnBench"] = "negativeColumnBench"
self.datasets[queryDataset].insert(0,"negativeColumnBench","lorem ipsum")
# We create the model for the query dataset
startTraining = time.time()
fm = flexmatcher.FlexMatcher([self.datasets[queryDataset]],[mapping])
fm.train()
endTraining = time.time()
timeTraining = endTraining - startTraining
self.timeTraining[queryDataset] = (abs(timeTraining))/60
auxTimePredicting = 0
for filename, candidateDataset in self.datasets.items():
if filename != queryDataset:
startPredicting = time.time()
predicted_mapping = fm.make_prediction(candidateDataset, True)
endPredicting = time.time()
auxTimePredicting = auxTimePredicting + abs(startPredicting - endPredicting)
for key, pred in predicted_mapping.items():
# pred, score = value
if pred != "negativeColumnBench" and key != "negativeColumnBench":
data.append([queryDataset, pred, filename, key])
self.timePredicting[queryDataset] = auxTimePredicting /60
        df = pd.DataFrame(data, columns=columnsDF)
import pandas as pd
import numpy as np
from sys import argv
SCORES = {'Beethoven': [], 'Chopin': [], 'Debussy': [],
'Jarrett': [], 'Tatum': []}
PAIRS = {0: ['Beethoven', 'Chopin'], 1: ['Debussy', 'Beethoven'],
2: ['Beethoven', 'Jarrett'], 3: ['Tatum', 'Beethoven'],
4: ['Chopin', 'Debussy'], 5: ['Jarrett', 'Chopin'],
6: ['Chopin', 'Tatum'], 7: ['Debussy', 'Jarrett'],
8: ['Tatum', 'Debussy'], 9: ['Jarrett', 'Tatum']}
fn = argv[1] if len(argv) > 1 else ''
with open(fn, 'r') as f:
lines = list(f.readlines())
contents = []
lines = lines[1:]
for line in lines:
temp = line.split(',')
content = []
for i in range(len(temp)):
if 'first' in temp[i].lower():
if 'definitely' in temp[i+1].lower():
content.append(1.)
elif 'somewhat' in temp[i+1].lower():
content.append(2.)
elif 'second' in temp[i].lower():
if 'definitely' in temp[i+1].lower():
content.append(5.)
elif 'somewhat' in temp[i+1].lower():
content.append(4.)
elif 'neutral' in temp[i].lower():
content.append(2.5)
contents.append(content)
df = pd.DataFrame.from_records(contents)
import pandas
import numpy as np
import re
import os
import glob
# LSV TYPE specifies whether the LSV is anchored on a source or a target exon: s|... or t|...
# LSV TYPE may be composed of several splicing events, each written as AeB.CoD
# LSV TYPE may contain an intron retention, which is flagged at the end as ...|i
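# Illustrative example: a value such as 's|1e1.1o2|2e1.1o2|i' starts with 's'
# (source exon), is followed by AeB.CoD fields as parsed below (A precedes 'e',
# B sits between 'e' and '.', C between '.' and 'o', D follows 'o'), and ends
# with 'i' when an intron-retention junction is present.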
def main():
scriptdir = os.path.dirname(os.path.realpath(__file__))
control = snakemake.params[0]
test = snakemake.params[1]
voilafile = scriptdir+'/../../results/Voila/'+control+'_'+test+'.tsv'
voilatsv = pandas.read_csv(voilafile, header=0, comment='#', sep='\t')
deseqfile = scriptdir+'/../../results/Diff_Exp/deseq2_results.tsv'
deseq = pandas.read_csv(deseqfile,header=0,sep='\t')
majiqfiles = glob.glob(scriptdir+'/../../results/MAJIQ/build_'+control+'_'+test+'/*.majiq')
fulldf,fulldfir = extract_events(voilatsv,control,test,deseq)
fulldf1 = remove_low_dpsi_and_proba(fulldf,0.1,0.9)
fulldf2 = remove_low_dpsi_and_proba(fulldf,0.2,0.9)
fulldfir1 = remove_low_dpsi_and_proba(fulldfir,0.1,0.9)
fulldfir2 = remove_low_dpsi_and_proba(fulldfir,0.2,0.9)
allrep,controlcol,testcol = explore_majiq_files(control,test,majiqfiles)
fulldf1 = add_reads(control,test,allrep,controlcol,testcol,fulldf1)
fulldf2 = add_reads(control,test,allrep,controlcol,testcol,fulldf2)
fulldfir1 = add_reads(control,test,allrep,controlcol,testcol,fulldfir1,'IR')
fulldfir2 = add_reads(control,test,allrep,controlcol,testcol,fulldfir2,'IR')
outputdir = scriptdir+'/../../results/Clean_AS_Event/'
get_only_X_event(control,test,fulldf1,fulldf2,'ES',outputdir)
get_only_X_event(control,test,fulldf1,fulldf2,'A5SS',outputdir)
get_only_X_event(control,test,fulldf1,fulldf2,'A3SS',outputdir)
get_only_X_event(control,test,fulldfir1,fulldfir2,'IR',outputdir)
def initdf(cond1,cond2):
newdf = pandas.DataFrame(columns=['gene_name','gene_id','lsv_id',
'mean_dpsi_per_lsv_junction','probability_changing',
'probability_non_changing',cond1+'_mean_psi',
cond2+'_mean_psi','de_novo_junctions',
'strand','place_constitutive_exon','seqid','junctions_coords','skipped_exons_coords',
'ES','A5SS','A3SS'])
return newdf
def initdfIR(cond1,cond2):
newdf = pandas.DataFrame(columns=['gene_name','gene_id','lsv_id',
'mean_dpsi_per_lsv_junction','probability_changing',
'probability_non_changing',cond1+'_mean_psi',
cond2+'_mean_psi','de_novo_junctions',
'strand','place_constitutive_exon','seqid','junctions_coords',
'ir_coords','IR'])
return newdf
def extract_events(voilatsv,cond1,cond2,deseq):
fulldf = initdf(cond1,cond2)
fulldfir = initdfIR(cond1,cond2)
for index,row in voilatsv.iterrows():
lsvtype = row['lsv_type']
lsvsplit = lsvtype.split('|')
target = lsvsplit[0]
if 'na' in lsvsplit :
continue
if target == 't' :
listB = []
listC = []
maxD = 1
for event in lsvsplit :
if event not in ['t','i']:
b = int(re.search('e(.*)\.',event).group(1))
listB.append(b)
d = int(re.search('o(.*)',event).group(1))
if d > maxD :
maxD = d
c = int(re.search('\.(.*)o',event).group(1))
listC.append(c)
maxB = max(listB)
for index in range (1,len(lsvsplit)) :
if index == len(lsvsplit)-1 and lsvsplit[index] == 'i':
dfevent = initdfIR(cond1,cond2)
dfevent.loc[0,'ir_coords'] = row['ir_coords']
dfevent.loc[0,'IR'] = 'TRUE'
else :
dfevent = initdf(cond1,cond2)
dfevent.loc[0,'ES'] = 'FALSE' # INIT VALUE
dfevent.loc[0,'A5SS'] = 'FALSE' # INIT VALUE
dfevent.loc[0,'A3SS'] = 'FALSE' # INIT VALUE
dfevent.loc[0,'gene_name'] = row['gene_name']
dfevent.loc[0,'gene_id'] = row['gene_id']
dfevent.loc[0,'lsv_id'] = row['lsv_id']
dfevent.loc[0,'mean_dpsi_per_lsv_junction'] = row['mean_dpsi_per_lsv_junction'].split(';')[index-1]
dfevent.loc[0,'probability_changing'] = row['probability_changing'].split(';')[index-1]
dfevent.loc[0,'probability_non_changing'] = row['probability_non_changing'].split(';')[index-1]
dfevent.loc[0,cond1+'_mean_psi'] = row[cond1+'_mean_psi'].split(';')[index-1]
dfevent.loc[0,cond2+'_mean_psi'] = row[cond2+'_mean_psi'].split(';')[index-1]
                dfevent.loc[0,'de_novo_junctions'] = row['de_novo_junctions'].split(';')[index-1]  # TODO: change to a bool later
dfevent.loc[0,'strand'] = row['strand']
dfevent.loc[0,'place_constitutive_exon'] = 'TARGET'
dfevent.loc[0,'seqid'] = row['seqid']
dfevent.loc[0,'junctions_coords'] = row['junctions_coords'].split(';')[index-1]
if lsvsplit[index] != 'i':
a = int(re.search('^(.*)e',lsvsplit[index]).group(1))
b = int(re.search('e(.*)\.',lsvsplit[index]).group(1))
c = int(re.search('\.(.*)o',lsvsplit[index]).group(1))
d = int(re.search('o(.*)',lsvsplit[index]).group(1))
diffc = False
indexB = [i for i,value in enumerate(listB) if value==b]
for i in indexB :
if listC[i] != c :
diffc = True
if b != maxB :
dfevent.loc[0,'ES'] = 'TRUE'
skippedex = [row['seqid']+':' + s for s in row['exons_coords'].split(';')[1:-b]]
dfevent.loc[0,'skipped_exons_coords'] = ' '.join(skippedex)
if d != 1 and c != maxD and listB.count(b) > 1 and diffc == True :
dfevent.loc[0,'A5SS'] = 'TRUE'
if a != 1 :
dfevent.loc[0,'A3SS'] = 'TRUE'
if index == len(lsvsplit)-1 and lsvsplit[index] == 'i':
fulldfir = pandas.concat([fulldfir,dfevent])
else :
fulldf = pandas.concat([fulldf,dfevent])
elif target == 's':
maxA = 1
listB = []
listC = []
for event in lsvsplit :
if event not in ['s','i']:
a = int(re.search('(.*)e',event).group(1))
if a > maxA :
maxA = a
b = int(re.search('e(.*)\.',event).group(1))
listB.append(b)
c = int(re.search('\.(.*)o',event).group(1))
listC.append(c)
for index in range (1,len(lsvsplit)) :
if index == len(lsvsplit)-1 and lsvsplit[index] == 'i':
dfevent = initdfIR(cond1,cond2)
dfevent.loc[0,'ir_coords'] = row['ir_coords']
dfevent.loc[0,'IR'] = 'TRUE'
else :
dfevent = initdf(cond1,cond2)
dfevent.loc[0,'ES'] = 'FALSE' # INIT VALUE
dfevent.loc[0,'A5SS'] = 'FALSE' # INIT VALUE
dfevent.loc[0,'A3SS'] = 'FALSE' # INIT VALUE
dfevent.loc[0,'gene_name'] = row['gene_name']
dfevent.loc[0,'gene_id'] = row['gene_id']
dfevent.loc[0,'lsv_id'] = row['lsv_id']
dfevent.loc[0,'mean_dpsi_per_lsv_junction'] = row['mean_dpsi_per_lsv_junction'].split(';')[index-1]
dfevent.loc[0,'probability_changing'] = row['probability_changing'].split(';')[index-1]
dfevent.loc[0,'probability_non_changing'] = row['probability_non_changing'].split(';')[index-1]
dfevent.loc[0,cond1+'_mean_psi'] = row[cond1+'_mean_psi'].split(';')[index-1]
dfevent.loc[0,cond2+'_mean_psi'] = row[cond2+'_mean_psi'].split(';')[index-1]
                dfevent.loc[0,'de_novo_junctions'] = row['de_novo_junctions'].split(';')[index-1]  # TODO: change to a bool later?
dfevent.loc[0,'strand'] = row['strand']
dfevent.loc[0,'place_constitutive_exon'] = 'SOURCE'
dfevent.loc[0,'seqid'] = row['seqid']
dfevent.loc[0,'junctions_coords'] = row['junctions_coords'].split(';')[index-1]
if lsvsplit[index] != 'i':
a = int(re.search('^(.*)e',lsvsplit[index]).group(1))
b = int(re.search('e(.*)\.',lsvsplit[index]).group(1))
c = int(re.search('\.(.*)o',lsvsplit[index]).group(1))
d = int(re.search('o(.*)',lsvsplit[index]).group(1))
diffc = False
indexB = [i for i,value in enumerate(listB) if value==b]
for i in indexB :
if listC[i] != c :
diffc = True
if b != 1 :
dfevent.loc[0,'ES'] = 'TRUE'
skippedex = [row['seqid']+':'+ s for s in row['exons_coords'].split(';')[1:b]]
dfevent.loc[0,'skipped_exons_coords'] = ' '.join(skippedex)
if a != maxA :
dfevent.loc[0,'A5SS'] = 'TRUE'
if d != 1 and c != 1 and listB.count(b) > 1 and diffc == True:
dfevent.loc[0,'A3SS'] = 'TRUE'
if index == len(lsvsplit)-1 and lsvsplit[index] == 'i':
fulldfir = pandas.concat([fulldfir,dfevent])
else :
                    fulldf = pandas.concat([fulldf,dfevent])
# -*- coding: utf-8 -*-
"""System operating cost plots.
This module plots figures related to the cost of operating the power system.
Plots can be broken down by cost categories, generator types etc.
@author: <NAME>
"""
import logging
import pandas as pd
import marmot.config.mconfig as mconfig
from marmot.plottingmodules.plotutils.plot_library import PlotLibrary
from marmot.plottingmodules.plotutils.plot_data_helper import PlotDataHelper
from marmot.plottingmodules.plotutils.plot_exceptions import (MissingInputData, MissingZoneData)
class MPlot(PlotDataHelper):
"""production_cost MPlot class.
All the plotting modules use this same class name.
This class contains plotting methods that are grouped based on the
current module name.
The production_cost.py module contains methods that are
    related to the cost of operating the power system.
MPlot inherits from the PlotDataHelper class to assist in creating figures.
"""
def __init__(self, argument_dict: dict):
"""
Args:
argument_dict (dict): Dictionary containing all
arguments passed from MarmotPlot.
"""
# iterate over items in argument_dict and set as properties of class
# see key_list in Marmot_plot_main for list of properties
for prop in argument_dict:
self.__setattr__(prop, argument_dict[prop])
# Instantiation of MPlotHelperFunctions
super().__init__(self.Marmot_Solutions_folder, self.AGG_BY, self.ordered_gen,
self.PLEXOS_color_dict, self.Scenarios, self.ylabels,
self.xlabels, self.gen_names_dict, self.TECH_SUBSET,
Region_Mapping=self.Region_Mapping)
self.logger = logging.getLogger('marmot_plot.'+__name__)
def prod_cost(self, start_date_range: str = None,
end_date_range: str = None, custom_data_file_path: str = None,
**_):
"""Plots total system net revenue and cost normalized by the installed capacity of the area.
Total revenue is made up of reserve and energy revenues which are displayed in a stacked
        bar plot with total generation cost. Net revenue is represented by a dot.
        Each scenario is plotted as a separate bar.
Args:
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines a end date at which to represent data to.
Defaults to None.
custom_data_file_path (str, optional): Path to custom data file to concat extra
data. Index and column format should be consistent with output data csv.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True, "generator_Total_Generation_Cost", self.Scenarios),
(True, "generator_Pool_Revenue", self.Scenarios),
(True, "generator_Reserves_Revenue", self.Scenarios),
(True, "generator_Installed_Capacity", self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
# Checks if all data required by plot is available, if 1 in list required data is missing
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
total_cost_chunk = []
self.logger.info(f"{self.AGG_BY} = {zone_input}")
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
Total_Systems_Cost = pd.DataFrame()
Total_Installed_Capacity = self["generator_Installed_Capacity"].get(scenario)
#Check if zone has installed generation, if not skips
try:
Total_Installed_Capacity = Total_Installed_Capacity.xs(zone_input,level=self.AGG_BY)
except KeyError:
self.logger.warning(f"No installed capacity in : {zone_input}")
continue
Total_Installed_Capacity = self.df_process_gen_inputs(Total_Installed_Capacity)
Total_Installed_Capacity.reset_index(drop=True, inplace=True)
Total_Installed_Capacity = Total_Installed_Capacity.iloc[0]
Total_Gen_Cost = self["generator_Total_Generation_Cost"].get(scenario)
Total_Gen_Cost = Total_Gen_Cost.xs(zone_input,level=self.AGG_BY)
Total_Gen_Cost = self.df_process_gen_inputs(Total_Gen_Cost)
Total_Gen_Cost = Total_Gen_Cost.sum(axis=0)*-1
# Total_Gen_Cost = Total_Gen_Cost/Total_Installed_Capacity #Change to $/MW-year
Total_Gen_Cost.rename("Total_Gen_Cost", inplace=True)
Pool_Revenues = self["generator_Pool_Revenue"].get(scenario)
Pool_Revenues = Pool_Revenues.xs(zone_input,level=self.AGG_BY)
Pool_Revenues = self.df_process_gen_inputs(Pool_Revenues)
Pool_Revenues = Pool_Revenues.sum(axis=0)
# Pool_Revenues = Pool_Revenues/Total_Installed_Capacity #Change to $/MW-year
Pool_Revenues.rename("Energy_Revenues", inplace=True)
### Might change to Net Reserve Revenue at later date
Reserve_Revenues = self["generator_Reserves_Revenue"].get(scenario)
Reserve_Revenues = Reserve_Revenues.xs(zone_input,level=self.AGG_BY)
Reserve_Revenues = self.df_process_gen_inputs(Reserve_Revenues)
Reserve_Revenues = Reserve_Revenues.sum(axis=0)
# Reserve_Revenues = Reserve_Revenues/Total_Installed_Capacity #Change to $/MW-year
Reserve_Revenues.rename("Reserve_Revenues", inplace=True)
Total_Systems_Cost = pd.concat([Total_Systems_Cost, Total_Gen_Cost,
Pool_Revenues, Reserve_Revenues],
axis=1, sort=False)
Total_Systems_Cost.columns = Total_Systems_Cost.columns.str.replace('_',' ')
Total_Systems_Cost = Total_Systems_Cost.sum(axis=0)
Total_Systems_Cost = Total_Systems_Cost.rename(scenario)
total_cost_chunk.append(Total_Systems_Cost)
Total_Systems_Cost_Out = pd.concat(total_cost_chunk, axis=1, sort=False)
Total_Systems_Cost_Out = Total_Systems_Cost_Out.T
Total_Systems_Cost_Out.index = Total_Systems_Cost_Out.index.str.replace('_',' ')
# Total_Systems_Cost_Out = Total_Systems_Cost_Out/1000 #Change to $/kW-year
Total_Systems_Cost_Out = Total_Systems_Cost_Out/1e6 #Convert cost to millions
if pd.notna(custom_data_file_path):
Total_Systems_Cost_Out = self.insert_custom_data_columns(
Total_Systems_Cost_Out,
custom_data_file_path)
Net_Revenue = Total_Systems_Cost_Out.sum(axis=1)
#Checks if Net_Revenue contains data, if not skips zone and does not return a plot
if Net_Revenue.empty:
out = MissingZoneData()
outputs[zone_input] = out
continue
# Data table of values to return to main program
Data_Table_Out = Total_Systems_Cost_Out.add_suffix(" (Million $)")
mplt = PlotLibrary()
fig, ax = mplt.get_figure()
# Set x-tick labels
if len(self.custom_xticklabels) > 1:
tick_labels = self.custom_xticklabels
else:
tick_labels = Total_Systems_Cost_Out.index
mplt.barplot(Total_Systems_Cost_Out, stacked=True,
custom_tick_labels=tick_labels)
ax.plot(Net_Revenue.index, Net_Revenue.values,
color='black', linestyle='None', marker='o',
label='Net Revenue')
            ax.set_ylabel('Total System Net Rev, Rev, & Cost (Million $)', color='black', rotation='vertical')
ax.margins(x=0.01)
mplt.add_legend(reverse_legend=True)
if mconfig.parser("plot_title_as_region"):
mplt.add_main_title(zone_input)
outputs[zone_input] = {'fig': fig, 'data_table': Data_Table_Out}
return outputs
def sys_cost(self, start_date_range: str = None,
end_date_range: str = None, custom_data_file_path: str = None,
**_):
"""Creates a stacked bar plot of Total Generation Cost and Cost of Unserved Energy.
Plot only shows totals and is NOT broken down into technology or cost type
specific values.
        Each scenario is plotted as a separate bar.
Args:
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines a end date at which to represent data to.
Defaults to None.
custom_data_file_path (str, optional): Path to custom data file to concat extra
data. Index and column format should be consistent with output data csv.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = {}
if self.AGG_BY == 'zone':
agg = 'zone'
else:
agg = 'region'
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"generator_Total_Generation_Cost",self.Scenarios),
(False,f"{agg}_Cost_Unserved_Energy",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
# Checks if all data required by plot is available, if 1 in list required data is missing
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
total_cost_chunk = []
self.logger.info(f"{self.AGG_BY} = {zone_input}")
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
Total_Systems_Cost = pd.DataFrame()
Total_Gen_Cost = self["generator_Total_Generation_Cost"].get(scenario)
try:
Total_Gen_Cost = Total_Gen_Cost.xs(zone_input,level=self.AGG_BY)
except KeyError:
self.logger.warning(f"No Generators found for : {zone_input}")
continue
Total_Gen_Cost = Total_Gen_Cost.sum(axis=0)
Total_Gen_Cost.rename("Total_Gen_Cost", inplace=True)
Cost_Unserved_Energy = self[f"{agg}_Cost_Unserved_Energy"][scenario]
if Cost_Unserved_Energy.empty:
Cost_Unserved_Energy = self["generator_Total_Generation_Cost"][scenario].copy()
Cost_Unserved_Energy.iloc[:,0] = 0
Cost_Unserved_Energy = Cost_Unserved_Energy.xs(zone_input,level=self.AGG_BY)
Cost_Unserved_Energy = Cost_Unserved_Energy.sum(axis=0)
Cost_Unserved_Energy.rename("Cost_Unserved_Energy", inplace=True)
Total_Systems_Cost = pd.concat([Total_Systems_Cost, Total_Gen_Cost, Cost_Unserved_Energy],
axis=1, sort=False)
Total_Systems_Cost.columns = Total_Systems_Cost.columns.str.replace('_',' ')
Total_Systems_Cost.rename({0:scenario}, axis='index', inplace=True)
total_cost_chunk.append(Total_Systems_Cost)
# Checks if gen_cost_out_chunks contains data, if not skips zone and does not return a plot
if not total_cost_chunk:
outputs[zone_input] = MissingZoneData()
continue
Total_Systems_Cost_Out = pd.concat(total_cost_chunk, axis=0, sort=False)
Total_Systems_Cost_Out = Total_Systems_Cost_Out/1000000 #Convert cost to millions
Total_Systems_Cost_Out.index = Total_Systems_Cost_Out.index.str.replace('_',' ')
#Checks if Total_Systems_Cost_Out contains data, if not skips zone and does not return a plot
if Total_Systems_Cost_Out.empty:
outputs[zone_input] = MissingZoneData()
continue
if pd.notna(custom_data_file_path):
Total_Systems_Cost_Out = self.insert_custom_data_columns(
Total_Systems_Cost_Out,
custom_data_file_path)
# Data table of values to return to main program
Data_Table_Out = Total_Systems_Cost_Out.add_suffix(" (Million $)")
mplt = PlotLibrary()
fig, ax = mplt.get_figure()
# Set x-tick labels
if len(self.custom_xticklabels) > 1:
tick_labels = self.custom_xticklabels
else:
tick_labels = Total_Systems_Cost_Out.index
mplt.barplot(Total_Systems_Cost_Out, stacked=True,
custom_tick_labels=tick_labels)
ax.set_ylabel('Total System Cost (Million $)',
color='black', rotation='vertical')
ax.margins(x=0.01)
mplt.add_legend(reverse_legend=True)
if mconfig.parser("plot_title_as_region"):
mplt.add_main_title(zone_input)
cost_totals = Total_Systems_Cost_Out.sum(axis=1) #holds total of each bar
#inserts values into bar stacks
for patch in ax.patches:
width, height = patch.get_width(), patch.get_height()
if height<=1:
continue
x, y = patch.get_xy()
ax.text(x+width/2,
y+height/2,
'{:,.0f}'.format(height),
horizontalalignment='center',
verticalalignment='center', fontsize=12)
#inserts total bar value above each bar
for k, patch in enumerate(ax.patches):
height = cost_totals[k]
width = patch.get_width()
x, y = patch.get_xy()
ax.text(x+width/2,
y+height + 0.05*max(ax.get_ylim()),
'{:,.0f}'.format(height),
horizontalalignment='center',
verticalalignment='center', fontsize=15, color='red')
if k>=len(cost_totals)-1:
break
outputs[zone_input] = {'fig': fig, 'data_table': Data_Table_Out}
return outputs
def detailed_gen_cost(self, start_date_range: str = None,
end_date_range: str = None, custom_data_file_path: str = None,
**_):
"""Creates stacked bar plot of total generation cost by cost type (fuel, emission, start cost etc.)
        Creates a more detailed system cost plot.
        Each scenario is plotted as a separate bar.
Args:
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines a end date at which to represent data to.
Defaults to None.
custom_data_file_path (str, optional): Path to custom data file to concat extra
data. Index and column format should be consistent with output data csv.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"generator_Fuel_Cost",self.Scenarios),
(True,"generator_VO&M_Cost",self.Scenarios),
(True,"generator_Start_&_Shutdown_Cost",self.Scenarios),
(False,"generator_Emissions_Cost",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
# Checks if all data required by plot is available, if 1 in list required data is missing
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
self.logger.info(f"Zone = {zone_input}")
gen_cost_out_chunks = []
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
Fuel_Cost = self["generator_Fuel_Cost"].get(scenario)
# Check if Fuel_cost contains zone_input, skips if not
try:
Fuel_Cost = Fuel_Cost.xs(zone_input,level=self.AGG_BY)
except KeyError:
self.logger.warning(f"No Generators found for: {zone_input}")
continue
Fuel_Cost = Fuel_Cost.sum(axis=0)
Fuel_Cost.rename("Fuel_Cost", inplace=True)
VOM_Cost = self["generator_VO&M_Cost"].get(scenario)
VOM_Cost = VOM_Cost.xs(zone_input,level=self.AGG_BY)
VOM_Cost[0].values[VOM_Cost[0].values < 0] = 0
VOM_Cost = VOM_Cost.sum(axis=0)
VOM_Cost.rename("VO&M_Cost", inplace=True)
Start_Shutdown_Cost = self["generator_Start_&_Shutdown_Cost"].get(scenario)
Start_Shutdown_Cost = Start_Shutdown_Cost.xs(zone_input,level=self.AGG_BY)
Start_Shutdown_Cost = Start_Shutdown_Cost.sum(axis=0)
Start_Shutdown_Cost.rename("Start_&_Shutdown_Cost", inplace=True)
Emissions_Cost = self["generator_Emissions_Cost"][scenario]
if Emissions_Cost.empty:
self.logger.warning(f"generator_Emissions_Cost not included in {scenario} results, Emissions_Cost will not be included in plot")
Emissions_Cost = self["generator_Start_&_Shutdown_Cost"][scenario].copy()
Emissions_Cost.iloc[:,0] = 0
Emissions_Cost = Emissions_Cost.xs(zone_input,level=self.AGG_BY)
Emissions_Cost = Emissions_Cost.sum(axis=0)
Emissions_Cost.rename("Emissions_Cost", inplace=True)
Detailed_Gen_Cost = pd.concat([Fuel_Cost, VOM_Cost, Start_Shutdown_Cost, Emissions_Cost], axis=1, sort=False)
Detailed_Gen_Cost.columns = Detailed_Gen_Cost.columns.str.replace('_',' ')
Detailed_Gen_Cost = Detailed_Gen_Cost.sum(axis=0)
Detailed_Gen_Cost = Detailed_Gen_Cost.rename(scenario)
gen_cost_out_chunks.append(Detailed_Gen_Cost)
# Checks if gen_cost_out_chunks contains data, if not skips zone and does not return a plot
if not gen_cost_out_chunks:
outputs[zone_input] = MissingZoneData()
continue
Detailed_Gen_Cost_Out = pd.concat(gen_cost_out_chunks, axis=1, sort=False)
Detailed_Gen_Cost_Out = Detailed_Gen_Cost_Out.T/1000000 #Convert cost to millions
Detailed_Gen_Cost_Out.index = Detailed_Gen_Cost_Out.index.str.replace('_',' ')
# Deletes columns that are all 0
Detailed_Gen_Cost_Out = Detailed_Gen_Cost_Out.loc[:, (Detailed_Gen_Cost_Out != 0).any(axis=0)]
# Checks if Detailed_Gen_Cost_Out contains data, if not skips zone and does not return a plot
if Detailed_Gen_Cost_Out.empty:
outputs[zone_input] = MissingZoneData()
continue
if pd.notna(custom_data_file_path):
                Detailed_Gen_Cost_Out = self.insert_custom_data_columns(
                    Detailed_Gen_Cost_Out,
                    custom_data_file_path)
# Data table of values to return to main program
Data_Table_Out = Detailed_Gen_Cost_Out.add_suffix(" (Million $)")
# Set x-tick labels
if len(self.custom_xticklabels) > 1:
tick_labels = self.custom_xticklabels
else:
tick_labels = Detailed_Gen_Cost_Out.index
mplt = PlotLibrary()
fig, ax = mplt.get_figure()
mplt.barplot(Detailed_Gen_Cost_Out, stacked=True,
custom_tick_labels=tick_labels)
ax.axhline(y=0)
ax.set_ylabel('Total Generation Cost (Million $)',
color='black', rotation='vertical')
ax.margins(x=0.01)
mplt.add_legend(reverse_legend=True)
if mconfig.parser("plot_title_as_region"):
mplt.add_main_title(zone_input)
cost_totals = Detailed_Gen_Cost_Out.sum(axis=1) #holds total of each bar
#inserts values into bar stacks
for patch in ax.patches:
width, height = patch.get_width(), patch.get_height()
if height<=2:
continue
x, y = patch.get_xy()
ax.text(x+width/2,
y+height/2,
'{:,.0f}'.format(height),
horizontalalignment='center',
verticalalignment='center', fontsize=12)
#inserts total bar value above each bar
for k, patch in enumerate(ax.patches):
height = cost_totals[k]
width = patch.get_width()
x, y = patch.get_xy()
ax.text(x+width/2,
y+height + 0.05*max(ax.get_ylim()),
'{:,.0f}'.format(height),
horizontalalignment='center',
verticalalignment='center', fontsize=15, color='red')
if k>=len(cost_totals)-1:
break
outputs[zone_input] = {'fig': fig, 'data_table': Data_Table_Out}
return outputs
def sys_cost_type(self, start_date_range: str = None,
end_date_range: str = None, custom_data_file_path: str = None,
**_):
"""Creates stacked bar plot of total generation cost by generator technology type.
Another way to represent total generation cost, this time by tech type,
        e.g. Coal, Gas, Hydro, etc.
        Each scenario is plotted as a separate bar.
Args:
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines a end date at which to represent data to.
Defaults to None.
custom_data_file_path (str, optional): Path to custom data file to concat extra
data. Index and column format should be consistent with output data csv.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
# Create Dictionary to hold Datframes for each scenario
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"generator_Total_Generation_Cost",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
# Checks if all data required by plot is available, if 1 in list required data is missing
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
gen_cost_out_chunks = []
self.logger.info(f"Zone = {zone_input}")
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
Total_Gen_Stack = self["generator_Total_Generation_Cost"].get(scenario)
# Check if Total_Gen_Stack contains zone_input, skips if not
try:
Total_Gen_Stack = Total_Gen_Stack.xs(zone_input,level=self.AGG_BY)
except KeyError:
self.logger.warning(f"No Generators found for : {zone_input}")
continue
Total_Gen_Stack = self.df_process_gen_inputs(Total_Gen_Stack)
Total_Gen_Stack = Total_Gen_Stack.sum(axis=0)
Total_Gen_Stack.rename(scenario, inplace=True)
gen_cost_out_chunks.append(Total_Gen_Stack)
# Checks if gen_cost_out_chunks contains data, if not skips zone and does not return a plot
if not gen_cost_out_chunks:
outputs[zone_input] = MissingZoneData()
continue
Total_Generation_Stack_Out = pd.concat(gen_cost_out_chunks, axis=1, sort=False).fillna(0)
Total_Generation_Stack_Out = self.create_categorical_tech_index(Total_Generation_Stack_Out)
Total_Generation_Stack_Out = Total_Generation_Stack_Out.T/1000000 #Convert to millions
Total_Generation_Stack_Out = Total_Generation_Stack_Out.loc[:, (Total_Generation_Stack_Out != 0).any(axis=0)]
# Checks if Total_Generation_Stack_Out contains data, if not skips zone and does not return a plot
if Total_Generation_Stack_Out.empty:
outputs[zone_input] = MissingZoneData()
continue
if pd.notna(custom_data_file_path):
Total_Generation_Stack_Out = self.insert_custom_data_columns(
Total_Generation_Stack_Out,
custom_data_file_path)
# Data table of values to return to main program
Data_Table_Out = Total_Generation_Stack_Out.add_suffix(" (Million $)")
Total_Generation_Stack_Out.index = Total_Generation_Stack_Out.index.str.replace('_',' ')
mplt = PlotLibrary()
fig, ax = mplt.get_figure()
# Set x-tick labels
if len(self.custom_xticklabels) > 1:
tick_labels = self.custom_xticklabels
else:
tick_labels = Total_Generation_Stack_Out.index
mplt.barplot(Total_Generation_Stack_Out,
color=self.PLEXOS_color_dict, stacked=True,
custom_tick_labels=tick_labels)
ax.set_ylabel('Total System Cost (Million $)', color='black', rotation='vertical')
ax.margins(x=0.01)
mplt.add_legend(reverse_legend=True)
if mconfig.parser("plot_title_as_region"):
mplt.add_main_title(zone_input)
outputs[zone_input] = {'fig': fig, 'data_table': Data_Table_Out}
return outputs
def sys_cost_diff(self, start_date_range: str = None,
end_date_range: str = None, **_):
"""Creates stacked barplots of Total Generation Cost and Cost of Unserved Energy relative to a base scenario.
        Barplots show the change in total generation cost relative to a base scenario.
        The default is to compare against the first scenario provided in the inputs list.
        Plot only shows totals and is NOT broken down into technology or cost type specific values.
        Each scenario is plotted as a separate bar.
Args:
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines an end date at which to represent data to.
Defaults to None.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
if self.AGG_BY == 'zone':
agg = 'zone'
else:
agg = 'region'
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True, "generator_Total_Generation_Cost", self.Scenarios),
(False, f"{agg}_Cost_Unserved_Energy", self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
# Checks if all data required by plot is available, if 1 in list required data is missing
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
total_cost_chunk = []
self.logger.info(f"Zone = {zone_input}")
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
Total_Systems_Cost = pd.DataFrame()
Total_Gen_Cost = self["generator_Total_Generation_Cost"].get(scenario)
try:
Total_Gen_Cost = Total_Gen_Cost.xs(zone_input,level=self.AGG_BY)
except KeyError:
self.logger.warning(f"No Generators found for : {zone_input}")
continue
Total_Gen_Cost = Total_Gen_Cost.sum(axis=0)
Total_Gen_Cost.rename("Total_Gen_Cost", inplace=True)
Cost_Unserved_Energy = self[f"{agg}_Cost_Unserved_Energy"][scenario]
if Cost_Unserved_Energy.empty:
Cost_Unserved_Energy = self["generator_Total_Generation_Cost"][scenario].copy()
Cost_Unserved_Energy.iloc[:,0] = 0
Cost_Unserved_Energy = Cost_Unserved_Energy.xs(zone_input,level=self.AGG_BY)
Cost_Unserved_Energy = Cost_Unserved_Energy.sum(axis=0)
Cost_Unserved_Energy.rename("Cost_Unserved_Energy", inplace=True)
Total_Systems_Cost = pd.concat([Total_Systems_Cost, Total_Gen_Cost, Cost_Unserved_Energy], axis=1, sort=False)
Total_Systems_Cost.columns = Total_Systems_Cost.columns.str.replace('_',' ')
Total_Systems_Cost.rename({0:scenario}, axis='index', inplace=True)
total_cost_chunk.append(Total_Systems_Cost)
# Checks if total_cost_chunk contains data, if not skips zone and does not return a plot
if not total_cost_chunk:
outputs[zone_input] = MissingZoneData()
continue
Total_Systems_Cost_Out = pd.concat(total_cost_chunk, axis=0, sort=False)  # api: pandas.concat
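# Added illustration (not part of the original row): a self-contained sketch of the
# pandas pattern both cost-plot methods above rely on - per-scenario Series stacked
# column-wise with pd.concat, then transposed so scenarios become rows. All names and
# values below are made up; it only assumes pandas is imported as pd.
def _demo_scenario_table():
    s1 = pd.Series({"Coal": 10.0e6, "Gas": 5.0e6}, name="Scenario_A")
    s2 = pd.Series({"Coal": 8.0e6, "Wind": 3.0e6}, name="Scenario_B")
    table = pd.concat([s1, s2], axis=1, sort=False).fillna(0)  # techs x scenarios
    return table.T / 1e6  # scenarios as rows, converted to millions as above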
from typing import Union
import numpy as np
import pandas as pd
from oolearning.model_wrappers.HyperParamsBase import HyperParamsBase
from oolearning.model_wrappers.ModelWrapperBase import ModelWrapperBase
class MockRegressionModelWrapperTrainingObject:
def __init__(self, model_object, target_probabilities, target_intervals):
self._model_object = model_object
self._target_probabilities = target_probabilities
self._target_intervals = target_intervals
class MockRegressionModelWrapper(ModelWrapperBase):
@property
def feature_importance(self):
raise NotImplementedError()
@property
def results_summary(self) -> object:
return 'test_summary'
def __init__(self, data_y: np.ndarray, model_object: str='test model_object'):
"""
Dumb mock object that randomly returns values corresponding with a similar distribution as `data_y`
:type model_object: string that can be used to ensure the correct model_object is returned
:param data_y: actual values, used to know which values to randomly pass back in `predict()`
"""
super().__init__()
self._model_object_pre_train = model_object
self.fitted_train_x = None
if not isinstance(data_y, pd.Series):
data_y = pd.Series(data_y)  # api: pandas.Series
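# Added, heavily hedged sketch (the original __init__ is truncated above, so this is
# NOT the class's real body): one plausible way a mock wrapper could turn data_y into
# per-interval probabilities with pandas, e.g. for sampling fake predictions later.
# The helper name and bin count are hypothetical.
def _demo_target_distribution(data_y, bins=5):
    binned = pd.cut(pd.Series(data_y), bins=bins)
    probs = binned.value_counts(normalize=True).sort_index()
    return probs.values, list(probs.index)  # probabilities and pd.Interval bins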
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 26 16:34:40 2020
@author: skyjones
"""
import os
import re
import shutil
import pandas as pd
from glob import glob
import nibabel as nib
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.ensemble import IsolationForest
from sklearn.neighbors import LocalOutlierFactor
from sklearn import preprocessing
import scipy
from matplotlib.patches import Circle
from matplotlib.offsetbox import (TextArea, DrawingArea, OffsetImage,
AnnotationBbox)
from matplotlib.cbook import get_sample_data
from parse_fs_stats import parse_freesurfer_stats
exclude_pts = ['SCD_K065', 'SCD_TRANSP_P001_01']
in_csv = '/Users/manusdonahue/Documents/Sky/stroke_status.csv'
out_folder = '/Users/manusdonahue/Documents/Sky/freesurfer_volume_visualization/'
parsed_folder = '/Users/manusdonahue/Documents/Sky/freesurfer_volume_visualization/parsed'
brain_vol_csv = '/Users/manusdonahue/Documents/Sky/normal_brain_vols.csv' # from Borzage, Equations to describe brain size across the continuum of human lifespan (2012)
# values originally reported as mass in g, converted to cc assuming rho = 1.04 g/cc
fs_folder = '/Volumes/DonahueDataDrive/freesurfer_subjects/'
parse = False
collate = False
visualize = True
# os.path.basename(os.path.normpath(path))
###########
def plot_ci_manual(t, s_err, n, x, x2, y2, ax=None, color='#b9cfe7'):
"""Return an axes of confidence bands using a simple approach.
Notes
-----
.. math:: \left| \: \hat{\mu}_{y|x0} - \mu_{y|x0} \: \right| \; \leq \; T_{n-2}^{.975} \; \hat{\sigma} \; \sqrt{\frac{1}{n}+\frac{(x_0-\bar{x})^2}{\sum_{i=1}^n{(x_i-\bar{x})^2}}}
.. math:: \hat{\sigma} = \sqrt{\sum_{i=1}^n{\frac{(y_i-\hat{y})^2}{n-2}}}
References
----------
.. [1] <NAME>. "Curve fitting," Jupyter Notebook.
http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/CurveFitting.ipynb
"""
if ax is None:
ax = plt.gca()
ci = t * s_err * np.sqrt(1/n + (x2 - np.mean(x))**2 / np.sum((x - np.mean(x))**2))
ax.fill_between(x2, y2 + ci, y2 - ci, color=color, edgecolor="", alpha=0.25)
return ax
def plot_ci_bootstrap(xs, ys, resid, nboot=500, ax=None):
"""Return an axes of confidence bands using a bootstrap approach.
Notes
-----
The bootstrap approach iteratively resampling residuals.
It plots `nboot` number of straight lines and outlines the shape of a band.
The density of overlapping lines indicates improved confidence.
Returns
-------
ax : axes
- Cluster of lines
- Upper and Lower bounds (high and low) (optional) Note: sensitive to outliers
References
----------
.. [1] <NAME>. "Visualizing Confidence Intervals", Various Consequences.
http://www.variousconsequences.com/2010/02/visualizing-confidence-intervals.html
"""
if ax is None:
ax = plt.gca()
bootindex = np.random.randint  # scipy.random / scipy.polyfit / scipy.polyval were removed from SciPy; use NumPy
for _ in range(nboot):
    resamp_resid = resid[bootindex(0, len(resid) - 1, len(resid))]
    # Make coefficients for the resampled polynomial fit
    pc = np.polyfit(xs, ys + resamp_resid, 1)
    # Plot bootstrap cluster
    ax.plot(xs, np.polyval(pc, xs), "b-", linewidth=2, alpha=3.0 / float(nboot))
return ax
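# Added usage sketch (not from the original script): fits a simple line to noisy data
# and draws its confidence band with plot_ci_manual above. It only uses names already
# imported in this file (np, plt, stats); the sample size and noise level are arbitrary.
def _demo_ci_band(n=50, seed=0):
    rng = np.random.default_rng(seed)
    x = np.linspace(0, 10, n)
    y = 2.0 * x + rng.normal(scale=2.0, size=n)
    p = np.polyfit(x, y, 1)
    resid = y - np.polyval(p, x)
    s_err = np.sqrt(np.sum(resid ** 2) / (n - 2))  # residual standard error
    t_val = stats.t.ppf(0.975, n - 2)              # two-sided 95% critical value
    x2 = np.linspace(x.min(), x.max(), 100)
    y2 = np.polyval(p, x2)
    fig, ax = plt.subplots()
    ax.plot(x, y, "o", alpha=0.5)
    ax.plot(x2, y2, "-")
    plot_ci_manual(t_val, s_err, n, x, x2, y2, ax=ax)
    return fig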
def filter_zeroed_axial_slices(nii_data, thresh=0.99):
# removes axial slices where the fraction of pixels at or below zero exceeds the threshold, and replaces NaN with -1
the_data = nii_data.copy()
wherenan = np.isnan(the_data)
the_data[wherenan] = -1
if thresh:
keep = []
for i in range(the_data.shape[2]):
d = the_data[:,:,i]
near_zero = np.isclose(d,0)
less_zero = (d <= 0)
bad_pixels = np.logical_or(near_zero, less_zero)
perc_bad = bad_pixels.sum() / d.size
if not perc_bad >= thresh:
keep.append(True)
else:
keep.append(False)
new = the_data[:,:,keep]
return new
else:
return the_data
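# Added quick self-check (not in the original script) of filter_zeroed_axial_slices on
# a tiny synthetic volume; the shapes and values are arbitrary.
def _demo_slice_filter():
    vol = np.ones((4, 4, 3))
    vol[:, :, 1] = 0          # a fully zeroed axial slice -> should be dropped
    vol[0, 0, 2] = np.nan     # NaN is mapped to -1 internally but the slice is kept
    filtered = filter_zeroed_axial_slices(vol, thresh=0.99)
    return filtered.shape     # expected (4, 4, 2)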
folders = np.array(glob(os.path.join(fs_folder, '*/'))) # list of all possible subdirectories
folders = [os.path.normpath(i) for i in folders]
in_table = pd.read_csv(in_csv)
in_table = in_table.dropna(subset=['mr1_mr_id_real'])
mr_ids = in_table['mr1_mr_id_real']
in_table_indexed = pd.read_csv(in_csv, index_col='mr1_mr_id_real')
if parse:
for i, mr in enumerate(mr_ids):
print(f'\nParsing {mr} ({i+1} of {len(mr_ids)})')
stats_file = os.path.join(fs_folder, mr, 'stats', 'aseg.stats')
parsed_file = os.path.join(parsed_folder, f'{mr}.csv')
try:
parse_freesurfer_stats(stats_file, parsed_file)
except FileNotFoundError:
print(f'No completed Freesurfer folder for {mr} ({stats_file})')
parsed_csvs = np.array(glob(os.path.join(parsed_folder, '*.csv'))) # list of all possible subdirectories
if collate:
out_csv = os.path.join(out_folder, 'collated.csv')
out_df = pd.DataFrame()
blank_dict = {'mr_id':None,
'wm_vol':None,
'gm_vol':None,
'total_vol':None,
'total_vol_custom':None,
'mask_vol_custom':None,
'supratent':None,
'etiv':None,
'csf_vol':None,
'gm_normal':None,
'wm_normal':None,
'total_normal':None,
'supratent_normal':None,
'age':None,
'stroke_silent':None,
'white_matter_injury':None,
'stroke_overt':None,
'sci':None,
'transf':None,
'scd':None,
'anemia':None,
'control':None,
'gender':None,
'exclude':0}
for i, csv in enumerate(parsed_csvs):
pt_name = os.path.basename(os.path.normpath(csv))[:-4]
print(f'Collating {pt_name} ({i+1} of {len(parsed_csvs)})')
working = pd.Series(blank_dict.copy())
parsed_csv = pd.read_csv(csv, index_col='short')
working['mr_id'] = pt_name
if pt_name in exclude_pts:
working['exclude'] = 1
working['gm_vol'] = parsed_csv.loc['TotalGrayVol']['value'] / 1e3
working['total_vol'] = parsed_csv.loc['BrainSegVolNotVent']['value'] / 1e3
working['wm_vol'] = working['total_vol'] - working['gm_vol']
working['age'] = in_table_indexed.loc[pt_name]['age']
working['etiv'] = parsed_csv.loc['eTIV']['value'] / 1e3
working['gm_normal'] = working['gm_vol'] / working['etiv']
working['wm_normal'] = working['wm_vol'] / working['etiv']
working['total_normal'] = working['total_vol'] / working['etiv']
working['supratent'] = parsed_csv.loc['SupraTentorialVolNotVent']['value'] / 1e3
working['supratent_normal'] = working['supratent'] / working['etiv']
working['csf_vol'] = working['etiv'] - working['total_vol']
if in_table_indexed.loc[pt_name]['mri1_wml_drp'] == 1:
working['white_matter_injury'] = 1
else:
working['white_matter_injury'] = 0
stroke_overt = in_table_indexed.loc[pt_name]['mh_rf_prior_stroke_overt']
stroke_silent = in_table_indexed.loc[pt_name]['mh_rf_prior_stroke_silent']
if stroke_overt == 1 or stroke_silent == 1:
working['exclude'] = 1
sci = in_table_indexed.loc[pt_name]['outcome_mri1_sci']
transf = in_table_indexed.loc[pt_name]['enroll_sca_transfusion']
#if transf == 1:
#working['exclude'] = 1
for val, name in zip([stroke_overt, stroke_silent, sci, transf],
['stroke_overt', 'stroke_silent', 'sci', 'transf']):
if val == 1:
working[name] = 1
else:
working[name] = 0
status = in_table_indexed.loc[pt_name]['case_control']
if status == 2:
working['scd'] = 0
working['anemia'] = 1
working['control'] = 0
elif status == 1:
working['scd'] = 1
working['anemia'] = 0
working['control'] = 0
else:
working['scd'] = 0
working['anemia'] = 0
working['control'] = 1
working['gender'] = in_table_indexed.loc[pt_name]['gender']
fs_seg_file = os.path.join(fs_folder, pt_name, 'mri', 'aseg.auto.mgz')
fs_brain_file = os.path.join(fs_folder, pt_name, 'mri', 'brain.mgz')
seg_data = nib.load(fs_seg_file)
brain_data = nib.load(fs_brain_file)
seg_voxel_vol = np.prod(seg_data.header.get_zooms())
brain_voxel_vol = np.prod(brain_data.header.get_zooms())  # was seg_data; use the brain header here
seg_mat = seg_data.get_fdata()
brain_mat = brain_data.get_fdata()
seg_mask = seg_mat > 0
brain_mask = brain_mat > 0
seg_vol = seg_mask.sum()*seg_voxel_vol
brain_vol = brain_mask.sum()*brain_voxel_vol
working['total_vol_custom'] = seg_vol / 1e3
working['mask_vol_custom'] = brain_vol / 1e3
out_df = pd.concat([out_df, working.to_frame().T], ignore_index=True)  # DataFrame.append was removed in pandas 2.x
out_df = out_df[blank_dict.keys()]
out_df.to_csv(out_csv, index=False)
if visualize:
print('Visualizing')
brain_vol_df = pd.read_csv(brain_vol_csv)
collated_csv = os.path.join(out_folder, 'collated.csv')
clean_table = pd.read_csv(collated_csv, index_col='mr_id')  # api: pandas.read_csv
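# Added sketch of the voxel-volume bookkeeping used in the collate step above, kept
# independent of any file on disk: volume in cc = (# voxels above zero) * voxel volume
# in mm^3 / 1000. The default zooms are placeholders.
def _demo_mask_volume(mat, zooms=(1.0, 1.0, 1.0)):
    voxel_vol_mm3 = np.prod(zooms)
    return (mat > 0).sum() * voxel_vol_mm3 / 1e3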
"""
Tests for scalar Timedelta arithmetic ops
"""
from datetime import datetime, timedelta
import operator
import numpy as np
import pytest
import pandas as pd
from pandas import NaT, Timedelta, Timestamp, offsets
import pandas._testing as tm
from pandas.core import ops
class TestTimedeltaAdditionSubtraction:
"""
Tests for Timedelta methods:
__add__, __radd__,
__sub__, __rsub__
"""
@pytest.mark.parametrize(
"ten_seconds",
[
Timedelta(10, unit="s"),
timedelta(seconds=10),
np.timedelta64(10, "s"),
np.timedelta64(10000000000, "ns"),
offsets.Second(10),
],
)
def test_td_add_sub_ten_seconds(self, ten_seconds):
# GH#6808
base = Timestamp("20130101 09:01:12.123456")
expected_add = Timestamp("20130101 09:01:22.123456")
expected_sub = Timestamp("20130101 09:01:02.123456")
result = base + ten_seconds
assert result == expected_add
result = base - ten_seconds
assert result == expected_sub
@pytest.mark.parametrize(
"one_day_ten_secs",
[
Timedelta("1 day, 00:00:10"),
Timedelta("1 days, 00:00:10"),
timedelta(days=1, seconds=10),
np.timedelta64(1, "D") + np.timedelta64(10, "s"),
offsets.Day() + offsets.Second(10),
],
)
def test_td_add_sub_one_day_ten_seconds(self, one_day_ten_secs):
# GH#6808
base = Timestamp("20130102 09:01:12.123456")
expected_add = Timestamp("20130103 09:01:22.123456")
expected_sub = Timestamp("20130101 09:01:02.123456")
result = base + one_day_ten_secs
assert result == expected_add
result = base - one_day_ten_secs
assert result == expected_sub
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_datetimelike_scalar(self, op):
# GH#19738
td = Timedelta(10, unit="d")
result = op(td, datetime(2016, 1, 1))
if op is operator.add:
# datetime + Timedelta does _not_ call Timedelta.__radd__,
# so we get a datetime back instead of a Timestamp
assert isinstance(result, Timestamp)
assert result == Timestamp(2016, 1, 11)
result = op(td, Timestamp("2018-01-12 18:09"))
assert isinstance(result, Timestamp)
assert result == Timestamp("2018-01-22 18:09")
result = op(td, np.datetime64("2018-01-12"))
assert isinstance(result, Timestamp)
assert result == Timestamp("2018-01-22")
result = op(td, NaT)
assert result is NaT
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_td(self, op):
td = Timedelta(10, unit="d")
result = op(td, Timedelta(days=10))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=20)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_pytimedelta(self, op):
td = Timedelta(10, unit="d")
result = op(td, timedelta(days=9))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=19)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_timedelta64(self, op):
td = Timedelta(10, unit="d")
result = op(td, np.timedelta64(-4, "D"))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=6)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_offset(self, op):
td = Timedelta(10, unit="d")
result = op(td, offsets.Hour(6))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=10, hours=6)
def test_td_sub_td(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_pytimedelta(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td.to_pytimedelta()
assert isinstance(result, Timedelta)
assert result == expected
result = td.to_pytimedelta() - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_timedelta64(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td.to_timedelta64()
assert isinstance(result, Timedelta)
assert result == expected
result = td.to_timedelta64() - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_nat(self):
# In this context pd.NaT is treated as timedelta-like
td = Timedelta(10, unit="d")
result = td - NaT
assert result is NaT
def test_td_sub_td64_nat(self):
td = Timedelta(10, unit="d")
td_nat = np.timedelta64("NaT")
result = td - td_nat
assert result is NaT
result = td_nat - td
assert result is NaT
def test_td_sub_offset(self):
td = Timedelta(10, unit="d")
result = td - offsets.Hour(1)
assert isinstance(result, Timedelta)
assert result == Timedelta(239, unit="h")
def test_td_add_sub_numeric_raises(self):
td = Timedelta(10, unit="d")
for other in [2, 2.0, np.int64(2), np.float64(2)]:
with pytest.raises(TypeError):
td + other
with pytest.raises(TypeError):
other + td
with pytest.raises(TypeError):
td - other
with pytest.raises(TypeError):
other - td
def test_td_rsub_nat(self):
td = Timedelta(10, unit="d")
result = NaT - td
assert result is NaT
result = np.datetime64("NaT") - td
assert result is NaT
def test_td_rsub_offset(self):
result = offsets.Hour(1) - Timedelta(10, unit="d")
assert isinstance(result, Timedelta)
assert result == Timedelta(-239, unit="h")
def test_td_sub_timedeltalike_object_dtype_array(self):
# GH#21980
arr = np.array([Timestamp("20130101 9:01"), Timestamp("20121230 9:02")])
exp = np.array([Timestamp("20121231 9:01"), Timestamp("20121229 9:02")])
res = arr - Timedelta("1D")
tm.assert_numpy_array_equal(res, exp)
def test_td_sub_mixed_most_timedeltalike_object_dtype_array(self):
# GH#21980
now = Timestamp.now()
arr = np.array([now, Timedelta("1D"), np.timedelta64(2, "h")])
exp = np.array(
[
now - Timedelta("1D"),
Timedelta("0D"),
np.timedelta64(2, "h") - Timedelta("1D"),
]
)
res = arr - Timedelta("1D")
tm.assert_numpy_array_equal(res, exp)
def test_td_rsub_mixed_most_timedeltalike_object_dtype_array(self):
# GH#21980
now = Timestamp.now()
arr = np.array([now, Timedelta("1D"), np.timedelta64(2, "h")])
with pytest.raises(TypeError):
Timedelta("1D") - arr
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_timedeltalike_object_dtype_array(self, op):
# GH#21980
arr = np.array([Timestamp("20130101 9:01"), Timestamp("20121230 9:02")])
exp = np.array([Timestamp("20130102 9:01"), Timestamp("20121231 9:02")])
res = op(arr, Timedelta("1D"))
tm.assert_numpy_array_equal(res, exp)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_mixed_timedeltalike_object_dtype_array(self, op):
# GH#21980
now = Timestamp.now()
arr = np.array([now, Timedelta("1D")])
exp = np.array([now + Timedelta("1D"), Timedelta("2D")])
res = op(arr, Timedelta("1D"))
tm.assert_numpy_array_equal(res, exp)
# TODO: moved from index tests following #24365, may need de-duplication
def test_ops_ndarray(self):
td = Timedelta("1 day")
# timedelta, timedelta
other = pd.to_timedelta(["1 day"]).values
expected = pd.to_timedelta(["2 days"]).values
tm.assert_numpy_array_equal(td + other, expected)
tm.assert_numpy_array_equal(other + td, expected)
msg = r"unsupported operand type\(s\) for \+: 'Timedelta' and 'int'"
with pytest.raises(TypeError, match=msg):
td + np.array([1])
msg = r"unsupported operand type\(s\) for \+: 'numpy.ndarray' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
np.array([1]) + td
expected = pd.to_timedelta(["0 days"]).values
tm.assert_numpy_array_equal(td - other, expected)
tm.assert_numpy_array_equal(-other + td, expected)
msg = r"unsupported operand type\(s\) for -: 'Timedelta' and 'int'"
with pytest.raises(TypeError, match=msg):
td - np.array([1])
msg = r"unsupported operand type\(s\) for -: 'numpy.ndarray' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
np.array([1]) - td
expected = pd.to_timedelta(["2 days"]).values
tm.assert_numpy_array_equal(td * np.array([2]), expected)
tm.assert_numpy_array_equal(np.array([2]) * td, expected)
msg = (
"ufunc '?multiply'? cannot use operands with types"
r" dtype\('<m8\[ns\]'\) and dtype\('<m8\[ns\]'\)"
)
with pytest.raises(TypeError, match=msg):
td * other
with pytest.raises(TypeError, match=msg):
other * td
tm.assert_numpy_array_equal(td / other, np.array([1], dtype=np.float64))
tm.assert_numpy_array_equal(other / td, np.array([1], dtype=np.float64))
# timedelta, datetime
other = pd.to_datetime(["2000-01-01"]).values
expected = pd.to_datetime(["2000-01-02"]).values
tm.assert_numpy_array_equal(td + other, expected)
tm.assert_numpy_array_equal(other + td, expected)
expected = pd.to_datetime(["1999-12-31"]).values
tm.assert_numpy_array_equal(-td + other, expected)
tm.assert_numpy_array_equal(other - td, expected)
class TestTimedeltaMultiplicationDivision:
"""
Tests for Timedelta methods:
__mul__, __rmul__,
__div__, __rdiv__,
__truediv__, __rtruediv__,
__floordiv__, __rfloordiv__,
__mod__, __rmod__,
__divmod__, __rdivmod__
"""
# ---------------------------------------------------------------
# Timedelta.__mul__, __rmul__
@pytest.mark.parametrize(
"td_nat", [NaT, np.timedelta64("NaT", "ns"), np.timedelta64("NaT")]
)
@pytest.mark.parametrize("op", [operator.mul, ops.rmul])
def test_td_mul_nat(self, op, td_nat):
# GH#19819
td = Timedelta(10, unit="d")
with pytest.raises(TypeError):
op(td, td_nat)
@pytest.mark.parametrize("nan", [np.nan, np.float64("NaN"), float("nan")])
@pytest.mark.parametrize("op", [operator.mul, ops.rmul])
def test_td_mul_nan(self, op, nan):
# np.float64('NaN') has a 'dtype' attr, avoid treating as array
td = Timedelta(10, unit="d")
result = op(td, nan)
assert result is NaT
@pytest.mark.parametrize("op", [operator.mul, ops.rmul])
def test_td_mul_scalar(self, op):
# GH#19738
td = Timedelta(minutes=3)
result = op(td, 2)
assert result == Timedelta(minutes=6)
result = op(td, 1.5)
assert result == Timedelta(minutes=4, seconds=30)
assert op(td, np.nan) is NaT
assert op(-1, td).value == -1 * td.value
assert op(-1.0, td).value == -1.0 * td.value
with pytest.raises(TypeError):
# timedelta * datetime is gibberish
op(td, Timestamp(2016, 1, 2))
with pytest.raises(TypeError):
# invalid multiply with another timedelta
op(td, td)
# ---------------------------------------------------------------
# Timedelta.__div__, __truediv__
def test_td_div_timedeltalike_scalar(self):
# GH#19738
td = Timedelta(10, unit="d")
result = td / offsets.Hour(1)
assert result == 240
assert td / td == 1
assert td / np.timedelta64(60, "h") == 4
assert np.isnan(td / NaT)
def test_td_div_numeric_scalar(self):
# GH#19738
td = Timedelta(10, unit="d")
result = td / 2
assert isinstance(result, Timedelta)
assert result == Timedelta(days=5)
result = td / 5.0
assert isinstance(result, Timedelta)
assert result == Timedelta(days=2)
@pytest.mark.parametrize("nan", [np.nan, np.float64("NaN"), float("nan")])
def test_td_div_nan(self, nan):
# np.float64('NaN') has a 'dtype' attr, avoid treating as array
td = Timedelta(10, unit="d")  # api: pandas.Timedelta
"""
This module contains the Connector class.
Every data fetching action should begin with instantiating this Connector class.
"""
import math
from pathlib import Path
from typing import Any, Dict, List, Optional
import pandas as pd
from jinja2 import Environment, StrictUndefined, Template
from requests import Request, Response, Session
from ..errors import UnreachableError
from .config_manager import config_directory, ensure_config
from .errors import RequestError, UniversalParameterOverridden
from .implicit_database import ImplicitDatabase, ImplicitTable
INFO_TEMPLATE = Template(
"""{% for tb in tbs.keys() %}
Table {{dbname}}.{{tb}}
Parameters
----------
{% if tbs[tb].required_params %}{{", ".join(tbs[tb].required_params)}} required {% endif %}
{% if tbs[tb].optional_params %}{{", ".join(tbs[tb].optional_params)}} optional {% endif %}
Examples
--------
>>> dc.query({{", ".join(["\\\"{}\\\"".format(tb)] + tbs[tb].joined_query_fields)}})
>>> dc.show_schema("{{tb}}")
{% endfor %}
"""
)
class Connector:
"""
This is the main class of data_connector component.
Initialize Connector class as the example code.
Parameters
----------
config_path
The path to the config. It can be hosted, e.g. "yelp", or from
local filesystem, e.g. "./yelp"
auth_params
The parameter for authentication, e.g. OAuth2
kwargs
Additional parameters
Example
-------
>>> from dataprep.data_connector import Connector
>>> dc = Connector("yelp", auth_params={"access_token":access_token})
"""
_impdb: ImplicitDatabase
_vars: Dict[str, Any]
_auth: Dict[str, Any]
_session: Session
_jenv: Environment
def __init__(
self,
config_path: str,
_auth: Optional[Dict[str, Any]] = None,
**kwargs: Dict[str, Any],
) -> None:
self._session = Session()
if (
config_path.startswith(".")
or config_path.startswith("/")
or config_path.startswith("~")
):
path = Path(config_path).resolve()
else:
# From Github!
ensure_config(config_path)
path = config_directory() / config_path
self._impdb = ImplicitDatabase(path)
self._vars = kwargs
self._auth = _auth or {}
self._jenv = Environment(undefined=StrictUndefined)
def _fetch( # pylint: disable=too-many-locals,too-many-branches
self,
table: ImplicitTable,
*,
_count: Optional[int] = None,
_cursor: Optional[int] = None,
_auth: Optional[Dict[str, Any]] = None,
kwargs: Dict[str, Any],
) -> Response:
assert (_count is None) == (
_cursor is None
), "_cursor and _count should both be None or not None"
method = table.method
url = table.url
req_data: Dict[str, Dict[str, Any]] = {
"headers": {},
"params": {},
"cookies": {},
}
merged_vars = {**self._vars, **kwargs}
if table.authorization is not None:
table.authorization.build(req_data, _auth or self._auth)
for key in ["headers", "params", "cookies"]:
if getattr(table, key) is not None:
instantiated_fields = getattr(table, key).populate(
self._jenv, merged_vars
)
req_data[key].update(**instantiated_fields)
if table.body is not None:
# TODO: do we support binary body?
instantiated_fields = table.body.populate(self._jenv, merged_vars)
if table.body_ctype == "application/x-www-form-urlencoded":
req_data["data"] = instantiated_fields
elif table.body_ctype == "application/json":
req_data["json"] = instantiated_fields
else:
raise UnreachableError
if table.pag_params is not None and _count is not None:
pag_type = table.pag_params.type
count_key = table.pag_params.count_key
if pag_type == "cursor":
assert table.pag_params.cursor_key is not None
cursor_key = table.pag_params.cursor_key
elif pag_type == "limit":
assert table.pag_params.anchor_key is not None
cursor_key = table.pag_params.anchor_key
else:
raise UnreachableError()
if count_key in req_data["params"]:
raise UniversalParameterOverridden(count_key, "_count")
req_data["params"][count_key] = _count
if cursor_key in req_data["params"]:
raise UniversalParameterOverridden(cursor_key, "_cursor")
req_data["params"][cursor_key] = _cursor
resp: Response = self._session.send( # type: ignore
Request(
method=method,
url=url,
headers=req_data["headers"],
params=req_data["params"],
json=req_data.get("json"),
data=req_data.get("data"),
cookies=req_data["cookies"],
).prepare()
)
if resp.status_code != 200:
raise RequestError(status_code=resp.status_code, message=resp.text)
return resp
def query( # pylint: disable=too-many-locals
self,
table: str,
_auth: Optional[Dict[str, Any]] = None,
_count: Optional[int] = None,
**where: Any,
) -> pd.DataFrame:
"""
Query the API to get a table.
Parameters
----------
table : str
The table name.
_auth : Optional[Dict[str, Any]] = None
The parameters for authentication. Usually the authentication parameters
should be defined when instantiating the Connector. In case some tables have different
authentication options, a different authentication parameter can be defined here.
This parameter will override the one from Connector if passed.
_count: Optional[int] = None
count of returned records.
**where: Any
The additional parameters required for the query.
"""
assert (
table in self._impdb.tables
), f"No such table {table} in {self._impdb.name}"
itable = self._impdb.tables[table]
if itable.pag_params is None:
resp = self._fetch(table=itable, _auth=_auth, kwargs=where)
df = itable.from_response(resp)
return df
# Pagination is not None
max_count = itable.pag_params.max_count
dfs = []
last_id = 0
pag_type = itable.pag_params.type
if _count is None:
# User doesn't specify _count
resp = self._fetch(table=itable, _auth=_auth, kwargs=where)
df = itable.from_response(resp)
else:
cnt_to_fetch = 0
count = _count or 1
n_page = math.ceil(count / max_count)
remain = count % max_count
for i in range(n_page):
remain = remain if remain > 0 else max_count
cnt_to_fetch = max_count if i < n_page - 1 else remain
if pag_type == "cursor":
resp = self._fetch(
table=itable,
_auth=_auth,
_count=cnt_to_fetch,
_cursor=last_id - 1,
kwargs=where,
)
elif pag_type == "limit":
resp = self._fetch(
table=itable,
_auth=_auth,
_count=cnt_to_fetch,
_cursor=i * max_count,
kwargs=where,
)
else:
raise NotImplementedError
df_ = itable.from_response(resp)
if len(df_) == 0:
# The API returns empty for this page, maybe we've reached the end
break
if pag_type == "cursor":
last_id = int(df_[itable.pag_params.cursor_id][len(df_) - 1]) - 1
dfs.append(df_)
df = pd.concat(dfs, axis=0)  # api: pandas.concat
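# Added sketch (not part of the connector) of the page-size arithmetic used in query()
# above: how a requested _count is split into per-request counts given max_count.
def _demo_page_counts(count, max_count):
    n_page = math.ceil(count / max_count)
    remain = count % max_count
    sizes = []
    for i in range(n_page):
        remain = remain if remain > 0 else max_count
        sizes.append(max_count if i < n_page - 1 else remain)
    return sizes  # e.g. _demo_page_counts(120, 50) -> [50, 50, 20]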
# -*- encoding:utf-8 -*-
import pandas as pd
import numpy as np
from datetime import datetime
dire = '../../data/'
start = datetime.now()
orderHistory_train = pd.read_csv(dire + 'train/orderHistory_train.csv', encoding='utf-8')
orderFuture_train = pd.read_csv(dire + 'train/orderFuture_train3.csv', encoding='utf-8')
userProfile_train = pd.read_csv(dire + 'train/userProfile_train.csv', encoding='utf-8')
userComment_train = pd.read_csv(dire + 'train/userComment_train.csv', encoding='utf-8')
action_train = pd.read_csv(dire + 'train/action_train.csv', encoding='utf-8')
orderHistory_test = pd.read_csv(dire + 'test/orderHistory_test.csv', encoding='utf-8')
orderFuture_test = pd.read_csv(dire + 'test/orderFuture_test3.csv', encoding='utf-8')
userProfile_test = pd.read_csv(dire + 'test/userProfile_test.csv', encoding='utf-8')
userComment_test = pd.read_csv(dire + 'test/userComment_test.csv', encoding='utf-8')
action_test = pd.read_csv(dire + 'test/action_test.csv', encoding='utf-8')
# """
############# 1.user feature #############
"""
# 1. Classify the user's province into tier-1 / tier-2 / tier-3 cities
"""
def province_123(userProfile, orderFuture):
province_1 = ['上海', '北京', '广东']
province_2 = ['福建', '重庆', '山东', '湖南', '陕西', '广西', '辽宁', '安徽', '河北', '重庆', '四川', '湖北', '江苏', '浙江', '天津']
province_3 = ['云南', '黑龙江', '河南', '江西', '贵州', '山西', '内蒙古', '甘肃', '新疆', '海南', '宁夏', '青海', '西藏']
userProfile['province_123'] = None
userProfile.loc[userProfile['province'].isin(province_1), 'province_123'] = 1
userProfile.loc[userProfile['province'].isin(province_2), 'province_123'] = 2
userProfile.loc[userProfile['province'].isin(province_3), 'province_123'] = 3
print(userProfile[['userid', 'province', 'province_123']])
order = pd.merge(orderFuture, userProfile[['userid', 'province_123']], on='userid', how='left')
return order
# # orderFuture_train = province_123(userProfile_train, orderFuture_train)
# # orderFuture_test = province_123(userProfile_test, orderFuture_test)
############# 2.history order feature #############
"""
# 1.
"""
# Share of premium ("boutique") order type for each city / country / continent in the order history
def history_type1_rate(orderFuture, orderHistory):
all = len(orderHistory)
print("all:", all)
city_type1_rate = pd.DataFrame(columns=['city', 'city_rate'])
country_type1_rate = pd.DataFrame(columns=['country', 'country_rate'])
continent_type1_rate = pd.DataFrame(columns=['continent', 'continent_rate'])
city1 = []
country1 = []
continent1 = []
city_rate = []
country_rate = []
continent_rate = []
city_list = list(set(list(orderHistory.city)))
print(len(city_list))
country_list = list(set(list(orderHistory.country)))
continent_list = list(set(list(orderHistory.continent)))
for city in city_list:
city1.append(city)
city_rate.append((len(orderHistory[orderHistory['city'] == city])/all)*(len(orderHistory[(orderHistory['city'] == city) & (orderHistory['orderType'] == 1)])/len(orderHistory[orderHistory['city'] == city])))
for country in country_list:
country1.append(country)
country_rate.append((len(orderHistory[orderHistory['country'] == country])/all)*(len(orderHistory[(orderHistory['country'] == country) & (orderHistory['orderType'] == 1)])/len(orderHistory[orderHistory['country'] == country])))
for continent in continent_list:
continent1.append(continent)
continent_rate.append((len(orderHistory[orderHistory['continent'] == continent])/all)*(len(orderHistory[(orderHistory['continent'] == continent) & (orderHistory['orderType'] == 1)])/len(orderHistory[orderHistory['continent'] == continent])))
city_type1_rate['city'] = city1
city_type1_rate['city_rate'] = city_rate
country_type1_rate['country'] = country1
country_type1_rate['country_rate'] = country_rate
continent_type1_rate['continent'] = continent1
continent_type1_rate['continent_rate'] = continent_rate
orderHistory = pd.merge(orderHistory, city_type1_rate, on='city', how='left')
orderHistory = pd.merge(orderHistory, country_type1_rate, on='country', how='left')
orderHistory = pd.merge(orderHistory, continent_type1_rate, on='continent', how='left')
orderHistory = orderHistory.groupby(orderHistory.userid)['city_rate', 'country_rate', 'continent_rate'].mean().reset_index()
orderFuture = pd.merge(orderFuture, orderHistory[['userid', 'city_rate', 'country_rate', 'continent_rate']], on='userid', how='left')
return orderFuture
# orderFuture = pd.concat([orderFuture_train,orderFuture_test])
# orderHistory = pd.concat([orderHistory_train,orderHistory_test])
# dataset = history_type1_rate(orderFuture, orderHistory)
# orderFuture_train = dataset[dataset.orderType.notnull()]
# orderFuture_test = dataset[dataset.orderType.isnull()]
############# 3.action feature #############
"""
# 1. Number of times action types greater than 6 appear in the action log
# 2. Ratio of the sum of clicks 2-4 to clicks 5-9 (corresponding records)
# 3. Ratio of the sum of clicks 2-4 to clicks 5-9 (all records)
# 4. Average time spent on action types 1-9 (corresponding browsing records)
# 5. Average time spent on action types 1-9 (all browsing records)
"""
# Number of times action types greater than 6 appear in the action log
def greater_6_c(orderFuture):
action_7_c = orderFuture['action_7_c'].fillna(0)
action_8_c = orderFuture['action_8_c'].fillna(0)
action_9_c = orderFuture['action_9_c'].fillna(0)
orderFuture['action_greater_7_c'] = action_7_c + action_8_c + action_9_c
return orderFuture
orderFuture_train = greater_6_c(orderFuture_train)
orderFuture_test = greater_6_c(orderFuture_test)
# Ratio of the sum of clicks 2-4 to clicks 5-9 (corresponding records)
def rate_24_59_c(orderFuture):
action = orderFuture.fillna(0)
orderFuture['rate_1_59_c'] = (action['action_1_c'])/(action['action_5_c'] + action['action_6_c'] + action['action_7_c'] + action['action_8_c'] + action['action_9_c'])
orderFuture['rate_24_59_c'] = (action['action_2_c'] + action['action_3_c'] + action['action_4_c'])/(action['action_5_c'] + action['action_6_c'] + action['action_7_c'] + action['action_8_c'] + action['action_9_c'])
# orderFuture['rate_time_1_59_c'] = (action['time_1_c'])/(action['time_5_c'] + action['time_6_c'] + action['time_7_c'] + action['time_8_c'] + action['time_9_c'])
return orderFuture
orderFuture_train = rate_24_59_c(orderFuture_train)
orderFuture_test = rate_24_59_c(orderFuture_test)
# Ratio of the sum of clicks 2-4 to clicks 5-9 (all records)
def rate_24_59(orderFuture):
action = orderFuture.fillna(0)
orderFuture['rate_1_59'] = (action['action_1'])/(action['action_5'] + action['action_6'] + action['action_7'] + action['action_8'] + action['action_9'])
orderFuture['rate_24_59'] = (action['action_2'] + action['action_3'] + action['action_4'])/(action['action_5'] + action['action_6'] + action['action_7'] + action['action_8'] + action['action_9'])
# orderFuture['rate_time_1_59'] = (action['time_1'])/(action['time_5'] + action['time_6'] + action['time_7'] + action['time_8'] + action['time_9'])
return orderFuture
orderFuture_train = rate_24_59(orderFuture_train)
orderFuture_test = rate_24_59(orderFuture_test)
# Type of the user's last action over all action records
def latest_actionType(orderFuture, action):
latest = action.groupby(['userid']).last().reset_index()
latest.rename(columns={'actionType': 'latest_actionType'}, inplace=True)
orderFuture = pd.merge(orderFuture, latest[['userid', 'latest_actionType']], on='userid', how='left')
return orderFuture
orderFuture_train = latest_actionType(orderFuture_train, action_train)
orderFuture_test = latest_actionType(orderFuture_test, action_test)
# Types of the 2nd- through 6th-to-last actions over all action records
def latest2_actionType(orderFuture, action):
userid = []
latest_2_actionType = []
latest_3_actionType = []
latest_4_actionType = []
latest_5_actionType = []
latest_6_actionType = []
latest = action.groupby(['userid'])['actionTime'].idxmax().reset_index()
latest_2 = latest
for index, row in latest.iterrows():
userid.append(row.userid)
if(row.userid == action['userid'][row.actionTime-1]):
latest_2_actionType.append(action['actionType'][row.actionTime-1])
else:
latest_2_actionType.append(None)
if (row.userid == action['userid'][row.actionTime - 2]):
latest_3_actionType.append(action['actionType'][row.actionTime - 2])
else:
latest_3_actionType.append(None)
if (row.userid == action['userid'][row.actionTime - 3]):
latest_4_actionType.append(action['actionType'][row.actionTime - 3])
else:
latest_4_actionType.append(None)
if (row.userid == action['userid'][row.actionTime - 4]):
latest_5_actionType.append(action['actionType'][row.actionTime - 4])
else:
latest_5_actionType.append(None)
if (row.userid == action['userid'][row.actionTime - 5]):
latest_6_actionType.append(action['actionType'][row.actionTime - 5])
else:
latest_6_actionType.append(None)
latest_2['latest_2_actionType'] = latest_2_actionType
latest_2['latest_3_actionType'] = latest_3_actionType
latest_2['latest_4_actionType'] = latest_4_actionType
latest_2['latest_5_actionType'] = latest_5_actionType
latest_2['latest_6_actionType'] = latest_6_actionType
orderFuture = pd.merge(orderFuture, latest_2[['userid', 'latest_2_actionType', 'latest_3_actionType',
'latest_4_actionType', 'latest_5_actionType', 'latest_6_actionType']], on='userid', how='left')
return orderFuture
orderFuture_train = latest2_actionType(orderFuture_train, action_train)
orderFuture_test = latest2_actionType(orderFuture_test, action_test)
# Time intervals between actions
# Intervals of the last 1 / 2 / 3 / 4 operations
# Mean / min / max / variance of the intervals
def time_interval(orderFuture, action):
# 1
latest = action.groupby(['userid']).last().reset_index()
latest.rename(columns={'actionType_time': 'latest_1_time_interval'}, inplace=True)
orderFuture = pd.merge(orderFuture, latest[['userid', 'latest_1_time_interval']], on='userid', how='left')
# 2 3 4 5 6
userid = []
latest_2_time_interval = []
latest_3_time_interval = []
latest_4_time_interval = []
latest_5_time_interval = []
latest = action.groupby(['userid'])['actionTime'].idxmax().reset_index()
latest.rename(columns={'actionTime': 'max_index'}, inplace=True)
latest_2 = latest
for index, row in latest.iterrows():
userid.append(row.userid)
# 2
if (row.userid == action['userid'][row.max_index - 1]):
latest_2_time_interval.append(action['actionType_time'][row.max_index - 1])
else:
latest_2_time_interval.append(None)
# 3
if (row.userid == action['userid'][row.max_index - 2]):
latest_3_time_interval.append(action['actionType_time'][row.max_index - 2])
else:
latest_3_time_interval.append(None)
# 4
if (row.userid == action['userid'][row.max_index - 3]):
latest_4_time_interval.append(action['actionType_time'][row.max_index - 3])
else:
latest_4_time_interval.append(None)
# 5
if (row.userid == action['userid'][row.max_index - 4]):
latest_5_time_interval.append(action['actionType_time'][row.max_index - 4])
else:
latest_5_time_interval.append(None)
latest_2['latest_2_time_interval'] = latest_2_time_interval
latest_2['latest_3_time_interval'] = latest_3_time_interval
latest_2['latest_4_time_interval'] = latest_4_time_interval
latest_2['latest_5_time_interval'] = latest_5_time_interval
orderFuture = pd.merge(orderFuture, latest_2[['userid', 'latest_2_time_interval', 'latest_3_time_interval',
'latest_4_time_interval', 'latest_5_time_interval']], on='userid', how='left')
# mean of the time intervals
latest = action.groupby(['userid'])['actionType_time'].mean().reset_index()
latest.rename(columns={'actionType_time': 'actionType_time_mean'}, inplace=True)
orderFuture = pd.merge(orderFuture, latest[['userid', 'actionType_time_mean']], on='userid', how='left')  # api: pandas.merge
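# Added sketch (not from the original feature script) of the idxmax trick used in
# latest2_actionType / time_interval above: locate each user's last action row, then
# step backwards by position. The original code relies on the default RangeIndex, so
# index labels double as row positions.
def _demo_last_rows(action):
    last_idx = action.groupby("userid")["actionTime"].idxmax()
    return action.loc[last_idx, ["userid", "actionType"]]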
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 4 03:42:21 2020
@author: lukepinkel
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def crossval_mats(X, y, n, cv, categorical=False):
kfix = kfold_indices(n, cv, y, categorical)
Xf, yf, Xt, yt = [], [], [], []
for f_ix, v_ix in kfix:
Xf.append(X[f_ix])
yf.append(y[f_ix])
Xt.append(X[v_ix])
yt.append(y[v_ix])
return Xf, yf, Xt, yt
def kfold_indices(n, k, y, categorical=False, randomize=False, random_state=None):
random_state = np.random.default_rng() if random_state is None else random_state
if categorical:
_, idx = np.unique(y, return_inverse=True)
t = np.arange(y.shape[0])
fold_indices = []
for i in np.unique(idx):
ii = t[idx==i]
if randomize:
ii = random_state.permutation(ii)
fold_indices.append(ii)
splits = list(zip(*[np.array_split(ii, k) for ii in fold_indices]))
splits = [np.concatenate(x) for x in splits]
else:
idx = np.arange(n)
if randomize:
idx = random_state.permutation(idx)
splits = np.array_split(idx, k)
inds = []
for i in range(k):
fit_ind = np.concatenate([splits[j] for j in range(k) if j!=i])
inds.append([fit_ind, splits[i]])
return inds
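# Added usage sketch (not in the original module): a plain cross-validation loop over
# the folds produced by crossval_mats. The "model" is a stand-in mean predictor, purely
# illustrative; sizes and the random seed are arbitrary.
def _demo_crossval(seed=0):
    rng = np.random.default_rng(seed)
    X, y = rng.normal(size=(60, 3)), rng.normal(size=60)
    Xf, yf, Xt, yt = crossval_mats(X, y, n=60, cv=5)
    mse = []
    for X_tr, y_tr, X_te, y_te in zip(Xf, yf, Xt, yt):
        pred = np.full(len(y_te), y_tr.mean())  # stand-in for a fitted model
        mse.append(np.mean((y_te - pred) ** 2))
    return np.mean(mse)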
def process_cv(fval, lambdas):
df = pd.DataFrame(fval)
summary = pd.concat([df.mean(axis=1), df.std(axis=1) / np.sqrt(df.shape[1])], axis=1)
summary.columns = ['mean', 'std']
lambda_min = lambdas[summary.idxmin()['mean']]
return summary, lambda_min
def plot_elnet_cv(f_path, lambdas, bfits=None):
mse = pd.DataFrame(f_path[:, :, 0])
pen = pd.DataFrame(f_path[:, :, 1])
pll = | pd.DataFrame(f_path[:, :, 2]) | pandas.DataFrame |
#!/usr/bin/python2
from __future__ import nested_scopes, generators, division, absolute_import, with_statement, print_function, unicode_literals
import pandas as pd
from sklearn.model_selection import train_test_split
RANDOM_STATE = 3
all_cols = 'linenum text id subreddit meta time author ups downs authorlinkkarma authorkarma authorisgold'.split()
use_cols = 'text subreddit'.split()
all_files = 'entertainment_anime.csv entertainment_comicbooks.csv gaming_dota2.csv gaming_leagueoflegends.csv news_conservative.csv news_libertarian.csv learning_askscience.csv learning_explainlikeimfive.csv television_gameofthrones.csv television_thewalkingdead.csv'.split()
malformed_files = [1, 1, 0, 0, 0, 0, 0, 0, 0, 0]
def open_with_pandas_read_csv(filename):
if malformed_files[all_files.index(filename[7:])] == 1:
df = pd.read_csv(filename, header=0, usecols=use_cols, names=['linenum'] + all_cols, skiprows=1)
else:
df = pd.read_csv(filename, header=0, usecols=use_cols, names=all_cols)
return df
def clean_data(df):
df = df.dropna()
df = df.drop_duplicates()
def remove_links(text):
# remove word after each word in banned_prewords
banned_prewords = "http https v".split()
words = text.split()
to_delete = []
for i, word in enumerate(words):
if word in banned_prewords:
if i + 1 < len(words):
to_delete.append(i + 1)
for i in reversed(to_delete):
del words[i]
text = " ".join(words)
return text
df['text'] = df['text'].apply(remove_links)
df = df[df['text'] != '']
return df
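# Added toy example (not in the original script) of what clean_data does: the token
# following "http"/"https"/"v" is dropped and rows left empty are removed.
def _demo_clean():
    df = pd.DataFrame({"text": ["see http example.com now", "v abc123", ""],
                       "subreddit": ["anime", "dota2", "anime"]})
    return clean_data(df)["text"].tolist()  # -> ["see http now", "v"]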
def file_to_dataframe(filename):
df = open_with_pandas_read_csv(filename)
df = clean_data(df)
return df
def main():
print("Reading all files")
frames = [file_to_dataframe('../res/' + filename) for filename in all_files]
all_data = pd.concat(frames)
all_data.to_csv('../res/data_all.csv')
print("Creating small sample for testing purposes")
small_sample = pd.concat([df.sample(n=100, random_state=RANDOM_STATE) for df in frames])
small_sample.to_csv('../res/data_sample.csv')
print("splitting all data into train & test sets")
train_test_splits = [train_test_split(df, test_size=0.2, random_state=RANDOM_STATE) for df in frames]
training_and_validation = [train for (train, test) in train_test_splits]
print("splitting training set into train & validation sets")
validation_splits = [train_test_split(df, test_size=1.0 / 8, random_state=RANDOM_STATE) for df in training_and_validation]
training = [train for (train, valid) in validation_splits]
validation = [valid for (train, valid) in validation_splits]
training = pd.concat(training)
testing = pd.concat([test for (train, test) in train_test_splits])
validation = pd.concat(validation)  # api: pandas.concat
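# Added note-as-code (not in the original script): the two-stage split above gives
# roughly 70/10/20 train/validation/test per subreddit, since 0.8 * 7/8 = 0.7 and
# 0.8 * 1/8 = 0.1. The helper below just illustrates the arithmetic.
def _demo_split_sizes(n=1000):
    test = int(n * 0.2)
    valid = int((n - test) * (1.0 / 8))
    train = n - test - valid
    return train, valid, test  # approximately (700, 100, 200)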
# -*- coding: utf-8 -*-
import click
import logging
from pathlib import Path
# from dotenv import find_dotenv, load_dotenv
import requests
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
import datetime
import yfinance as yf
from pandas_datareader import data as pdr
from flask import current_app
from stk_predictor.extensions import db
@click.command()
@click.argument('input_filepath', type=click.Path(exists=True))
@click.argument('output_filepath', type=click.Path())
def main(input_filepath, output_filepath):
""" Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
logger = logging.getLogger(__name__)
logger.info('making final data set from raw data')
def get_ticker_from_yahoo(ticker, start_date, end_date):
yf.pdr_override()
try:
new_trading_df = pdr.get_data_yahoo(
ticker, start_date, end_date, interval='1d')
new_trading_df = new_trading_df.drop(
['Open', 'High', 'Low', 'Adj Close'], axis=1)
new_trading_df = new_trading_df.dropna('index')
new_trading_df = new_trading_df.reset_index()
new_trading_df.columns = ['trading_date',
'intraday_close', 'intraday_volumes']
his_trading_df = pd.read_sql('aapl', db.engine, index_col='id')
df = pd.concat([his_trading_df, new_trading_df]
).drop_duplicates('trading_date')
df = df.sort_values(by='trading_date')
df = df.reset_index(drop=True)
if len(df) > 0:
df.to_sql("aapl", db.engine, if_exists='replace', index_label='id')
return df
else:
# t = pd.read_sql('aapl', db.engine, index_col='id')
return None
except Exception as ex:
raise RuntimeError(
"Catch Excetion when retrieve data from Yahoo...", ex)
return None
def get_news_from_finviz(ticker):
"""Request news headline from finviz, according to
company ticker's name
Parameters
-----------
ticker: str
the stock ticker name
Return
----------
df : pd.DataFrame
return the latest 2 days of news headlines.
"""
current_app.logger.info("Job >> Enter Finviz news scrape step...")
base_url = 'https://finviz.com/quote.ashx?t={}'.format(ticker)
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) \
AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/50.0.2661.102 Safari/537.36'
}
parsed_news = []
try:
res = requests.get(base_url, headers=headers)
if res.status_code == 200:
texts = res.text
soup = BeautifulSoup(texts)
news_tables = soup.find(id="news-table")
for x in news_tables.findAll('tr'):
text = x.a.get_text()
date_scrape = x.td.text.split()
if len(date_scrape) == 1:
time = date_scrape[0]
else:
date = date_scrape[0]
time = date_scrape[1]
parsed_news.append([date, time, text])
# filter the recent day news
df = pd.DataFrame(parsed_news, columns=['date', 'time', 'texts'])
df['date'] = pd.to_datetime(df.date).dt.date
one_day_period = (datetime.datetime.today() -
datetime.timedelta(days=1)).date()
df_sub = df[df.date >= one_day_period]
return df_sub
else:
raise RuntimeError("HTTP response Error {}".format(
res.status_code)) from None
except Exception as ex:
current_app.logger.info("Exception in scrape Finviz.", ex)
raise RuntimeError("Exception in scrape Finviz.") from ex
def prepare_trading_dataset(df):
"""Prepare the trading data set.
Time series analysis incoporate previous data for future prediction,
We need to retrieve historical data to generate features.
Parameters
-----------
df: DataFrame
the stock ticker trading data, including trading-date, close-price, volumes
Note: the feature-engineering window is not a parameter; rolling windows of up to
400 trading days are hard-coded below.
Return
----------
array_lstm : np.array
return the array with 3 dimensions shape -> [samples, 1, features]
"""
if len(df) == 0:
raise RuntimeError(
    "Error in >>make_dataset.prepare_trading_dataset<<: "
    "received an empty trading DataFrame.") from None
else:
df['log_ret_1d'] = np.log(df['intraday_close'] / df['intraday_close'].shift(1))
df['log_ret_1w'] = pd.Series(df['log_ret_1d']).rolling(window=5).sum()
df['log_ret_2w'] = pd.Series(df['log_ret_1d']).rolling(window=10).sum()
df['log_ret_3w'] = pd.Series(df['log_ret_1d']).rolling(window=15).sum()
df['log_ret_4w'] = pd.Series(df['log_ret_1d']).rolling(window=20).sum()
df['log_ret_8w'] = pd.Series(df['log_ret_1d']).rolling(window=40).sum()
df['log_ret_12w'] = pd.Series(df['log_ret_1d']).rolling(window=60).sum()
df['log_ret_16w'] = pd.Series(df['log_ret_1d']).rolling(window=80).sum()
df['log_ret_20w'] = pd.Series(df['log_ret_1d']).rolling(window=100).sum()
df['log_ret_24w'] = pd.Series(df['log_ret_1d']).rolling(window=120).sum()
df['log_ret_28w'] = pd.Series(df['log_ret_1d']).rolling(window=140).sum()
df['log_ret_32w'] = pd.Series(df['log_ret_1d']).rolling(window=160).sum()
df['log_ret_36w'] = pd.Series(df['log_ret_1d']).rolling(window=180).sum()
df['log_ret_40w'] = pd.Series(df['log_ret_1d']).rolling(window=200).sum()
df['log_ret_44w'] = pd.Series(df['log_ret_1d']).rolling(window=220).sum()
df['log_ret_48w'] = pd.Series(df['log_ret_1d']).rolling(window=240).sum()
df['log_ret_52w'] = pd.Series(df['log_ret_1d']).rolling(window=260).sum()
df['log_ret_56w'] = pd.Series(df['log_ret_1d']).rolling(window=280).sum()
df['log_ret_60w'] = pd.Series(df['log_ret_1d']).rolling(window=300).sum()
df['log_ret_64w'] = pd.Series(df['log_ret_1d']).rolling(window=320).sum()
df['log_ret_68w'] = pd.Series(df['log_ret_1d']).rolling(window=340).sum()
df['log_ret_72w'] = pd.Series(df['log_ret_1d']).rolling(window=360).sum()
df['log_ret_76w'] = pd.Series(df['log_ret_1d']).rolling(window=380).sum()
df['log_ret_80w'] = pd.Series(df['log_ret_1d']).rolling(window=400).sum()
df['vol_1w'] = pd.Series(df['log_ret_1d']).rolling(window=5).std()*np.sqrt(5)
df['vol_2w'] = pd.Series(df['log_ret_1d']).rolling(window=10).std()*np.sqrt(10)
df['vol_3w'] = pd.Series(df['log_ret_1d']).rolling(window=15).std()*np.sqrt(15)
df['vol_4w'] = pd.Series(df['log_ret_1d']).rolling(window=20).std()*np.sqrt(20)
df['vol_8w'] = pd.Series(df['log_ret_1d']).rolling(window=40).std()*np.sqrt(40)
df['vol_12w'] = pd.Series(df['log_ret_1d']).rolling(window=60).std()*np.sqrt(60)
df['vol_16w'] = pd.Series(df['log_ret_1d']).rolling(window=80).std()*np.sqrt(80)
df['vol_20w'] = pd.Series(df['log_ret_1d']).rolling(window=100).std()*np.sqrt(100)  # api: pandas.Series (window completed from the 5-days-per-week pattern above)
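# Added design note as code (not from the original module): the repetitive rolling-
# feature block above can be generated in a loop. The week horizons mirror the ones
# used above (1-4 weeks, then every 4 weeks up to 80), at 5 trading days per week.
def _demo_rolling_features(df):
    weeks = [1, 2, 3, 4] + list(range(8, 84, 4))
    for w in weeks:
        win = 5 * w
        df[f"log_ret_{w}w"] = df["log_ret_1d"].rolling(window=win).sum()
        df[f"vol_{w}w"] = df["log_ret_1d"].rolling(window=win).std() * np.sqrt(win)
    return df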
import json
import os
import asyncio
import camera
from flask import Flask, render_template, Response, request, redirect, url_for, jsonify
import camera
import cv2
import subprocess
import os
import time
import zmq
import requests
import signal
import pandas as pd
import datetime
#import predict_clickable as pc
video_camera = None
recording_camera = None
record_proc = None
app = Flask(__name__)
@app.route('/')
@app.route('/index.html')
def index():
return render_template('index.html')
def generate_axis(width, height, step=50):
    # build grid lines as ((x1, y1), (x2, y2)) tuples; the 50-px spacing is an assumed default
    vert = [((x, 0), (x, height)) for x in range(0, width, step)]
    horiz = [((0, y), (width, y)) for y in range(0, height, step)]
    return vert + horiz
def draw_subline(width, height, vis):
line_list = generate_axis(width, height)
for i in line_list:
cv2.line(vis, i[0], i[1], (154, 231, 197) ,1)
return vis
def gn():
global video_camera
video_camera = camera.VideoStreaming()
mser = cv2.MSER_create()
out = ''
num = 0
while True:
frame = video_camera.get_frame()
ret, jpg = cv2.imencode('.JPEG', frame)
jpg_bytes = jpg.tobytes()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + jpg_bytes + b'\r\n\r\n')
# unreachable after the infinite streaming loop; `cam` and `video` were never defined,
# so resource release belongs in the camera object's own teardown instead
@app.route('/video_feed')
def video_feed():
print(type(gn()))
return Response(gn(),
mimetype='multipart/x-mixed-replace; boundary=frame')
def get_path(file_name):
device = file_name.split('_')[0]
mp4_name = file_name.split('_')[1]
default_path = '/home/kimsoohyun/00-Research/02-Graph/05-data/avi/'
path = os.path.join(default_path, get_curdate(),device,'{}.avi'.format(file_name))
return path
def change_name(mp4_name):
try:
file_path = '/home/kimsoohyun/00-Research/02-Graph/data/cut_point'
file_name = 'cut_info.csv'
origin_file_name = os.path.join(file_path, file_name)
new_file_name = os.path.join(file_path, mp4_name + '.csv')
print(new_file_name)
os.rename(origin_file_name, new_file_name)
except Exception as e:
pass
def send_starttime(filename):
#===========CHANGE=========#
print("SEND STARTTIME")
ip = 'http://localhost:8888'
starttime = int(time.time())
data = {"source":"recording","status":"start", "starttime": starttime, "appname":filename, "count":5}
res = requests.post(ip, data=data)
print(res)
def get_curdate():
cur_date = datetime.datetime.today().strftime('%Y-%m-%d')
return cur_date
def send_endtime(filename):
print("SEND ENDTIME");
ip = 'http://localhost:8888'
endtime = int(time.time())
data = {"source":"recording", "status":"end",'endtime':endtime}
print(data)
res = requests.post(ip, data=data)
write_csv(json.loads(res.text), filename)
def write_csv(json_list, filename):
appname = '_'.join(filename.split('_')[1:])
device = filename.split('_')[0]
cur_date = get_curdate()
path = f'/home/kimsoohyun/00-Research/02-Graph/05-data/cut_point/{cur_date}/{device}/{appname}.csv'
df = pd.DataFrame(json_list)  # api: pandas.DataFrame
from __future__ import division
import pytest
import numpy as np
from datetime import timedelta
from pandas import (
Interval, IntervalIndex, Index, isna, notna, interval_range, Timestamp,
Timedelta, compat, date_range, timedelta_range, DateOffset)
from pandas.compat import lzip
from pandas.tseries.offsets import Day
from pandas._libs.interval import IntervalTree
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
import pandas as pd
@pytest.fixture(scope='class', params=['left', 'right', 'both', 'neither'])
def closed(request):
return request.param
@pytest.fixture(scope='class', params=[None, 'foo'])
def name(request):
return request.param
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples(
[(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self, closed='right'):
return IntervalIndex.from_breaks(range(11), closed=closed)
def create_index_with_nan(self, closed='right'):
mask = [True, False] + [True] * 8
return IntervalIndex.from_arrays(
np.where(mask, np.arange(10), np.nan),
np.where(mask, np.arange(1, 11), np.nan), closed=closed)
def test_constructors(self, closed, name):
left, right = Index([0, 1, 2, 3]), Index([1, 2, 3, 4])
ivs = [Interval(l, r, closed=closed) for l, r in lzip(left, right)]
expected = IntervalIndex._simple_new(
left=left, right=right, closed=closed, name=name)
result = IntervalIndex(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_arrays(
left.values, right.values, closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
lzip(left, right), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = Index(ivs, name=name)
assert isinstance(result, IntervalIndex)
tm.assert_index_equal(result, expected)
# idempotent
tm.assert_index_equal(Index(expected), expected)
tm.assert_index_equal(IntervalIndex(expected), expected)
result = IntervalIndex.from_intervals(expected)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(
expected.values, name=expected.name)
tm.assert_index_equal(result, expected)
left, right = expected.left, expected.right
result = IntervalIndex.from_arrays(
left, right, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
expected.to_tuples(), closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
breaks = expected.left.tolist() + [expected.right[-1]]
result = IntervalIndex.from_breaks(
breaks, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('data', [[np.nan], [np.nan] * 2, [np.nan] * 50])
def test_constructors_nan(self, closed, data):
# GH 18421
expected_values = np.array(data, dtype=object)
expected_idx = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_idx.closed == closed
tm.assert_numpy_array_equal(expected_idx.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks([np.nan] + data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
@pytest.mark.parametrize('data', [
[],
np.array([], dtype='int64'),
np.array([], dtype='float64'),
np.array([], dtype=object)])
def test_constructors_empty(self, data, closed):
# GH 18421
expected_dtype = data.dtype if isinstance(data, np.ndarray) else object
expected_values = np.array([], dtype=object)
expected_index = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_index.empty
assert expected_index.closed == closed
assert expected_index.dtype.subtype == expected_dtype
tm.assert_numpy_array_equal(expected_index.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
def test_constructors_errors(self):
# scalar
msg = ('IntervalIndex\(...\) must be called with a collection of '
'some kind, 5 was passed')
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex(5)
# not an interval
msg = ("type <(class|type) 'numpy.int64'> with value 0 "
"is not an interval")
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex([0, 1])
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex.from_intervals([0, 1])
# invalid closed
msg = "invalid options for 'closed': invalid"
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_arrays([0, 1], [1, 2], closed='invalid')
# mismatched closed within intervals
msg = 'intervals must all be closed on the same side'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_intervals([Interval(0, 1),
Interval(1, 2, closed='left')])
with tm.assert_raises_regex(ValueError, msg):
Index([Interval(0, 1), Interval(2, 3, closed='left')])
# mismatched closed inferred from intervals vs constructor.
msg = 'conflicting values for closed'
with tm.assert_raises_regex(ValueError, msg):
iv = [Interval(0, 1, closed='both'), Interval(1, 2, closed='both')]
IntervalIndex(iv, closed='neither')
# no point in nesting periods in an IntervalIndex
msg = 'Period dtypes are not supported, use a PeriodIndex instead'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_breaks(
pd.period_range('2000-01-01', periods=3))
# decreasing breaks/arrays
msg = 'left side of interval must be <= right side'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_breaks(range(10, -1, -1))
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_arrays(range(10, -1, -1), range(9, -2, -1))
def test_constructors_datetimelike(self, closed):
# DTI / TDI
for idx in [pd.date_range('20130101', periods=5),
pd.timedelta_range('1 day', periods=5)]:
result = IntervalIndex.from_breaks(idx, closed=closed)
expected = IntervalIndex.from_breaks(idx.values, closed=closed)
tm.assert_index_equal(result, expected)
expected_scalar_type = type(idx[0])
i = result[0]
assert isinstance(i.left, expected_scalar_type)
assert isinstance(i.right, expected_scalar_type)
def test_constructors_error(self):
# non-intervals
def f():
IntervalIndex.from_intervals([0.997, 4.0])
pytest.raises(TypeError, f)
def test_properties(self, closed):
index = self.create_index(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10, )
tm.assert_index_equal(index.left, Index(np.arange(10)))
tm.assert_index_equal(index.right, Index(np.arange(1, 11)))
tm.assert_index_equal(index.mid, Index(np.arange(0.5, 10.5)))
assert index.closed == closed
ivs = [Interval(l, r, closed) for l, r in zip(range(10), range(1, 11))]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
# with nans
index = self.create_index_with_nan(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10, )
expected_left = Index([0, np.nan, 2, 3, 4, 5, 6, 7, 8, 9])
expected_right = expected_left + 1
expected_mid = expected_left + 0.5
tm.assert_index_equal(index.left, expected_left)
tm.assert_index_equal(index.right, expected_right)
tm.assert_index_equal(index.mid, expected_mid)
assert index.closed == closed
ivs = [Interval(l, r, closed) if notna(l) else np.nan
for l, r in zip(expected_left, expected_right)]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
def test_with_nans(self, closed):
index = self.create_index(closed=closed)
assert not index.hasnans
result = index.isna()
expected = np.repeat(False, len(index))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.repeat(True, len(index))
tm.assert_numpy_array_equal(result, expected)
index = self.create_index_with_nan(closed=closed)
assert index.hasnans
result = index.isna()
expected = np.array([False, True] + [False] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.array([True, False] + [True] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
def test_copy(self, closed):
expected = self.create_index(closed=closed)
result = expected.copy()
assert result.equals(expected)
result = expected.copy(deep=True)
assert result.equals(expected)
assert result.left is not expected.left
def test_ensure_copied_data(self, closed):
# exercise the copy flag in the constructor
# not copying
index = self.create_index(closed=closed)
result = IntervalIndex(index, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='same')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='same')
# by-definition make a copy
result = IntervalIndex.from_intervals(index.values, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='copy')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='copy')
def test_equals(self, closed):
expected = IntervalIndex.from_breaks(np.arange(5), closed=closed)
assert expected.equals(expected)
assert expected.equals(expected.copy())
assert not expected.equals(expected.astype(object))
assert not expected.equals(np.array(expected))
assert not expected.equals(list(expected))
assert not expected.equals([1, 2])
assert not expected.equals(np.array([1, 2]))
assert not expected.equals(pd.date_range('20130101', periods=2))
expected_name1 = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name='foo')
expected_name2 = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name='bar')
assert expected.equals(expected_name1)
assert expected_name1.equals(expected_name2)
for other_closed in {'left', 'right', 'both', 'neither'} - {closed}:
expected_other_closed = IntervalIndex.from_breaks(
np.arange(5), closed=other_closed)
assert not expected.equals(expected_other_closed)
def test_astype(self, closed):
idx = self.create_index(closed=closed)
for dtype in [np.int64, np.float64, 'datetime64[ns]',
'datetime64[ns, US/Eastern]', 'timedelta64',
'period[M]']:
pytest.raises(ValueError, idx.astype, dtype)
result = idx.astype(object)
tm.assert_index_equal(result, Index(idx.values, dtype='object'))
assert not idx.equals(result)
assert idx.equals(IntervalIndex.from_intervals(result))
result = idx.astype('interval')
tm.assert_index_equal(result, idx)
assert result.equals(idx)
result = idx.astype('category')
expected = pd.Categorical(idx, ordered=True)
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize('klass', [list, tuple, np.array, pd.Series])
def test_where(self, closed, klass):
idx = self.create_index(closed=closed)
cond = [True] * len(idx)
expected = idx
result = expected.where(klass(cond))
tm.assert_index_equal(result, expected)
cond = [False] + [True] * len(idx[1:])
expected = IntervalIndex([np.nan] + idx[1:].tolist())
result = idx.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_delete(self, closed):
expected = IntervalIndex.from_breaks(np.arange(1, 11), closed=closed)
result = self.create_index(closed=closed).delete(0)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('data', [
| interval_range(0, periods=10, closed='neither') | pandas.interval_range |
import pandas as _pd
import numpy as _np
import matplotlib.pyplot as _plt
from nicepy.beam import pressure_or_density as _p_or_d
from nicepy import u as _u
class RGA:
def __init__(self, filenames, bkg_range=(22, 26)):
"""
Collects and cleans up RGA data
:param filenames: paths to RGA data files in .txt format
:param bkg_range: amu range to set background to
"""
if type(filenames) is not list:
filenames = [filenames]
self.filenames = filenames
self.data = None
self._get_data()
self._subtract_bkg(bkg_range)
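# Example usage (illustrative sketch; the file names below are hypothetical):
#   rga = RGA(["scan_01.txt", "scan_02.txt"], bkg_range=(22, 26))
#   fig, ax = rga.show()
#   p, s = rga.get_avg_pressure(17.5, 18.5)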
def _get_data(self):
"""
Gets RGA data from .txt file
:return: DataFrame of averaged rga files
"""
data = []
for filename in self.filenames:
d = _pd.read_table(filename, header=17, delimiter=',', names=['Mass', 'Pressure'], index_col=False)
data.append(d)
if len(data) > 1:
temp = _pd.concat(data, axis=1)
else:
temp = _pd.DataFrame(data)
temp['Pressure (Torr)'] = temp['Pressure'].mean(axis=1)
temp['Mass (m/z)'] = data[0]['Mass']
temp['Sigma (Torr)'] = temp['Pressure'].sem(axis=1)
self.data = temp[['Mass (m/z)', 'Pressure (Torr)', 'Sigma (Torr)']].copy()
def _subtract_bkg(self, mass_range):
"""
Subtracts background from data set by the mass range
:param mass_range: tuple of range of masses to select for background subtraction
:return: DataFrame with background subtracted
"""
upper = max(mass_range)
lower = min(mass_range)
a = self._select_range('Mass (m/z)', lower, upper)
bkg = abs(a['Pressure (Torr)'].mean())
self.data['Pressure (Torr)'] = self.data['Pressure (Torr)'] - bkg
# self.data['Pressure (Torr)'].loc[self.data['Pressure (Torr)'] < 0] = 0
def show(self, **kwargs):
"""
Outputs plot of RGA trace
:param kwargs: plot arguments
:return: fig, ax
"""
fig, ax = _plt.subplots()
self.data.plot.line(x='Mass (m/z)', y='Pressure (Torr)', yerr='Sigma (Torr)', ax=ax, **kwargs)
return fig, ax
def _select_range(self, column, lower, upper):
"""
Selects part of data that is between values upper and lower in column
:param column: column name used to bound the selection
:param lower: lower value in column
:param upper: upper value in column
:return: parsed data frame
"""
temp = self.data[(self.data[column] <= upper) & (self.data[column] >= lower)]
return temp
def get_avg_pressure(self, lower, upper):
"""
Gets an average pressure and standard error from a mass range
:param lower: lower mass value
:param upper: upper mass value
:return: average and standard error for masses in defined mass range
"""
temp = self._select_range('Mass (m/z)', lower, upper)['Pressure (Torr)']
p = temp.mean()
if len(temp) == 0:
s = abs(p * 0.1)
else:
s = temp.sem()
return p, s
def water_iso(self, alpha=0.768, beta=0.185, gamma=0.047, h2o_range=(18.1, 18.2), hod_range=(19.1, 19.2), d2o_range=(20.1, 20.2)):
masses = {'H2O': h2o_range, 'HOD': hod_range, 'D2O': d2o_range}
def h2o(p1, p2, p3):
a = 1 / alpha
b = p1 - p2 * beta / (2 * alpha) - p3 * beta / alpha
output = a * b
return output
def hod(p2):
output = p2 / alpha
return output
def d2o(p3):
output = p3 / alpha
return output
def h2o_error(sp1, sp2, sp3):
a = sp1 / alpha
b = (beta / alpha ** 2) * sp3
c = (beta / (2 * alpha ** 2)) * sp2
output = _np.sqrt(_np.sum(_np.array([a, b, c]) ** 2))
return output
def hod_error(sp2):
a = sp2 / alpha
output = _np.sqrt(_np.sum(_np.array([a]) ** 2))
return output
def d2o_error(sp3):
a = sp3 / alpha
output = _np.sqrt(_np.sum(_np.array([a]) ** 2))
return output
vals = {}
errors = {}
for key, val in masses.items():
p, s = self.get_avg_pressure(val[0], val[1])
vals[key] = p
errors[key] = s
data = {mass: {} for mass in masses.keys()}
p1 = vals['H2O']
p2 = vals['HOD']
p3 = vals['D2O']
sp1 = errors['H2O']
sp2 = errors['HOD']
sp3 = errors['D2O']
data['H2O']['val'] = h2o(p1, p2, p3)
data['HOD']['val'] = hod(p2)
data['D2O']['val'] = d2o(p3)
data['H2O']['error'] = h2o_error(sp1, sp2, sp3)
data['HOD']['error'] = hod_error(sp2)
data['D2O']['error'] = d2o_error(sp3)
self.water_iso_pressures = _pd.DataFrame(data)
temp = {key: _p_or_d(self.water_iso_pressures.loc['val'][key] * _u.torr, 300 * _u.K).magnitude for key in self.water_iso_pressures.loc['val'].keys()}
self.water_iso_densities = | _pd.Series(temp) | pandas.Series |
# Library for parsing arbitrary valid ipac tbl files and writing them out.
# Written by: <NAME>
# at: UCLA 2012, July 18
# The main elements the user should concern themselves with are:
#
# TblCol: a class for storing an IPAC table column, including all data and
# functions needed to input/output that column.
# name - the name of the column
# type - the data type stored in the column
# units - the units of the quantity in the column (if any)
# null - the string to write in the column if the value is not valid
# or otherwise missing.
# data - list containing the column's data
# mask - boolean list containing True if the corresponding element in
# data is valid, False if not.
# Stringer - the function used to convert column values to strings.
# Parser - the function used to parse the values in an ASCII tbl.
# ResetStringer - function that insures that the stringer will produce
# columns of sufficient width to store the data.
#
# Tbl: a class that stores a complete IPAC table, including all the columns,
# comment lines, and functions needed to read/write Tbl files.
# hdr - a list containing the comment lines, one item per line.
# colnames - a list of the names of the columns. The primary importance of
# this item is that it controls the order in which columns are
# written to the output file, and even whether they are written
# out at all.
# cols - a dictionary containing the table columns as TblCols. It is
# indexed using the name of the column in question.
# Read - function for reading in an IPAC table. The only essential
# argument is fname, the name of the table to be read in.
# The optional items are:
# RowMask - a function that takes a row's zero indexed order and
# the row's raw string and returns True if the row is to
# be read in, False if it is to be ignored.
# startrow - a long integer specifying the first zero indexed data
# row/line to be read in.
# breakrow - a long integer specifying the last zero indexed data
# row/line to be read in.
# Row - a convenience function for grabbing all the data in the given
# zero indexed row from the columns, returning both the data and
# boolean mask for the row in question.
# ResetStringers - convenience function that calls the ResetStringer
# method of every column so that output fields are wide
# enough for the data they hold.
# Print - writes the table to stdout.
# header - boolean set to True if the comment strings and column
# headers are to be printed.
# Write - writes the table to the file named in fname. Will append to the
# file if append=True (WARNING: it is impossible for this library
# to guarantee that this operation will produce a valid IPAC tbl
# file. Use at your own risk).
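#
# Example usage (illustrative sketch; "catalog.tbl" is a hypothetical file name):
#
#   t = Tbl("catalog.tbl")        # read an existing IPAC table
#   row = t.Row(0)                # grab the first data row
#   t.ResetStringers()            # make sure column widths fit the data
#   t.Write("catalog_copy.tbl")   # write the table back out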
import sys
import os
import gzip as gz
if sys.version_info[0] >= 3:
long = int
decode = lambda x: x.decode()
encode = lambda x: bytes(x, "ascii")
else:
decode = lambda x: x
encode = lambda x: x
class FormatError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def IPACExpandType( IPACtp, shrink=False ):
"""Takes the header from an IPAC table and parses it into the full ipac
name of the type if shrink==False. If shrink != False, return the 1
character IPAC table type."""
if len(IPACtp) == 0:
raise ValueError( "IPACExpandType requires a string with length at least 1." )
pfx = IPACtp[0]
if shrink == False:
if pfx == "i":
result = "int"
elif pfx == "l":
result = "long"
elif pfx == "f":
result = "float"
elif pfx == "d":
result = "double"
elif pfx == "r":
result = "real"
elif pfx == "c":
result = "char"
elif IPACtp == "t" or ( len(IPACtp) > 1 and IPACtp[:1] == "da" ):
result = "date"
else:
raise ValueError( "Invalid IPAC type supplied to IPACExpandType: " + IPACtp )
else:
if pfx in ( "i", "l", "f", "d", "r", "c" ):
result = pfx
elif IPACtp == "t" or ( len(IPACtp) > 1 and IPACtp[:1] == "da" ):
result = "t"
else:
raise ValueError( "Invalid IPAC type supplied to IPACExpandType: " + IPACtp )
return result
def IPACtoPythonType( IPACtp ):
if IPACtp in ( "c", "char", "t", "date" ):
return type("a")
elif IPACtp in ( "i", "int" ):
return type(int(1))
elif IPACtp in ( "l", "long" ):
return type(long(1))
elif IPACtp in ( "d", "double", "f", "float", "r", "real" ):
return type( float(1.0) )
else:
raise ValueError("Argument valtype to IPACtoPythonType must be a " + \
"valid IPAC table column type. " + \
"Type given: " + valtype )
def MakeStringer( valtype, width, null="null", precision=None ):
#Check argument validity
if type(valtype) != type("a"):
raise TypeError("MakeStringer's first argument must be a string.")
if type(null) != type("a"):
raise TypeError("MakeStringer's null argument must be a string.")
if type(width) != type(int(1)):
raise TypeError("MakeStringer's width argument must be an int.")
if width <= 0:
raise ValueError("the width passed to MakeStringer must be > 0.")
if precision != None:
if type(precision) != type(int(1)):
raise TypeError("MakeStringer's precision argument must" +
" be an int.")
if precision <= 0:
raise ValueError("the precision passed to MakeStringer" +
" must be > 0.")
#Format string stuff doesn't work so well.
# #Make the formatting string
# valfmtstring = "{0: ^" + str(width)
# if precision != None:
# valfmtstring += "." + str(precision)
# if valtype in ( "c", "char", "date" ):
# valfmtstring += "s"
# elif valtype in ( "i", "int", "l", "long" ):
# valfmtstring += "d"
# elif valtype in ( "d", "double", "f", "float", "r", "real" ):
# valfmtstring += "g"
# else:
# raise ValueError("Argument valtype to MakeStringer must be a " + \
# "valid IPAC table column type. " + \
# "Type given: " + valtype )
# valfmtstring += "}"
padstring = "{0: ^" + str(width) + "s}"
def result( val, mask ):
if mask == True:
r = padstring.format(str(val))
if len(r) > width:
raise FormatError( "Column width insufficient. Width " +
str(width) + ", " + str(val) )
else:
r = padstring.format(null)
if len(r) > width:
raise FormatError( "Column width insufficient. Width " +
str(width) + ", " + str(null) )
return r
result.width = width
return result
def MakeParser( valtype, null="null" ):
if type(valtype) != type("a"):
raise TypeError("MakeParser's first argument must be a string.")
if type(null) != type("a"):
raise TypeError("MakeParser's null argument must be a string.")
if valtype in ( "i", "int" ):
baseparse = int
default = 1
elif valtype in ( "l", "long" ):
baseparse = long
default = long(1)
elif valtype in ( "d", "double", "f", "float", "r", "real" ):
def baseparse( x ):
try:
return float(x)
except ValueError:
return float.fromhex(x)
default = 1.0
elif valtype in ( "c", "char", "t", "date" ):
baseparse = lambda x: x
default = ""
else:
raise ValueError("Argument valtype to MakeParser must be a " + \
"valid IPAC table column type. " + \
"Type given: " + valtype )
def parser( x ):
y = x.strip()
if y != null:
return ( baseparse( y ), True )
else:
return ( default, False )
return parser
def MakeNullParser( valtype, null="null" ):
if type(valtype) != type("a"):
raise TypeError("MakeParser's first argument must be a string.")
if type(null) != type("a"):
raise TypeError("MakeParser's null argument must be a string.")
if valtype in ( "i", "int" ):
default = 1
elif valtype in ( "l", "long" ):
default = long(1)
elif valtype in ( "d", "double", "f", "float", "r", "real" ):
default = 1.0
elif valtype in ( "c", "char", "t", "date" ):
default = ""
else:
raise ValueError("Argument valtype to MakeParser must be a " + \
"valid IPAC table column type. " + \
"Type given: " + valtype )
def parser( x ):
return ( default, False )
return parser
class TblCol:
def __init__(self):
self.name = ""
self.type = ""
self.units = ""
self.null = "null"
self.mask = []
self.data = []
self.Stringer = lambda x, y: "undefined"
#to be defined
self.Parser = None
def __len__(self):
if len(self.data) == len(self.mask):
return len(self.data)
else:
raise FormatError( "Length of mask, " + str(len(self.mask)) +
", inconsistent with data, " +
str(len(self.data)) + "." )
def ResetStringer( self ):
width = max( len(self.name), len(self.type), len(self.units),
len(self.null) )
for v, m in zip(self.data, self.mask):
if m == True:
width = max( width, len(str(v)) )
#width += 10 #Deal with python's crappy formatting funcs
self.Stringer = MakeStringer( self.type, width, null=self.null );
return None
def ResetParser( self ):
self.Parser = MakeParser( self.type, null=self.null );
return None
import sys
class TblRow:
def __init__(self, colnames=[]):
self.colnames = colnames
self.data = [ None for n in colnames ]
self.mask = [ False for n in colnames ]
return None
def __getitem__(self, k):
if type(k) in ( type(1), type(long(1)) ):
return ( self.data[k], self.mask[k] )
elif type(k) == type("a"):
if k in self.colnames:
i = self.colnames.index(k)
return( self.data[i], self.mask[i])
else:
raise KeyError( "Column name given not understood by row." )
else:
raise TypeError("Rows must be indexed by a string, integer, or long.")
return None
def __setitem__( self, k, val ):
if type(k) in ( type(1), type(long(1)) ):
if k < len(self.data) and k >= -len(self.data):
self.data[k] = val
self.mask[k] = True
return( val, True )
else:
raise IndexError( "list index out of range" )
elif type(k) == type("a"):
if k in self.colnames:
i = self.colnames.index(k)
self.data[i] = val
self.mask[i] = True
return( val, True )
else:
raise KeyError( "Column name given not understood by row.")
else:
raise TypeError("Rows must be indexed by a string, integer, or long.")
return None
def __delitem__( self, k ):
if type(k) in ( type(1), type(long(1)) ):
if k < len(self.data) and k >= -len(self.data):
self.mask[k] = False
else:
raise IndexError( "list index out of range" )
elif type(k) == type("a"):
if k in self.colnames:
i = self.colnames.index(k)
self.mask[i] = False
else:
raise KeyError( "Column name given not understood by row.")
else:
raise TypeError("Rows must be indexed by a string, integer, or long.")
return None
def ReadTable( fname, RowMask = lambda x, y: True, startrow=long(0),
breakrow=None, gzip=False ):
"""Function for reading IPAC tables into a dictionary of Tblcolumns. Will
only read lines for which the function RowMask returns True when passed
the row number and row. Rows are zero indexed, just like Python lists.
The first row read will be startrow, and will not read past breakrow.
Does not support universal line endings for gzipped files."""
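    # Example (illustrative; the file name and predicate are hypothetical):
    #   hdr, names, cols = ReadTable("catalog.tbl",
    #                                RowMask=lambda i, line: i % 2 == 0,
    #                                breakrow=long(1000))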
if gzip != True:
f = open(fname, "rb")
else:
f = gz.open( fname, "r" )
#Read past header
hdrlines = []
cnames = []
while (True):
l = decode(f.readline())
if (l[0] == "\\"):
hdrlines.append( l.rstrip("\n\r") )
elif (l[0] == "|"):
break
else:
raise FormatError("The header of file " + fname +
" has an error in it.")
linelen = len(l)
rawcolnames = (l.strip("|\n\r")).split("|")
#The "-" in headers part of the spec can cause problems for
# negative numerical null values, but it seems to be a problem
# inherent in the spec.
colnames = [ x.strip(" -") for x in rawcolnames ]
cols = {}
rawcoltypes = ((decode(f.readline())).strip("|\n\r")).split("|")
coltypes = [ IPACExpandType( x.strip(" -") ) for x in rawcoltypes ]
for n, r, t in zip(colnames, rawcolnames, coltypes):
newcol = TblCol()
newcol.width = len(r)
newcol.type = t
newcol.name = n
cols[n] = newcol
pos = f.tell()
l = decode(f.readline())
if ( l[0] != "|" ):
#We've read past the header
f.seek(pos)
else:
units = (l.strip("|\n\r")).split( "|" )
if ( len(units) != len( cols ) ):
raise FormatError( "Header format broken." )
for n, u in zip( colnames, units ):
cols[n].units = u.strip( " -" )
pos = f.tell()
l = decode(f.readline())
if ( l[0] != "|" ):
#We've read past the header
f.seek(pos)
else:
nulls = (l.strip("|\n\r")).split( "|" )
if ( len(nulls) != len( cols ) ):
raise FormatError( "Header format broken." )
for n, nl in zip( colnames, nulls ):
cols[n].null = nl.strip( " -" )
#Define the stringer and parser functions
colwidths = [ len(r) for r in rawcolnames ]
for n, w in zip(colnames, colwidths):
tp = cols[n].type
nl = cols[n].null
cols[n].Stringer = MakeStringer( tp, w, null=nl )
cols[n].Parser = MakeParser( tp, null=nl )
#read past ignored rows
if startrow > long(0) and gzip == False:
f.seek( long(linelen) * long(startrow), os.SEEK_CUR )
elif startrow > long(0) and gzip == True: #Read past the hard way
for i in range( startrow ):
dummy = f.readline()
del(dummy)
colstarts = [ 1 ]
colends = []
for w in colwidths:
colends.append( colstarts[-1] + w )
colstarts.append( colends[-1] + 1 )
del colstarts[-1]
# colstarts = [ 1 for w in colwidths ]
# colends = [ 1 + w for w in colwidths ]
# for i in range(1, len(colwidths)):
# colstarts[i] = colstarts[i - 1] + colwidths[i-1] + 1
# colends[i] = colends[i - 1] + colwidths[i] + 1
parsers = [ MakeParser( cols[nm].type, null=cols[nm].null )
for nm in colnames ]
alldata = [ [] for n in colnames ]
allmask = [ [] for n in colnames ]
rownum = long(startrow)
for line in f:
line = decode(line)
if breakrow != None and rownum >= breakrow:
break;
if RowMask(rownum, line) != True:
continue
rownum += long(1)
parts = [ line[start:end] for start, end in zip( colstarts, colends ) ]
for p, par, i in zip( parts, parsers, range(len(colnames)) ):
r = par( p )
alldata[i].append( r[0] )
allmask[i].append( r[1] )
for n, d, m in zip( colnames, alldata, allmask ):
cols[n].data = d
cols[n].mask = m
f.close()
return [ hdrlines, colnames, cols ]
class Tbl:
def Read( self, fname, RowMask = lambda x, l: True, startrow=long(0),
breakrow=None, gzip=False ):
"""Function for reading IPAC tables into a the Tbl. Will
only read lines for which the function RowMask returns True for the
row number. Rows are zero indexed, just like Python lists."""
if type(fname) == type("asdf"):
self.hdr, self.colnames, self.cols = ReadTable( fname,
RowMask=RowMask,
startrow=startrow,
breakrow=breakrow,
gzip=gzip)
else:
raise TypeError(" tbl file name must be a string.")
return None
def __init__( self, fname = "", gzip=False ):
if fname == "":
self.hdr = []
self.colnames = []
self.cols = {}
else:
self.Read( fname, gzip=gzip )
return None
def __len__(self):
return len(self.cols.keys())
def Row(self, rownum):
result = TblRow()
#Prep the structures - this avoids dereferencing the column dict twice
result.colnames = [ x for x in self.colnames ]
result.data = [ None for k in self.colnames ]
result.mask = [ False for k in self.colnames ]
for i in range(len(self.colnames)):
col = self.cols[self.colnames[i]]
result.data[i] = col.data[rownum]
result.mask[i] = col.mask[rownum]
return result
def ResetStringers(self):
"""Updates the stringer functions to ensure that they have the
null and type specified by the columns and produce fields wide
enough to hold all the values in the columns."""
#sys.stderr.write(str(self.cols.keys()) + "\n")
for k in self.colnames:
self.cols[k].ResetStringer()
return None
def __out(self, ofile, header=True):
"""Prints the contents of the table. if "header" is set to false,
only the data is printed. The column widths are set by the
colwidths list, the order is set by colnames, special null values
are set by nulls, and units set by units. """
if header == True:
colwidths = [ len(self.cols[k].Stringer( None, False ) ) \
for k in self.colnames ]
def hdrstrn( strs, wids ):
r = [ ("{0: ^" + str(w) + "s}").format(s) \
for s, w in zip( strs, wids ) ]
for v, w in zip( r, wids ):
if len(v) > w:
raise FormatError( "column width insufficient.")
return "|" + "|".join(r) + "|\n"
for l in self.hdr:
ofile.write(encode(l + "\n"))
l = hdrstrn( self.colnames, colwidths )
ofile.write(encode( l ))
coltypes = [ self.cols[k].type for k in self.colnames ]
l = hdrstrn( coltypes, colwidths )
ofile.write(encode( l ))
units = [ self.cols[k].units for k in self.colnames ]
l = hdrstrn( units, colwidths )
ofile.write(encode( l ))
nulls = [ self.cols[k].Stringer( "asdf", False ) \
for k in self.colnames ]
l = hdrstrn( nulls, colwidths )
ofile.write(encode( l ))
for i in range(len(self.cols[self.colnames[0]])):
strcols = [ self.cols[n].Stringer(self.cols[n].data[i],
self.cols[n].mask[i])
for n in self.colnames ]
ofile.write(encode( " " + " ".join( strcols ) + " \n" ))
return None
def Print(self, header=True):
self.__out( sys.stdout, header=header )
return None
def Write(self, fname, append=False, gzip=-1):
if type(gzip) != type(int(1)):
raise TypeError( "Keyword argument gzip expects an int." )
elif gzip <= -1:
op = lambda x, y: open( x, y )
else:
op = lambda x, y: gz.open( x, y, min( max(gzip, 1), 9 ) )
if append == False:
f = op( fname, "wb" )
self.__out(f, header=True)
else:
f = op( fname, "a" )
self.__out(f, header=False)
f.close()
return(None)
linebuffersize = 5 * 1024**2 #5 megabytes
class BigTbl:
def OpenRead( self, fname, gzip=False ):
self.hdr = []
self.types = {}
self.nulls = {}
self.units = {}
self.parsers = []
self.stringers = []
self.__seekable = not gzip
self.__inputbuffer = []
if gzip != True:
self.infile = open( fname, "rb" )
else:
self.infile = gz.open( fname, "r" )
#First read past the comment header
while True:
l = decode(self.infile.readline())
if l[0] == "\\":
self.hdr.append( l.rstrip( "\n\r" ) )
elif l[0] == "|":
break
else:
raise FormatError("The header of file " + fname +
" has an error in it.")
self.__inlinelen = len(l.rstrip( "\n\r" ))
#We now have the data necessary to find the column widths
rawcolnames = ( l.strip("|\n\r") ).split("|")
self.colwidths = [ len(n) for n in rawcolnames ]
self.colstarts = [ 1 ]
self.colends = []
for w in self.colwidths:
self.colends.append( self.colstarts[-1] + w )
self.colstarts.append( self.colends[-1] + 1 )
del self.colstarts[-1]
self.colnames = [ n.strip(" -") for n in rawcolnames ]
l = (decode(self.infile.readline()).strip("|\n\r")).split("|")
coltypes = [ IPACExpandType( n.strip(" -") ) for n in l ]
self.__indatstart = self.infile.tell()
#Defaults
units = [ "" for n in self.colnames ]
nulls = [ "null" for n in self.colnames ]
l = decode(self.infile.readline())
if l[0] == "|":
units = map( lambda x: x.strip(" -"),
(l.strip("|\n\r")).split( "|" ))
if len(units) != len(self.colnames):
raise FormatError( "Header format broken." )
self.__indatstart = self.infile.tell()
l = decode(self.infile.readline())
if l[0] == "|":
nulls = map( lambda x: x.strip(" -"),
(l.strip("|\n\r")).split( "|" ))
if len(nulls) != len(self.colnames):
raise FormatError( "Header format broken." )
self.__indatstart = self.infile.tell()
else:
self.infile.seek( self.__indatstart )
else:
self.infile.seek( self.__indatstart )
#Now parse the header info into the local variables
for n, t, nul, u, w in zip( self.colnames, coltypes, nulls, units,
self.colwidths ):
self.types[n] = t
self.nulls[n] = nul
self.units[n] = u
self.parsers.append( MakeParser( t, null=nul ) )
self.stringers.append( MakeStringer( t, w, null=nul ) )
return None
def CloseRead(self):
if self.infile != None:
self.infile.close()
self.infile = None
return None
def __init__(self, fname="", gzip=False ):
self.outfile = None
self.__currow = long(0)
if fname == "":
self.hdr = []
self.colnames = []
self.types = {}
self.nulls = {}
self.units = {}
self.parsers = []
self.stringers = []
self.colwidths = []
self.colstarts = []
self.colends = []
self.__seekable = False
self.outfile = None
self.infile = None
self.__inlinelen = long(0)
self.__indatstart = long(0)
self.__inputbuffer = []
else:
self.OpenRead( fname, gzip=gzip )
return None
def ReadRow( self, rownum=-long(1) ):
if self.__currow != rownum and rownum >= long(0):
if self.__seekable == True:
self.infile.seek( self.__indatstart +
self.__inlinelen * rownum )
else:
sys.stderr.write( "Warning: seeking in compressed tables " +
"is slower than uncompressed.\n" )
if rownum < self.__currow:
self.infile.seek( self.__indatstart )
seeknum = rownum
else:
seeknum = rownum - self.__currow
for i in range(seeknum):
dummy = self.infile.readline()
del(dummy, i)
self.__currow = rownum
self.__inputbuffer = []
if len( self.__inputbuffer ) == 0:
self.__inputbuffer = self.infile.readlines( linebuffersize )
#End of file reached, return None
if len( self.__inputbuffer ) == 0:
return None
line = decode( self.__inputbuffer[0] ).rstrip( "\n\r" )
del self.__inputbuffer[0]
#Check formatting
if len(line) != self.__inlinelen:
raise FormatError( "Malformed line: " + line )
result = TblRow()
result.data = [ None for n in self.colnames ]
result.mask = [ False for n in self.colnames ]
result.colnames = [ n for n in self.colnames ] #Ensures independence
result.data, result.mask = zip(*[ p(line[s:e])
for p, s, e in zip( self.parsers,
self.colstarts,
self.colends )
])
result.data = list(result.data)
result.mask = list(result.mask)
# colstart = 1
# for i, n in zip(range(len(self.colnames)), self.colnames):
# colend = colstart + self.colwidths[i]
# result.data[i], result.mask[i] = self.parsers[i](
# line[colstart:colend] )
# colstart = colend + 1
self.__currow += 1
return result
def ReadLine(self):
if len( self.__inputbuffer ) == 0:
self.__inputbuffer = self.infile.readlines( linebuffersize )
#End of file reached, return None
if len( self.__inputbuffer ) == 0:
return None
line = decode( self.__inputbuffer[0] ).rstrip( "\n\r" )
del self.__inputbuffer[0]
return line
def RefreshParsers(self):
self.parsers = [ MakeParser( self.types[n], null=self.nulls[n] )
for n in self.colnames ]
return None
def RefreshStringers(self):
self.stringers = [ MakeStringer( self.types[n], w, null=self.nulls[n] )
for n, w in zip( self.colnames, self.colwidths ) ]
return None
def WriteHeader( self ):
for l in self.hdr:
self.outfile.write(encode( l + "\n" ))
hdrstringers = [ MakeStringer( "char", w ) for w in self.colwidths ]
def hdrstrn( input ):
if type(input) == type([]):
strs = input
else:
strs = [ input[n] for n in self.colnames ]
strs = [ S( x, True ) for x, S in zip( strs, hdrstringers )]
return( "|" + "|".join( strs ) + "|\n" )
self.outfile.write(encode( hdrstrn( self.colnames ) ))
self.outfile.write(encode( hdrstrn( self.types ) ))
self.outfile.write(encode( hdrstrn( self.units ) ))
self.outfile.write(encode( hdrstrn( self.nulls ) ))
return None
def OpenWrite( self, fname, appendmode=False, gzip=-1 ):
if type(gzip) != type(int(1)):
raise TypeError( "Keyword argument gzip expects an int." )
elif gzip <= -1:
op = lambda x, y: open( x, y )
else:
op = lambda x, y: gz.open( x, y, min( max(gzip, 1), 9 ) )
if appendmode == True:
self.outfile = op( fname, "ab" )
else:
self.outfile = op( fname, "wb" )
self.WriteHeader()
return None
def CloseWrite( self ):
if self.outfile != None:
self.outfile.close()
self.outfile = None
return None
def WriteRow( self, row ):
outarr = [ row[n] for n in self.colnames ]
parts = [ S( r[0], r[1] )
for r, S in zip( outarr, self.stringers ) ]
self.outfile.write(encode( " " + " ".join( parts ) + " \n" ))
return None
def Close( self ):
self.CloseRead()
self.CloseWrite()
return None
try:
import numpy as np
def arrayize_cols( tbl ):
type_dict = { "int": np.int32, \
"long": np.int64, \
"float": np.float32, \
"double": np.float64, \
"real": np.float64, \
"char": "S", \
"date": "S" }
for c in tbl.colnames:
longtype = IPACExpandType( tbl.cols[c].type )
dtype = type_dict[ longtype ]
tbl.cols[c].data = np.asarray(tbl.cols[c].data, dtype=dtype)
tbl.cols[c].mask = np.asarray(tbl.cols[c].mask)
return None
except ImportError:
sys.stderr.write("Warning: numpy not found - ipac array features disabled.\n")
try:
import pandas as pd
def tbl_to_DFrame( tbl ):
typedict = { "int": "int32", \
"long": "int64", \
"float": "float32", \
"double": "float64", \
"real": "float64", \
"char": "str" }
df = | pd.DataFrame() | pandas.DataFrame |
from webpage.forms import ValuationForm
from webpage import app, lgbm_model, rf_model
from flask import render_template, request, jsonify
from webpage.preprocessing import Preprocessing_Pipeline
from webpage.utils import load_models
import pandas as pd
import time
@app.route('/')
def main():
return render_template('home.html')
@app.route('/contact')
def contact():
return render_template('contact.html')
@app.route('/predict', methods=['POST','GET'])
def predict():
form = ValuationForm(request.form)
if form.validate_on_submit():
data = form.data  # this is a dict
#current_app.logger.info('Received a POST request {}'.format(data))
# Validate the data.
print(data, '\n', type(data))
#Pipeline
dataframe = Preprocessing_Pipeline(data_dict = data).transform()
#Prediction
prediction_lgbm = lgbm_model.predict(dataframe)
# Arrays are not JSON-serializable, so convert:
prediction_lgbm = pd.Series(prediction_lgbm).to_json(orient='values')
prediction_rf = rf_model.predict(dataframe)
prediction_rf = pd.Series(prediction_rf).to_json(orient='values')
print("lgbm-pred:", prediction_lgbm)
return render_template('success.html', prediction_lgbm = prediction_lgbm[0], prediction_rf=prediction_rf[0])
else:
return render_template('predict.html', form = form)
@app.route('/api-predict',methods=['POST','GET'])
def api_predict():
if request.method == 'GET':
return render_template('api-predict.html')
elif request.method == 'POST':
start = time.time()
data = request.json  # this is a dict
#current_app.logger.info('Received a POST request {}'.format(data))
# Validate the data.
#Pipeline
dataframe = Preprocessing_Pipeline(data_dict = data).transform()
#Prediction
prediction_lgbm = lgbm_model.predict(dataframe)
# Arrays are not JSON-serializable, so convert:
prediction_lgbm = | pd.Series(prediction_lgbm) | pandas.Series |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import os
project_name = "reco-tut-sor"; branch = "main"; account = "sparsh-ai"
project_path = os.path.join('/content', project_name)
# In[ ]:
if not os.path.exists(project_path):
get_ipython().system(u'cp /content/drive/MyDrive/mykeys.py /content')
import mykeys
get_ipython().system(u'rm /content/mykeys.py')
path = "/content/" + project_name;
get_ipython().system(u'mkdir "{path}"')
get_ipython().magic(u'cd "{path}"')
import sys; sys.path.append(path)
get_ipython().system(u'git config --global user.email "<EMAIL>"')
get_ipython().system(u'git config --global user.name "reco-tut"')
get_ipython().system(u'git init')
get_ipython().system(u'git remote add origin https://"{mykeys.git_token}":[email protected]/"{account}"/"{project_name}".git')
get_ipython().system(u'git pull origin "{branch}"')
get_ipython().system(u'git checkout main')
else:
get_ipython().magic(u'cd "{project_path}"')
# In[103]:
get_ipython().system(u'git status')
# In[104]:
get_ipython().system(u'git add . && git commit -m \'commit\' && git push origin "{branch}"')
# ---
# This data set contains simulated data that mimics customer behavior on the Starbucks rewards mobile app. Once every few days, Starbucks sends out an offer to users of the mobile app. An offer can be merely an advertisement for a drink or an actual offer such as a discount or BOGO (buy one get one free). Some users might not receive any offer during certain weeks.
#
# Not all users receive the same offer, and that is the challenge to solve with this data set.
#
# The task is to combine transaction, demographic and offer data to determine which demographic groups respond best to which offer type. This data set is a simplified version of the real Starbucks app because the underlying simulator only has one product whereas Starbucks actually sells dozens of products.
#
# Every offer has a validity period before the offer expires. As an example, a BOGO offer might be valid for only 5 days. You'll see in the data set that informational offers have a validity period even though these ads are merely providing information about a product; for example, if an informational offer has 7 days of validity, you can assume the customer is feeling the influence of the offer for 7 days after receiving the advertisement.
#
# The provided transactional data shows user purchases made on the app including the timestamp of purchase and the amount of money spent on a purchase. This transactional data also has a record for each offer that a user receives as well as a record for when a user actually views the offer. There are also records for when a user completes an offer.
#
# Let's keep in mind as well that someone using the app might make a purchase through the app without having received an offer or seen an offer.
#
# To give an example, a user could receive a discount offer buy 10 dollars get 2 off on Monday. The offer is valid for 10 days from receipt. If the customer accumulates at least 10 dollars in purchases during the validity period, the customer completes the offer.
#
# However, there are a few things to watch out for in this data set. Customers do not opt into the offers that they receive; in other words, a user can receive an offer, never actually view the offer, and still complete the offer. For example, a user might receive the "buy 10 dollars get 2 dollars off offer", but the user never opens the offer during the 10 day validity period. The customer spends 15 dollars during those ten days. There will be an offer completion record in the data set; however, the customer was not influenced by the offer because the customer never viewed the offer.
#
# You'll also want to take into account that some demographic groups will make purchases even if they don't receive an offer. From a business perspective, if a customer is going to make a 10 dollar purchase without an offer anyway, you wouldn't want to send a buy 10 dollars get 2 dollars off offer. You'll want to try to assess what a certain demographic group will buy when not receiving any offers.
# ## Dataset
#
# The data is contained in three files:
#
# * portfolio.json - containing offer ids and meta data about each offer (duration, type, etc.)
# * profile.json - demographic data for each customer
# * transcript.json - records for transactions, offers received, offers viewed, and offers completed
#
# Here is the schema and explanation of each variable in the files:
#
# **portfolio.json**
# * id (string) - offer id
# * offer_type (string) - type of offer ie BOGO, discount, informational
# * difficulty (int) - minimum required spend to complete an offer
# * reward (int) - reward given for completing an offer
# * duration (int) - time for offer to be open, in days
# * channels (list of strings)
#
# **profile.json**
# * age (int) - age of the customer
# * became_member_on (int) - date when customer created an app account
# * gender (str) - gender of the customer (note some entries contain 'O' for other rather than M or F)
# * id (str) - customer id
# * income (float) - customer's income
#
# **transcript.json**
# * event (str) - record description (ie transaction, offer received, offer viewed, etc.)
# * person (str) - customer id
# * time (int) - time in hours since start of test. The data begins at time t=0
# * value - (dict of strings) - either an offer id or transaction amount depending on the record
# In[ ]:
import datetime
import pandas as pd
import numpy as np
import math
import json
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
get_ipython().magic(u'matplotlib inline')
# In[60]:
# read in the json files
portfolio = pd.read_json('./data/bronze/portfolio.json', orient='records', lines=True)
profile = pd.read_json('./data/bronze/profile.json', orient='records', lines=True)
transcript = pd.read_json('./data/bronze/transcript.json', orient='records', lines=True)
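# In[ ]:


# Illustrative sketch (not part of the original notebook): the transcript 'value'
# column holds a dict per record -- an offer id for offer events or an amount for
# transactions, as described in the schema above. The exact key names are an
# assumption here, so treat the lookups below as illustrative only.
transcript_flat = transcript.copy()
transcript_flat['offer_id'] = transcript_flat['value'].apply(lambda v: v.get('offer id', v.get('offer_id')))
transcript_flat['amount'] = transcript_flat['value'].apply(lambda v: v.get('amount'))
transcript_flat.head()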
# ## Portfolio
# | attribute | description |
# | --------- | ----------- |
# | id | offer id |
# | offer_type | type of offer ie BOGO, discount, informational |
# | difficulty | minimum required spend to complete an offer |
# | reward | reward given for completing an offer |
# | duration | time for offer to be open, in days |
# | channels | email, web, mobile |
# In[61]:
portfolio
# In[62]:
portfolio.info()
# In[63]:
portfolio.describe().round(1)
# In[64]:
fig, ax = plt.subplots(figsize=(12,7))
portfolio.hist(ax=ax)
plt.show()
# In[65]:
portfolio.describe(include='O')
# In[66]:
portfolio.channels.astype('str').value_counts().plot(kind='barh');
# In[67]:
portfolio.offer_type.value_counts().plot(kind='barh');
# ## Transcript
# In[68]:
transcript.head()
# In[69]:
transcript.info()
# In[70]:
transcript.describe().round(1).T
# In[71]:
transcript.describe(include='O')
# In[72]:
transcript.event.astype('str').value_counts().plot(kind='barh');
# ## Profile
# In[73]:
profile.head()
# In[74]:
profile.info()
# In[75]:
profile.describe().round(1)
# In[76]:
fig, ax = plt.subplots(figsize=(12,7))
profile.hist(ax=ax)
plt.show()
# In[77]:
profile.describe(include='O')
# In[78]:
profile.gender.astype('str').value_counts(dropna=False).plot(kind='barh');
# ## Cleaning the data and Feature Engineering
#
# In[79]:
group_income = profile.groupby(['income', 'gender']).size().reset_index()
group_income.columns = ['income', 'gender', 'count']
sns.catplot(x="income", y="count", hue="gender", data=group_income,
kind="bar", palette="muted", height=5, aspect=12/5)
plt.xlabel('Income per year')
plt.ylabel('Count')
plt.title('Age/Income Distribution')
plt.savefig('./extras/images/income-age-dist-binned.png', dpi=fig.dpi)
# In[80]:
portfolio['web'] = portfolio['channels'].apply(lambda x: 1 if 'web' in x else 0)
portfolio['email'] = portfolio['channels'].apply(lambda x: 1 if 'email' in x else 0)
portfolio['mobile'] = portfolio['channels'].apply(lambda x: 1 if 'mobile' in x else 0)
portfolio['social'] = portfolio['channels'].apply(lambda x: 1 if 'social' in x else 0)
# apply one hot encoding to offer_type column
offer_type = | pd.get_dummies(portfolio['offer_type']) | pandas.get_dummies |
# Three ways to close the driver window:
#https://stackoverflow.com/questions/15067107/difference-between-webdriver-dispose-close-and-quit
# -*- coding: utf-8 -*-
import selenium
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.ui import WebDriverWait
import sys
import os
import requests
import urllib.request
import pandas as pd
Load_file_dir="C:/PYTHON/Download_Seq_files/"
Save_file_dir="C:/PYTHON/Download_Seq_files/acclist/"
seqID=[]
seqlist=[]
#input("確定要執行?")
with open(Load_file_dir+"seqID.csv","r")as file:
for line in file:
seqID.append(line[:-1])
print(seqID)
with open(Load_file_dir+"seqlist.csv","r")as file:
for line in file:
seqlist.append(line[:-1])
#print(seqlist)
"""
seqID1="Deparia_trnL_F"
seqID2="Isoetes_trnL_F"
seq1="GTGACACGAGGATTTTCAGTCCTCTGCTCTACCGACTGAGCTATCTCGGCCGACTCATTCACAGATAAAACTCAACTAGAAATCGGTGAAGTTTATTTTTCAACTCCAGCTTTTTGCCCAGTCAAACAAACCGTTGATCCCGCCTTTATCGATCTGCTTAATACAATACCGCCTTCAGCAAATAAAGTCTGGAGGGGGAGGGGGCTCGATTGAGCCATTCATGGATATTAAAATACCGAATGGCTCACTTGCAAAGTGTTTCGGAAATACCAAGTAGAAAAAAGAGTTGAAGTGGTTTCCGTATTTCGCTTACGAGCAAATTGTGTGAATCCAAACAACTTGAAATGGGTTAATTAAAGTGTCTCATTGGGGATAGAGGGACTCGAACCCTCACGGTCCTGTGAAACCAACGGATTTTCGTTCCACCGCAACTTGCGCCGCTACTTCTCATTTTACCAAGTGCGTGGAGTGGGACTCTACCTCCATTCACGGAAGTATCATTTTTTGCCACCGGCTCGCTTGCTGAATAATTTTCATTGGAACAGAGTGGGATCCCACAGCAGGTAAGAGGGGAATATGCGGACTAGCAATAGTGGAAGGAATCTACTTTGTGTTACATTACTAAGTACGAACAGAATTTCGTGAATGAGTGCTGGCCCCAAACCGAGAGGTCCGCGTCCGATTGAAGAAAGAATTGAAATTTTGATTCTTTACCAGGTCTCAGTATTATACTTCTACATATTACCTCATGCAGTTAACCACGCGAAACAGTTAGATATTCAGTTATTTCGAGAACTAGTAACTAACAGACTGCTCGTTGGAATGGCCCCCGTCGAGTCTCTGCACCTATCTTGCCTCATTGCCCGAAATTCATTTGAATAATACAAGATT"
seq2="TGAGCCTTAATATAGGANCTTAACTAAGTGATAGCTTTCAGATTCAGGGAAACCTCGGTGGAAACAATAGGCAATCCTGAGCCAAATTCCGTTGTTTCATTTCATGAACGAACGGGATAGGTGCAGAGACTCGATGGAAGCTATCCCAACGAGTGATCCTCAATACCGTATCTATGAAATAAATCATATAGATATGAATTATATGATATAATGTTTCATCTATTCCTGAAGAGGAAGAAGTGAAAGATACTATCATAGATCATACTCAGATCATGTTATTGTTTGATCAATAATAAGATGCTTAAGTTGATTTAAAATTCGAGGGTCATTCATCATTCAATATATTTATTTTTCTAAAAGATATCTATTATCTAATATCTAACGGATCAATCTTATAGTGGATGATTGGACGAGGTTAAAGATAGAGTCCGATTTTACATGCTAATATCAGCAACAATGTGAATTGTAGTAAGGAGAAAATCCGTTGGCTTTATAGGCCGTGAGGGTTCAAGTCCCTCTATCCTCAGAGAAAGTTTGATTTATTCCAAATTAAATATCCAATTCAATATTGGGATTTAATCTTTCGGTGGAAAAAATTCCCACAGCTATAGTAGGGAATAGCCAACA"
seqID=[seqID1,seqID2]
seqlist=[seq1,seq2]
#input("好了嗎?")
"""
#KX656159.1,Deparia trnL-F
#AY651835.1,Isoetes trnL-F
filename=[]
for seq,ID in zip(seqlist,seqID):
url="https://blast.ncbi.nlm.nih.gov/Blast.cgi?PROGRAM=blastn&PAGE_TYPE=BlastSearch&LINK_LOC=blasthome"
PATH="C:/Users/123/Desktop/coding/chromedriver_win32/chromedriver.exe"
driver=webdriver.Chrome(PATH)
driver.get(url)
# Enter the query sequence
search1=driver.find_element_by_xpath('//*[@id="seq"]')
search1.click()
search1.send_keys(seq)
# Scroll to the bottom of the page; after testing, 1000 px is about right
#https://codertw.com/%E7%A8%8B%E5%BC%8F%E8%AA%9E%E8%A8%80/479345/
js1000="var action=document.documentElement.scrollTop=1000"
driver.execute_script(js1000)
time.sleep(3)  # this pause is required
# Open the advanced options, then scroll down a bit further
more_search=driver.find_element_by_xpath('//*[@id="btnDescrOver"]')
more_search.click()
js1100="var action=document.documentElement.scrollTop=1100"
driver.execute_script(js1100)
time.sleep(3)  # this pause is required
# Set the number of displayed results to 1000
sort=driver.find_element_by_xpath('//*[@id="NUM_SEQ"]')
print(sort)######
sort.click()
time.sleep(2)
sort1000 = driver.find_element_by_xpath('//*[@id="NUM_SEQ"]/option[6]')
sort1000.click()
Click_any = driver.find_element_by_xpath('//*[@id="expectHelp"]/i')
Click_any.click()
# Scroll down and click BLAST
js1500="var action=document.documentElement.scrollTop=1500"
driver.execute_script(js1500)
time.sleep(3)  # this pause is required
BLAST = driver.find_element_by_xpath('//*[@id="blastButton2"]/input')
BLAST.click()
time.sleep(10)
# Extract the RID from the results page
try:
RID= driver.find_element_by_xpath('//*[@id="hsum"]/div[1]/dl/dd[2]/a')
except:
print("------------------fail-------------------")
time.sleep(20)
RID= driver.find_element_by_xpath('//*[@id="hsum"]/div[1]/dl/dd[2]/a')
print("--------------------------------")
print("本次blast序號為",RID.text)
RIDtext=RID.text
print("--------------------------------")
RIDurl="https://blast.ncbi.nlm.nih.gov/Blast.cgi?RESULTS_FILE=on&RID=%s&FORMAT_TYPE=CSV&FORMAT_OBJECT=Alignment&DESCRIPTIONS=1000&ALIGNMENT_VIEW=Tabular&CMD=Get"%RIDtext
accID=[]
# After testing: it takes roughly 90 BLAST runs before NCBI starts refusing access;
# earlier failures were probably just not waiting long enough.
# Collect the accession numbers from the tabular results.
for line in urllib.request.urlopen(RIDurl):
#print(line)  # the raw lines are byte strings
linestr=line.decode("utf-8")
#print(type(linestr))  # now a str
if len(linestr)>=10:
linelist=linestr.split(',')
#print(type(linelist))  # now a list
try:
accessionID=linelist[1]
#print(accessionID)  # prints the accession number correctly
accID.append(accessionID)
# good
except:
pass
#print("---------------------------")
accID_x = | pd.DataFrame(accID) | pandas.DataFrame |
"""inventory_valutation.py docstring
### This script is for generating an inventory valuation pivot
table from a product export
TODO
- how to validate/verify this data is correct?
- make file name a system input
- rework entire script into class
Author: <NAME>
"""
import collections
import datetime as dt
import configparser
import pandas as pd
import numpy as np
import my_utils
def lists_calc(list0, list1):
a = list0
b = list1
a_multiset = collections.Counter(a)
b_multiset = collections.Counter(b)
overlap = list((a_multiset & b_multiset).elements())
a_remainder = list((a_multiset - b_multiset).elements())
b_remainder = list((b_multiset - a_multiset).elements())
return overlap, a_remainder, b_remainder
def convert_zeros(ele):
if ele == 0:
return np.nan
else:
return ele
def bool_fxn(col1, col2, col_bool):
if col_bool:
return col2
else:
return col1
def clean_csv(file_path):
df = pd.read_csv(file_path)
df.columns = my_utils.process_cols_v2(list(df))
return df
def run():
today = str(dt.datetime.today()).split(" ")[0]
config = my_utils.import_configs()
config_dict = dict(config.items("dir_info"))
path = config_dict["gen_path"]
prod_path_bs = my_utils.most_recent_product_file(path)
print("\n", prod_path_bs, "\n")
prod_path_ls = config_dict["ls_prods"]
files = [prod_path_bs, prod_path_ls]
dfs = [clean_csv(file_path) for file_path in files]
# filter by column criteria
df = dfs[0].loc[dfs[0].product_type == "P"]
df = df.loc[pd.isnull(df.product_code_sku) == False]
df = df.dropna(axis=1, how="all")
df = df.loc[df.product_visible == "Y"]
# create dataframe of lightspeed overlapping SKUs
overlap, _, _ = lists_calc(list(df.product_code_sku), list(dfs[1].custom_sku))
overlap.remove("MVP-147") # duplicate in lightspeed df
skus = pd.DataFrame(overlap)
skus.columns = ["sku"]
df_ls = skus.merge(dfs[1], left_on="sku", right_on="custom_sku", how="left")
df_ls = df_ls.dropna(axis=1, how="all")
df = df.merge(df_ls, how="left", left_on="product_code_sku", right_on="sku")
# clean dollar amounts
cols = [i for i in list(df) if "price" in i] + [i for i in list(df) if "_cost" in i]
for col in cols:
df[col] = df[col].apply(lambda x: convert_zeros(x))
cols_dict = {
"price": "price_x",
"cost": "combined_cost_col",
"stock": "current_stock_level",
"inv_val": "inventory_value_price",
"inv_val_cost": "inventory_value_cost",
"groupby": "product_category",
}
# aggregate cost columns
df["cost_price_null"] = | pd.isnull(df.cost_price) | pandas.isnull |
import unittest
import pandas as pd
from featurefilter import FeatureCorrelationFilter
def test_fit_high_continuous_correlation():
train_df = pd.DataFrame({'A': [0, 1],
'B': [0, 1]})
filter_ = FeatureCorrelationFilter()
train_df = filter_.fit(train_df)
assert filter_.columns_to_drop == ['B']
def test_excluding_target_column():
train_df = pd.DataFrame({'A': [0, 1],
'B': [0, 1],
'Y': [0, 1]})
filter_ = FeatureCorrelationFilter(target_column='Y')
train_df = filter_.fit(train_df)
assert filter_.columns_to_drop == ['B']
def test_high_negative_continuous_correlation():
train_df = pd.DataFrame({'A': [0, 1], 'B': [0, -1], 'Y': [0, 1]})
test_df = pd.DataFrame({'A': [0, 0], 'B': [0, 0], 'Y': [0, 1]})
filter_ = FeatureCorrelationFilter(target_column='Y')
train_df = filter_.fit_transform(train_df)
test_df = filter_.transform(test_df)
assert train_df.equals(pd.DataFrame({'A': [0, 1], 'Y': [0, 1]}))
assert test_df.equals(pd.DataFrame({'A': [0, 0], 'Y': [0, 1]}))
def test_categorical_correlation():
train_df = | pd.DataFrame({'A': ['a', 'b'], 'B': ['a', 'b'], 'Y': [0, 1]}) | pandas.DataFrame |
import numpy as np
import pandas as pd
class Data(object):
"""
methods
-------
__init__:
Data constructor method
load_data:
Loads data
order_by_time:
Create a dataframe of units sorted by travel time for each LSOA
attributes
----------
admissions(DataFrame):
Admissions by LSOA
distance_matrix (DataFrame):
Travel distance from LSOA to all stroke units
time_matrix (DataFrame):
Travel time from LSOA to all stroke units (estimated road travel time,
clear road conditions)
units (DataFrame):
Information on stroke units (including mean length of stay)
"""
def __init__(self):
"""Data constructor method"""
# Load data
self.admissions = | pd.read_csv('data/admissions.csv', index_col='LSOA') | pandas.read_csv |
import base64
import io
import textwrap
import dash
import dash_core_components as dcc
import dash_html_components as html
import gunicorn
import plotly.graph_objs as go
from dash.dependencies import Input, Output, State
import flask
import pandas as pd
import urllib.parse
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
import numpy as np
import math
import scipy.stats
import dash_table
from dash_table.Format import Format, Scheme
from colour import Color
import dash_bootstrap_components as dbc
# from waitress import serve
external_stylesheets = [dbc.themes.BOOTSTRAP, 'https://codepen.io/chriddyp/pen/bWLwgP.css',
"https://codepen.io/sutharson/pen/dyYzEGZ.css",
"https://fonts.googleapis.com/css2?family=Raleway&display=swap",
"https://codepen.io/chriddyp/pen/brPBPO.css"]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
# "external_url": "https://codepen.io/chriddyp/pen/brPBPO.css"
# https://raw.githubusercontent.com/aaml-analytics/pca-explorer/master/LoadingStatusStyleSheet.css
styles = {
'pre': {
'border': 'thin lightgrey solid',
'overflowX': 'scroll'
}
}
tabs_styles = {'height': '40px', 'font-family': 'Raleway', 'fontSize': 14}
tab_style = {
'borderBottom': '1px solid #d6d6d6',
'padding': '6px',
'Weight': 'bold'
}
tab_selected_style = {
'borderTop': '3px solid #333333',
'borderBottom': '1px solid #d6d6d6 ',
'backgroundColor': '#f6f6f6',
'color': '#333333',
# 'fontColor': '#004a4a',
'fontWeight': 'bold',
'padding': '6px'
}
# APP ABOUT DESCRIPTION
MOF_tool_about = textwrap.wrap(' These tools aim to provide a reproducible and consistent data visualisation platform '
'where experimental and computational researchers can use big data and statistical '
'analysis to find the best materials for specific applications. Principal Component '
'Analysis (PCA) is a dimension reduction technique that can be used to reduce a large '
'set of observable variables to a smaller set of latent variables that still contain '
'most of the information in the large set (feature extraction). This is done by '
'transforming a number of (possibly) correlated variables into some number of orthogonal '
'(uncorrelated) variables called principal components to find the directions of maximal '
'variance. PCA can be used to ease data visualisation by having fewer dimensions to plot '
'or be used as a pre-processing step before using another Machine Learning (ML)'
' algorithm for regression '
'and classification tasks. PCA can be used to improve an ML algorithm performance, '
'reduce overfitting and reduce noise in data.',
width=50)
Scree_plot_about = textwrap.wrap(' The Principal Component Analysis Visualisation Tools runs PCA for the user and '
                                 'populates a Scree plot. This plot allows the user to determine whether PCA is '
                                 'suitable for their dataset and whether they can accept an X% drop in explained '
                                 'variance in exchange for fewer dimensions.', width=50)
Feature_correlation_filter = textwrap.wrap("Feature correlation heatmaps provide users with feature analysis and "
"feature principal component analysis. This tool will allow users to see the"
" correlation between variables and the"
" covariances/correlations between original variables and the "
"principal components (loadings)."
, width=50)
plots_analysis = textwrap.wrap('Users can keep all variables as features or drop certain variables to produce a '
'Biplot, cos2 plot and contribution plot. The score plot is used to look for clusters, '
'trends, and outliers in the first two principal components. The loading plot is used to'
' visually interpret the first two principal components. The biplot overlays the score '
'plot and the loading plot on the same graph. The squared cosine (cos2) plot shows '
'the importance of a component for a given observation i.e. measures '
'how much a variable is represented in a component. The contribution plot contains the '
'contributions (%) of the variables to the principal components', width=50, )
data_table_download = textwrap.wrap("The user's inputs from the 'Plots' tab will provide the output of the data tables."
" The user can download the scores, eigenvalues, explained variance, "
"cumulative explained variance, loadings, "
"cos2 and contributions from the populated data tables. "
"Note: Wait for user inputs to be"
" computed (faded tab app will return to the original colour) before downloading the"
" data tables. ", width=50)
MOF_GH = textwrap.wrap(" to explore AAML's sample data and read more on"
" AAML's Principal Component Analysis Visualisation Tool Manual, FAQ's & Troubleshooting"
" on GitHub... ", width=50)
####################
# APP LAYOUT #
####################
fig = go.Figure()
fig1 = go.Figure()
app.layout = html.Div([
html.Div([
html.Img(
src='https://raw.githubusercontent.com/aaml-analytics/mof-explorer/master/UOC.png',
height='35', width='140', style={'display': 'inline-block', 'padding-left': '1%'}),
html.Img(src='https://raw.githubusercontent.com/aaml-analytics/mof-explorer/master/A2ML-logo.png',
height='50', width='125', style={'float': 'right', 'display': 'inline-block', 'padding-right': '2%'}),
html.H1("Principal Component Analysis Visualisation Tools",
style={'display': 'inline-block', 'padding-left': '11%', 'text-align': 'center', 'fontSize': 36,
'color': 'white', 'font-family': 'Raleway'}),
html.H1("...", style={'fontColor': '#3c3c3c', 'fontSize': 6})
], style={'backgroundColor': '#333333'}),
html.Div([html.A('Refresh', href='/')], style={}),
html.Div([
html.H2("Upload Data", style={'fontSize': 24, 'font-family': 'Raleway', 'color': '#333333'}, ),
html.H3("Upload .txt, .csv or .xls files to starting exploring data...", style={'fontSize': 16,
'font-family': 'Raleway'}),
dcc.Store(id='csv-data', storage_type='session', data=None),
html.Div([dcc.Upload(
id='data-table-upload',
children=html.Div([html.Button('Upload File')],
style={'height': "60px", 'borderWidth': '1px',
'borderRadius': '5px',
'textAlign': 'center',
}),
multiple=False
),
html.Div(id='output-data-upload'),
]), ], style={'display': 'inline-block', 'padding-left': '1%', }),
html.Div([dcc.Tabs([
dcc.Tab(label='About', style=tab_style, selected_style=tab_selected_style,
children=[html.Div([html.H2(" What are AAML's Principal Component Analysis Visualisation Tools?",
style={'fontSize': 18, 'font-family': 'Raleway', 'font-weight': 'bold'
}),
html.Div([' '.join(MOF_tool_about)]
, style={'font-family': 'Raleway'}),
html.H2(["Scree Plot"],
style={'fontSize': 18,
'font-family': 'Raleway', 'font-weight': 'bold'}),
html.Div([' '.join(Scree_plot_about)], style={'font-family': 'Raleway'}),
html.H2(["Feature Correlation"], style={'fontSize': 18,
'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(Feature_correlation_filter)], style={'font-family': 'Raleway', }),
html.H2(["Plots"],
style={'fontSize': 18, 'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(plots_analysis)], style={'font-family': 'Raleway'}),
html.H2(["Data tables"],
style={'fontSize': 18, 'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(data_table_download)], style={'font-family': 'Raleway'}),
# ADD LINK
html.Div([html.Plaintext(
[' Click ', html.A('here ',
href='https://github.com/aaml-analytics/pca-explorer')],
style={'display': 'inline-block',
'fontSize': 14, 'font-family': 'Raleway'}),
html.Div([' '.join(MOF_GH)], style={'display': 'inline-block',
'fontSize': 14,
'font-family': 'Raleway'}),
html.Img(
src='https://raw.githubusercontent.com/aaml-analytics/mof'
'-explorer/master/github.png',
height='40', width='40',
style={'display': 'inline-block', 'float': "right"
})
]
, style={'display': 'inline-block'})
], style={'backgroundColor': '#ffffff', 'padding-left': '1%'}
)]),
dcc.Tab(label='Scree Plot', style=tab_style, selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='PC-Eigen-plot')
],
style={'display': 'inline-block',
'width': '49%'}),
html.Div([dcc.Graph(id='PC-Var-plot')
], style={'display': 'inline-block', 'float': 'right',
'width': '49%'}),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:", dcc.RadioItems(
id='outlier-value',
options=[{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '49%', 'padding-left': '1%'}),
html.Div([html.Label(["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-scree',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.Label(["You should attempt to use at least..."
, html.Div(id='var-output-container-filter')])
], style={'padding-left': '1%'}
),
html.Div([
html.Label(["As a rule of thumb for the Scree Plot"
" Eigenvalues, the point where the slope of the curve "
"is clearly "
"leveling off (the elbow), indicates the number of "
"components that "
"should be retained as significant."])
], style={'padding-left': '1%'}),
]),
dcc.Tab(label='Feature correlation', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([html.Div([dcc.Graph(id='PC-feature-heatmap')
], style={'width': '47%',
'display': 'inline-block',
'float': 'right'}),
html.Div([dcc.Graph(id='feature-heatmap')
], style={'width': '51%',
'display': 'inline-block',
'float': 'left'}),
html.Div([html.Label(["Loading colour bar range:"
, html.Div(
id='color-range-container')])
], style={
'fontSize': 12,
'float': 'right',
'width': '100%',
'padding-left': '85%'}
),
html.Div(
[html.Label(
["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='PC-feature-outlier-value',
options=[{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.Label(
["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-heatmap',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '39%', }),
html.Div([html.Label(["Select color scale:",
dcc.RadioItems(
id='colorscale',
options=[{'label': i, 'value': i}
for i in
['Viridis', 'Plasma']],
value='Plasma'
)]),
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("There are usually two ways multicollinearity, "
"which is when there are a number of variables "
"that are highly correlated, is dealt with:"),
html.P("1) Use PCA to obtain a set of orthogonal ("
"not correlated) variables to analyse."),
html.P("2) Use correlation of determination (R²) to "
"determine which variables are highly "
"correlated and use only 1 in analysis. "
"Cut off for highly correlated variables "
"is ~0.7."),
html.P(
"In any case, it depends on the machine learning algorithm you may apply later. For correlation robust algorithms,"
" such as Random Forest, correlation of features will not be a concern. For non-correlation robust algorithms such as Linear Discriminant Analysis, "
"all high correlation variables should be removed.")
], style={'padding-left': '1%'}
),
html.Div([
html.Label(["Note: Data has been standardised (scale)"])
], style={'padding-left': '1%'})
])
]),
dcc.Tab(label='Plots', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([
html.Div([html.P("Selecting Features")], style={'padding-left': '1%',
'font-weight': 'bold'}),
html.Div([
html.P("Input here affects all plots, datatables and downloadable data output"),
html.Label([
"Would you like to analyse all variables or choose custom variables to "
"analyse:",
dcc.RadioItems(
id='all-custom-choice',
options=[{'label': 'All',
'value': 'All'},
{'label': 'Custom',
'value': 'Custom'}],
value='All'
)])
], style={'padding-left': '1%'}),
html.Div([
html.P("For custom variables input variables you would not like as features in your PCA:"),
html.Label(
[
"Note: Only input numerical variables (non-numerical variables have already "
"been removed from your dataframe)",
dcc.Dropdown(id='feature-input',
multi=True,
)])
], style={'padding': 10, 'padding-left': '1%'}),
]), dcc.Tabs(id='sub-tabs1', style=tabs_styles,
children=[
dcc.Tab(label='Biplot (Scores + loadings)', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='biplot', figure=fig)
], style={'height': '100%', 'width': '75%',
'padding-left': '20%'},
),
html.Div(
[html.Label(
["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-biplot',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-biplot',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '39%', }),
html.Div([
html.Label([
"Graph Update to show either loadings (Loading Plot) or "
"scores and loadings (Biplot):",
dcc.RadioItems(
id='customvar-graph-update',
options=[{'label': 'Biplot',
'value': 'Biplot'},
{'label': 'Loadings',
'value': 'Loadings'}],
value='Biplot')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix. PCA is an unsupervised machine learning technique - it only "
"looks at the input features and does not take "
"into account the output or the target"
" (response) variable.")],
style={'padding-left': '1%'}),
html.Div([
html.P("For variables you have dropped..."),
html.Label([
"Would you like to introduce a first target variable"
" into your data visualisation?"
" (Graph type must be Biplot): "
"",
dcc.RadioItems(
id='radio-target-item',
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Select first target variable for color scale of scores: ",
dcc.Dropdown(
id='color-scale-scores',
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Would you like to introduce a second target variable"
" into your data visualisation??"
" (Graph type must be Biplot):",
dcc.RadioItems(
id='radio-target-item-second',
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Select second target variable for size scale of scores:",
dcc.Dropdown(
id='size-scale-scores',
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([html.Label(["Size range:"
, html.Div(
id='size-second-target-container')])
], style={'display': 'inline-block',
'float': 'right',
'padding-right': '5%'}
),
html.Div([
html.Br(),
html.P(
"A loading plot shows how "
"strongly each characteristic (variable)"
" influences a principal component. The angles between the vectors"
" tell us how characteristics correlate with one another: "),
html.P("1) When two vectors are close, forming a small angle, the two "
"variables they represent are positively correlated. "),
html.P(
"2) If they meet each other at 90°, they are not likely to be correlated. "),
html.P(
"3) When they diverge and form a large angle (close to 180°), they are negative correlated."),
html.P(
"The Score Plot involves the projection of the data onto the PCs in two dimensions."
"The plot contains the original data but in the rotated (PC) coordinate system"),
html.P(
"A biplot merges a score plot and loading plot together.")
], style={'padding-left': '1%'}
),
]),
dcc.Tab(label='Cos2', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='cos2-plot', figure=fig)
], style={'width': '65%',
'padding-left': '25%'},
),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-cos2',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'padding-left': '1%',
'width': '49%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-cos2',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("The squared cosine shows the importance of a "
"component for a given observation i.e. "
"measures "
" how much a variable is represented in a "
"component")
], style={'padding-left': '1%'}),
]),
dcc.Tab(label='Contribution', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='contrib-plot', figure=fig)
], style={'width': '65%',
'padding-left': '25%'},
),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-contrib',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
], style={'padding-left': '1%'})
], style={'display': 'inline-block',
'width': '49%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-contrib',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("The contribution plot contains the "
"contributions (in percentage) of the "
"variables to the principal components")
], style={'padding-left': '1%'}),
])
])
]),
dcc.Tab(label='Data tables', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([
html.Div([
html.Label(
["Note: Input in 'Plots' tab will provide output of data tables and the"
" downloadable PCA data"])
], style={'font-weight': 'bold', 'padding-left': '1%'}),
html.Div([html.A(
'Download PCA Data (scores for each principal component)',
id='download-link',
href="",
target="_blank"
)], style={'padding-left': '1%'}),
html.Div([html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(id="eigenA-outlier",
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])], style={'padding-left': '1%',
'display': 'inline-block', 'width': '49%'}),
html.Div([html.Label(["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-data-table',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.Div([
html.Label(["Correlation between Features"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-correlation',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-correlation-container'),
]),
html.Div([html.A(
'Download Feature Correlation data',
id='download-link-correlation',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Eigen Analysis of the correlation matrix"]),
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-eigenA',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-eigenA-container'),
]),
html.Div([html.A(
'Download Eigen Analysis data',
id='download-link-eigenA',
href="",
download='Eigen_Analysis_data.csv',
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Loadings (Feature and PC correlation) from PCA"]),
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-loadings',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-loadings-container'),
]),
html.Div([html.A(
'Download Loadings data',
id='download-link-loadings',
download='Loadings_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Cos2 from PCA"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-cos2',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-cos2-container'),
]),
html.Div([html.A(
'Download Cos2 data',
id='download-link-cos2',
download='Cos2_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Contributions from PCA"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-contrib',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-contrib-container'),
]),
html.Div([html.A(
'Download Contributions data',
id='download-link-contrib',
download='Contributions_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
])])
])
], style={'font-family': 'Raleway'})])
# READ FILE
def parse_contents(contents, filename):
content_type, content_string = contents.split(',')
decoded = base64.b64decode(content_string)
try:
if 'csv' in filename:
# Assume that the user uploaded a CSV file
df = pd.read_csv(io.StringIO(decoded.decode('utf-8')))
            df = df.fillna(0)
        elif 'xls' in filename:
            # Assume that the user uploaded an excel file
            df = pd.read_excel(io.BytesIO(decoded))
            df = df.fillna(0)
        elif 'txt' in filename or 'tsv' in filename:
            # Assume that the user uploaded a whitespace-delimited text file
            df = pd.read_csv(io.StringIO(decoded.decode('utf-8')), delimiter=r'\s+')
            df = df.fillna(0)
except Exception as e:
print(e)
return html.Div([
'There was an error processing this file.'
])
return df
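# Illustrative only (not part of the app): dcc.Upload delivers `contents` as a
# "data:<mime>;base64,<payload>" string. The sample CSV below is made up and simply shows
# the kind of input parse_contents() expects; it reuses the base64 import at the top of
# this file.
_example_csv = "feature_a,feature_b\n1.0,2.0\n3.0,4.0\n"
_example_contents = "data:text/csv;base64," + base64.b64encode(_example_csv.encode("utf-8")).decode("utf-8")
# parse_contents(_example_contents, "example.csv") returns a 2-row, 2-column DataFrame.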
@app.callback(Output('csv-data', 'data'),
[Input('data-table-upload', 'contents')],
[State('data-table-upload', 'filename')])
def parse_uploaded_file(contents, filename):
if not filename:
return dash.no_update
df = parse_contents(contents, filename)
    df = df.fillna(0)
return df.to_json(date_format='iso', orient='split')
@app.callback(Output('PC-Var-plot', 'figure'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')],
)
def update_graph_stat(outlier, matrix_type, data):
traces = []
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == 'Correlation':
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
data = Var_dff
elif outlier == 'Yes' and matrix_type == 'Correlation':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
data = Var_dff_outlier
elif outlier == 'No' and matrix_type == 'Covariance':
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
data = Var_dff_covar
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
data = Var_dff_outlier_covar
traces.append(go.Scatter(x=data['Principal Component'], y=data['Cumulative Proportion of Explained Variance'],
mode='lines', line=dict(color='Red')))
return {'data': traces,
'layout': go.Layout(title='<b>Cumulative Scree Plot Proportion of Explained Variance</b>',
titlefont=dict(family='Helvetica', size=16),
xaxis={'title': 'Principal Component',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True
}, yaxis={'title': 'Cumulative Explained Variance',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True,
'range': [0, 100]},
hovermode='closest', font=dict(family="Helvetica"), template="simple_white")
}
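# Standalone sketch of the core computation behind the scree callback above (correlation-
# matrix route, no outlier removal): standardise, fit PCA and build the cumulative
# explained-variance table that feeds the plot. The helper name and its input are
# illustrative, not part of the app's callbacks; it uses the imports at the top of this file.
def _cumulative_variance_table(numeric_df):
    x = StandardScaler().fit_transform(numeric_df.values)
    pca_sketch = PCA(n_components=numeric_df.shape[1])
    pca_sketch.fit(x)
    cumulative_pct = np.cumsum(pca_sketch.explained_variance_ratio_) * 100
    return pd.DataFrame({
        'Principal Component': ['PC' + str(i + 1) for i in range(numeric_df.shape[1])],
        'Cumulative Proportion of Explained Variance': cumulative_pct})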
@app.callback(
Output('var-output-container-filter', 'children'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')],
)
def update_output(outlier, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == 'Correlation':
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
PC_num = [float(i + 1) for i in range(len(features))]
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
PC_interp = np.interp(70, Var_dff['Cumulative Proportion of Explained Variance'], PC_num)
PC_interp_int = math.ceil(PC_interp)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int)
elif outlier == 'Yes' and matrix_type == "Correlation":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier = outlier_dff.loc[:, ].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
dfff_outlier = finalDf_outlier
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
loading_dff_outlier = loading_df_outlier.T
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier['Cumulative Proportion of Explained Variance'],
PC_num_outlier)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_outlier)
elif outlier == 'No' and matrix_type == 'Covariance':
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
PC_num_covar = [float(i + 1) for i in range(len(features_covar))]
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
PC_interp_covar = np.interp(70, Var_dff_covar['Cumulative Proportion of Explained Variance'], PC_num_covar)
PC_interp_int_covar = math.ceil(PC_interp_covar)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_covar)
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
PC_num_outlier_covar = [float(i + 1) for i in range(len(features_outlier_covar))]
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier_covar['Cumulative Proportion of Explained Variance'],
PC_num_outlier_covar)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_outlier)
@app.callback(Output('PC-Eigen-plot', 'figure'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')]
)
def update_graph_stat(outlier, matrix_type, data):
traces = []
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == "Correlation":
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
y = dff.loc[:, ].values
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
PC_num = [float(i + 1) for i in range(len(features))]
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
PC_interp = np.interp(70, Var_dff['Cumulative Proportion of Explained Variance'], PC_num)
PC_interp_int = math.ceil(PC_interp)
eigenvalues = pca.explained_variance_
Eigen_df = pd.DataFrame(data=eigenvalues, columns=['Eigenvalues'])
Eigen_dff = pd.concat([PC_df, Eigen_df], axis=1)
data = Eigen_dff
elif outlier == 'Yes' and matrix_type == "Correlation":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier = outlier_dff.loc[:, ].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
dfff_outlier = finalDf_outlier
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
loading_dff_outlier = loading_df_outlier.T
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier['Cumulative Proportion of Explained Variance'],
PC_num_outlier)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
eigenvalues_outlier = pca_outlier.explained_variance_
Eigen_df_outlier = pd.DataFrame(data=eigenvalues_outlier, columns=['Eigenvalues'])
Eigen_dff_outlier = pd.concat([PC_df_outlier, Eigen_df_outlier], axis=1)
data = Eigen_dff_outlier
elif outlier == 'No' and matrix_type == "Covariance":
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
PC_num_covar = [float(i + 1) for i in range(len(features_covar))]
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
PC_interp_covar = np.interp(70, Var_dff_covar['Cumulative Proportion of Explained Variance'], PC_num_covar)
PC_interp_int_covar = math.ceil(PC_interp_covar)
eigenvalues_covar = pca_covar.explained_variance_
Eigen_df_covar = pd.DataFrame(data=eigenvalues_covar, columns=['Eigenvalues'])
Eigen_dff_covar = pd.concat([PC_df_covar, Eigen_df_covar], axis=1)
data = Eigen_dff_covar
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
PC_num_outlier_covar = [float(i + 1) for i in range(len(features_outlier_covar))]
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier_covar['Cumulative Proportion of Explained Variance'],
PC_num_outlier_covar)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
eigenvalues_outlier_covar = pca_outlier_covar.explained_variance_
Eigen_df_outlier_covar = pd.DataFrame(data=eigenvalues_outlier_covar, columns=['Eigenvalues'])
Eigen_dff_outlier_covar = pd.concat([PC_df_outlier_covar, Eigen_df_outlier_covar], axis=1)
data = Eigen_dff_outlier_covar
traces.append(go.Scatter(x=data['Principal Component'], y=data['Eigenvalues'], mode='lines'))
return {'data': traces,
'layout': go.Layout(title='<b>Scree Plot Eigenvalues</b>', xaxis={'title': 'Principal Component',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True},
titlefont=dict(family='Helvetica', size=16),
yaxis={'title': 'Eigenvalues', 'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True}, hovermode='closest',
font=dict(family="Helvetica"), template="simple_white", )
}
def round_up(n, decimals=0):
multiplier = 10 ** decimals
return math.ceil(n * multiplier) / multiplier
def round_down(n, decimals=0):
multiplier = 10 ** decimals
return math.floor(n * multiplier) / multiplier
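# Quick illustration (hypothetical loading values) of how these helpers are used below:
# the most negative loading is rounded up and the most positive rounded down, so the
# colour-bar range printed in the feature-correlation tab never overstates the data.
_example_loadings = np.array([-0.8731, 0.12, 0.9149])
_example_range = [round_up(_example_loadings.min(), 2), round_down(_example_loadings.max(), 2)]
# _example_range == [-0.87, 0.91]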
@app.callback([Output('PC-feature-heatmap', 'figure'),
Output('color-range-container', 'children')],
[
Input('PC-feature-outlier-value', 'value'),
Input('colorscale', 'value'),
Input("matrix-type-heatmap", "value"),
Input('csv-data', 'data')]
)
def update_graph_stat(outlier, colorscale, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
traces = []
# INCLUDING OUTLIERS
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
y = dff.loc[:, ].values
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
    # combining principal components and target
    finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
    dfff = finalDf
    # explained variance of the two principal components
    # print(pca.explained_variance_ratio_)
    # Explained variance tells us how much information (variance) can be attributed to each of the principal components
    # loading of each feature in principal components
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
# OUTLIERS REMOVED
z_scores_hm = scipy.stats.zscore(dff)
abs_z_scores_hm = np.abs(z_scores_hm)
filtered_entries_hm = (abs_z_scores_hm < 3).all(axis=1)
outlier_dff_hm = dff[filtered_entries_hm]
features1_outlier_hm = outlier_dff_hm.columns
features_outlier2 = list(features1_outlier_hm)
outlier_names1_hm = df[filtered_entries_hm]
outlier_names_hm = outlier_names1_hm.iloc[:, 0]
x_outlier_hm = outlier_dff_hm.loc[:, features_outlier2].values
# Separating out the target (if any)
# Standardizing the features
x_outlier_hm = StandardScaler().fit_transform(x_outlier_hm)
pca_outlier_hm = PCA(n_components=len(features_outlier2))
principalComponents_outlier_hm = pca_outlier_hm.fit_transform(x_outlier_hm)
principalDf_outlier_hm = pd.DataFrame(data=principalComponents_outlier_hm
, columns=['PC' + str(i + 1) for i in range(len(features_outlier2))])
    # combining principal components and target
finalDf_outlier_hm = pd.concat([outlier_names_hm, principalDf_outlier_hm], axis=1)
dfff_outlier_hm = finalDf_outlier_hm
# calculating loading
loading_outlier_hm = pca_outlier_hm.components_.T * np.sqrt(pca_outlier_hm.explained_variance_)
loading_df_outlier_hm = pd.DataFrame(data=loading_outlier_hm[0:, 0:], index=features_outlier2,
columns=['PC' + str(i + 1) for i in range(loading_outlier_hm.shape[1])])
loading_dff_outlier_hm = loading_df_outlier_hm.T
# COVAR MATRIX
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
loading_dff_covar = loading_df_covar.T
# COVAR MATRIX OUTLIERS REMOVED
if outlier == 'No' and matrix_type == "Correlation":
data = loading_dff
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_dff_outlier_hm
elif outlier == 'No' and matrix_type == "Covariance":
data = loading_dff_covar
elif outlier == "Yes" and matrix_type == "Covariance":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
loading_dff_outlier_covar = loading_df_outlier_covar.T
data = loading_dff_outlier_covar
    size_range = [round_up(data.values.min(), 2), round_down(data.values.max(), 2)]
traces.append(go.Heatmap(
z=data, x=features_outlier2, y=['PC' + str(i + 1) for i in range(loading_outlier_hm.shape[1])],
colorscale="Viridis" if colorscale == 'Viridis' else "Plasma",
        # each cell is a loading: the association between a feature and a principal component
colorbar={"title": "Loading",
# 'tickvals': [round_up(data.values.min(), 2),
# round_up((data.values.min() + (data.values.max() + data.values.min())/2)/2, 2),
# round_down((data.values.max() + data.values.min())/2,2),
# round_down((data.values.max() + (data.values.max() + data.values.min())/2)/2, 2),
# round_down(data.values.max(),2), ]
}
))
return {'data': traces,
'layout': go.Layout(title=dict(text='<b>PC and Feature Correlation Analysis</b>'),
xaxis=dict(title_text='Features', title_standoff=50),
titlefont=dict(family='Helvetica', size=16),
hovermode='closest', margin={'b': 110, 't': 50, 'l': 75},
font=dict(family="Helvetica", size=11),
annotations=[
dict(x=-0.16, y=0.5, showarrow=False, text="Principal Components",
xref='paper', yref='paper', textangle=-90,
font=dict(size=12))]
),
}, '{}'.format(size_range)
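# Standalone sketch of the loading matrix plotted above (made-up helper, correlation-matrix
# route): each entry is pca.components_.T scaled by the square root of the corresponding
# eigenvalue, which the app reads as the strength of association between a feature and a
# principal component. It uses the imports at the top of this file.
def _loadings_table(numeric_df):
    x = StandardScaler().fit_transform(numeric_df.values)
    pca_sketch = PCA(n_components=numeric_df.shape[1]).fit(x)
    loadings = pca_sketch.components_.T * np.sqrt(pca_sketch.explained_variance_)
    return pd.DataFrame(loadings, index=numeric_df.columns,
                        columns=['PC' + str(i + 1) for i in range(numeric_df.shape[1])])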
@app.callback(Output('feature-heatmap', 'figure'),
[
Input('PC-feature-outlier-value', 'value'),
Input('colorscale', 'value'),
Input('csv-data', 'data')])
def update_graph_stat(outlier, colorscale, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
traces = []
if outlier == 'No':
features1 = dff.columns
features = list(features1)
# correlation coefficient and coefficient of determination
correlation_dff = dff.corr(method='pearson', )
r2_dff = correlation_dff * correlation_dff
data = r2_dff
feat = features
elif outlier == 'Yes':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
# correlation coefficient and coefficient of determination
correlation_dff_outlier = outlier_dff.corr(method='pearson', )
r2_dff_outlier = correlation_dff_outlier * correlation_dff_outlier
data = r2_dff_outlier
feat = features_outlier
traces.append(go.Heatmap(
z=data, x=feat, y=feat, colorscale="Viridis" if colorscale == 'Viridis' else "Plasma",
        # each cell is the coefficient of determination (R²) between a pair of features
colorbar={"title": "R²", 'tickvals': [0, 0.2, 0.4, 0.6, 0.8, 1]}))
return {'data': traces,
'layout': go.Layout(title=dict(text='<b>Feature Correlation Analysis</b>', y=0.97, x=0.6),
xaxis={},
titlefont=dict(family='Helvetica', size=16),
yaxis={},
hovermode='closest', margin={'b': 110, 't': 50, 'l': 180, 'r': 50},
font=dict(family="Helvetica", size=11)),
}
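# Sketch of the manual alternative described in the Feature correlation tab text: drop one
# feature of every pair whose squared Pearson correlation (R²) exceeds ~0.7 and keep the
# other. The threshold default and the "keep the first column" rule are illustrative
# choices, not behaviour of the app itself.
def _drop_highly_correlated(numeric_df, r2_threshold=0.7):
    r2 = numeric_df.corr(method='pearson') ** 2
    to_drop = set()
    columns = list(r2.columns)
    for i, col_a in enumerate(columns):
        if col_a in to_drop:
            continue
        for col_b in columns[i + 1:]:
            if col_b not in to_drop and r2.loc[col_a, col_b] > r2_threshold:
                to_drop.add(col_b)
    return numeric_df.drop(columns=sorted(to_drop))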
@app.callback(Output('feature-input', 'options'),
[Input('all-custom-choice', 'value'),
Input('csv-data', 'data')])
def activate_input(all_custom, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
options = []
elif all_custom == 'Custom':
options = [{'label': i, 'value': i} for i in dff.columns]
return options
@app.callback(Output('color-scale-scores', 'options'),
[Input('feature-input', 'value'),
Input('radio-target-item', 'value'),
Input('outlier-value-biplot', 'value'),
Input('customvar-graph-update', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')], )
def populate_color_dropdown(input, target, outlier, graph_type, matrix_type, data):
if not data:
return dash.no_update
if input is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
dff_target = dff[input]
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
if target == 'Yes' and outlier == 'Yes' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'Yes' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'No' or graph_type == 'Loadings':
options = []
return options
@app.callback(Output('size-scale-scores', 'options'),
[Input('feature-input', 'value'),
Input('radio-target-item-second', 'value'),
Input('outlier-value-biplot', 'value'),
Input('customvar-graph-update', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')])
def populate_color_dropdown(input, target, outlier, graph_type, matrix_type, data):
if not data:
return dash.no_update
if input is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
dff_target = dff[input]
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
if target == 'Yes' and outlier == 'Yes' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'Yes' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'No' or graph_type == 'Loadings':
options = []
return options
# Biplot / loadings plot (correlation- or covariance-matrix PCA)
@app.callback(Output('biplot', 'figure'),
[
Input('outlier-value-biplot', 'value'),
Input('feature-input', 'value'),
Input('customvar-graph-update', 'value'),
Input('color-scale-scores', 'value'),
Input('radio-target-item', 'value'),
Input('size-scale-scores', 'value'),
Input('radio-target-item-second', 'value'),
Input('all-custom-choice', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')
]
)
def update_graph_custom(outlier, input, graph_update, color, target, size, target2, all_custom, matrix_type, data):
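    """Build the PCA biplot / loadings-only figure.

    'All' runs PCA on every numeric column; 'Custom' drops the columns chosen in
    'feature-input' from the PCA and keeps them as targets for marker colour and
    size. 'Correlation' standardises the data before PCA, 'Covariance' uses the
    raw values, and outlier removal drops rows with any |z-score| >= 3. The first
    column of the uploaded file is assumed to hold sample labels for hover text.
    """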
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
features1 = dff.columns
features = list(features1)
if all_custom == 'All':
# x_scale = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale)
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# ORIGINAL DATA WITH OUTLIERS
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
# x_scale = rescale(x_scale, new_min=0, new_max=1)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
dfff_scale = finalDf_scale.fillna(0)
Var_scale = pca_scale.explained_variance_ratio_
# calculating loading vector plot
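        # loadings = eigenvectors scaled by sqrt(explained variance); for the
        # standardised data each loading is (up to the sample-variance convention)
        # the Pearson correlation between a feature and a principal component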
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
loading_scale_df = pd.DataFrame(data=loading_scale[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_df = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff = pd.concat([loading_scale_df, line_group_scale_df], axis=1)
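        # pair every loading row with a zero row sharing the same 'line_group' so
        # that each vector is drawn as a line segment from the origin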
a = (len(features), 2)
zero_scale = np.zeros(a)
zero_scale_df = pd.DataFrame(data=zero_scale, columns=["PC1", "PC2"])
zero_scale_dff = pd.concat([zero_scale_df, line_group_scale_df], axis=1)
loading_scale_line_graph = pd.concat([loading_scale_dff, zero_scale_dff], axis=0)
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
# x_outlier_scale = MinMaxScaler().fit_transform(x_outlier_scale)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_outlier_scale = rescale(x_outlier_scale, new_min=0, new_max=1)
        # PCA on the standardised data (i.e. correlation-matrix PCA)
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
dfff_outlier_scale = finalDf_outlier_scale.fillna(0)
        # explained variance ratio per principal component
Var_outlier_scale = pca_outlier_scale.explained_variance_ratio_
# calculating loading vector plot
loading_outlier_scale = pca_outlier_scale.components_.T * np.sqrt(pca_outlier_scale.explained_variance_)
loading_outlier_scale_df = pd.DataFrame(data=loading_outlier_scale[:, 0:2],
columns=["PC1", "PC2"])
line_group_df = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff = pd.concat([loading_outlier_scale_df, line_group_df], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale = np.zeros(a)
zero_outlier_scale_df = pd.DataFrame(data=zero_outlier_scale, columns=["PC1", "PC2"])
zero_outlier_scale_dff = pd.concat([zero_outlier_scale_df, line_group_df], axis=1)
loading_outlier_scale_line_graph = pd.concat([loading_outlier_scale_dff, zero_outlier_scale_dff], axis=0)
# COVARIANCE MATRIX
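        # PCA on the unscaled values corresponds to eigendecomposition of the
        # covariance matrix; the standardised branch above corresponds to the
        # correlation matrix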
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
dfff_scale_covar = finalDf_scale_covar.fillna(0)
Var_scale_covar = pca_scale_covar.explained_variance_ratio_
loading_scale_covar = pca_scale_covar.components_.T * np.sqrt(pca_scale_covar.explained_variance_)
loading_scale_df_covar = pd.DataFrame(data=loading_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_df_covar = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff_covar = pd.concat([loading_scale_df_covar, line_group_scale_df_covar], axis=1)
a = (len(features), 2)
zero_scale_covar = np.zeros(a)
zero_scale_df_covar = pd.DataFrame(data=zero_scale_covar, columns=["PC1", "PC2"])
zero_scale_dff_covar = pd.concat([zero_scale_df_covar, line_group_scale_df_covar], axis=1)
loading_scale_line_graph_covar = pd.concat([loading_scale_dff_covar, zero_scale_dff_covar], axis=0)
# COVARIANCE MATRIX OUTLIERS
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
finalDf_outlier_scale_covar = pd.concat([outlier_names, principalDf_outlier_scale_covar], axis=1)
dfff_outlier_scale_covar = finalDf_outlier_scale_covar.fillna(0)
Var_outlier_scale_covar = pca_outlier_scale_covar.explained_variance_ratio_
# calculating loading vector plot
loading_outlier_scale_covar = pca_outlier_scale_covar.components_.T * np.sqrt(
pca_outlier_scale_covar.explained_variance_)
loading_outlier_scale_df_covar = pd.DataFrame(data=loading_outlier_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_df_covar = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff_covar = pd.concat([loading_outlier_scale_df_covar, line_group_df_covar], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale_covar = np.zeros(a)
zero_outlier_scale_df_covar = pd.DataFrame(data=zero_outlier_scale_covar, columns=["PC1", "PC2"])
zero_outlier_scale_dff_covar = pd.concat([zero_outlier_scale_df_covar, line_group_df_covar], axis=1)
loading_outlier_scale_line_graph_covar = pd.concat(
[loading_outlier_scale_dff_covar, zero_outlier_scale_dff_covar], axis=0)
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_outlier_scale_covar
trace2_all = go.Scatter(x=dat['PC1'], y=dat['PC2'], mode='markers',
text=dat[dat.columns[0]],
hovertemplate=
'<b>%{text}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
marker=dict(opacity=0.7, showscale=False, size=12,
line=dict(width=0.5, color='DarkSlateGrey'),
),
)
####################################################################################################
        # select which loading-line data and explained variance to plot
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_line_graph
variance = Var_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_outlier_scale_line_graph
variance = Var_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_line_graph_covar
variance = Var_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_outlier_scale_line_graph_covar
variance = Var_outlier_scale_covar
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf_all = data[data['line_group'] == i]
trace1_all = go.Scatter(x=dataf_all['PC1'], y=dataf_all['PC2'], line=dict(color="#4f4f4f"),
name=i,
# text=i,
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
mode='lines+text',
textposition='bottom right', textfont=dict(size=12)
)
lists[counter] = trace1_all
counter = counter + 1
####################################################################################################
if graph_update == 'Biplot':
lists.insert(0, trace2_all)
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif graph_update == 'Loadings':
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif all_custom == 'Custom':
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# OUTLIER DATA INPUT
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
# x_scale_input = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_scale_input = rescale(x_scale_input, new_min=0, new_max=1)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
Var_scale_input = pca_scale_input.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input = pca_scale_input.components_.T * np.sqrt(pca_scale_input.explained_variance_)
loading_scale_input_df = pd.DataFrame(data=loading_scale_input[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_df = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff = pd.concat([loading_scale_input_df, line_group_scale_input_df],
axis=1)
a = (len(features_input), 2)
zero_scale_input = np.zeros(a)
zero_scale_input_df = pd.DataFrame(data=zero_scale_input, columns=["PC1", "PC2"])
zero_scale_input_dff = pd.concat([zero_scale_input_df, line_group_scale_input_df], axis=1)
loading_scale_input_line_graph = pd.concat([loading_scale_input_dff, zero_scale_input_dff],
axis=0)
# INPUT DATA WITH REMOVING OUTLIERS
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
# x_scale_input_outlier = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input_outlier)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_scale_input_outlier = rescale(x_scale_input_outlier, new_min=0, new_max=1)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
Var_scale_input_outlier = pca_scale_input_outlier.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier = pca_scale_input_outlier.components_.T * np.sqrt(
pca_scale_input_outlier.explained_variance_)
loading_scale_input_outlier_df = pd.DataFrame(data=loading_scale_input_outlier[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_outlier_df = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff = pd.concat([loading_scale_input_outlier_df, line_group_scale_input_outlier_df],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier = np.zeros(a)
zero_scale_input_outlier_df = pd.DataFrame(data=zero_scale_input_outlier, columns=["PC1", "PC2"])
zero_scale_input_outlier_dff = pd.concat([zero_scale_input_outlier_df, line_group_scale_input_outlier_df],
axis=1)
loading_scale_input_outlier_line_graph = pd.concat(
[loading_scale_input_outlier_dff, zero_scale_input_outlier_dff],
axis=0)
# COVARIANCE MATRIX
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target], axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
Var_scale_input_covar = pca_scale_input_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_covar = pca_scale_input_covar.components_.T * np.sqrt(
pca_scale_input_covar.explained_variance_)
loading_scale_input_df_covar = pd.DataFrame(data=loading_scale_input_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_df_covar = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff_covar = pd.concat([loading_scale_input_df_covar, line_group_scale_input_df_covar],
axis=1)
a = (len(features_input), 2)
zero_scale_input_covar = np.zeros(a)
zero_scale_input_df_covar = pd.DataFrame(data=zero_scale_input_covar, columns=["PC1", "PC2"])
zero_scale_input_dff_covar = pd.concat([zero_scale_input_df_covar, line_group_scale_input_df_covar], axis=1)
loading_scale_input_line_graph_covar = pd.concat([loading_scale_input_dff_covar, zero_scale_input_dff_covar],
axis=0)
# COVARIANCE MATRIX OUTLIERS
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier_covar = dff_input_outlier.loc[:, ].values
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
Var_scale_input_outlier_covar = pca_scale_input_outlier_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier_covar = pca_scale_input_outlier_covar.components_.T * np.sqrt(
pca_scale_input_outlier_covar.explained_variance_)
loading_scale_input_outlier_df_covar = pd.DataFrame(data=loading_scale_input_outlier_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_outlier_df_covar = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff_covar = pd.concat([loading_scale_input_outlier_df_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier_covar = np.zeros(a)
zero_scale_input_outlier_df_covar = pd.DataFrame(data=zero_scale_input_outlier_covar, columns=["PC1", "PC2"])
zero_scale_input_outlier_dff_covar = pd.concat([zero_scale_input_outlier_df_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
loading_scale_input_outlier_line_graph_covar = pd.concat(
[loading_scale_input_outlier_dff_covar, zero_scale_input_outlier_dff_covar],
axis=0)
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale_input
variance = Var_scale_input
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_scale_input_outlier
variance = Var_scale_input_outlier
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_input_covar
variance = Var_scale_input_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_scale_input_outlier_covar
variance = Var_scale_input_outlier_covar
trace2 = go.Scatter(x=dat['PC1'], y=dat['PC2'], mode='markers',
marker_color=dat[color] if target == 'Yes' else None,
marker_size=dat[size] if target2 == 'Yes' else 12,
text=dat[dat.columns[0]],
hovertemplate=
'<b>%{text}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
marker=dict(opacity=0.7, colorscale='Plasma',
sizeref=max(dat[size]) / (15 ** 2) if target2 == 'Yes' else None,
sizemode='area',
showscale=True if target == 'Yes' else False,
line=dict(width=0.5, color='DarkSlateGrey'),
colorbar=dict(title=dict(text=color if target == 'Yes' else None,
font=dict(family='Helvetica'),
side='right'), ypad=0),
),
)
####################################################################################################
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_input_line_graph
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_scale_input_outlier_line_graph
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_input_line_graph_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_scale_input_outlier_line_graph_covar
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf = data[data['line_group'] == i]
trace1 = go.Scatter(x=dataf['PC1'], y=dataf['PC2'],
line=dict(color="#666666" if target == 'Yes' else '#4f4f4f'), name=i,
# text=i,
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
mode='lines+text', textposition='bottom right', textfont=dict(size=12),
)
lists[counter] = trace1
counter = counter + 1
####################################################################################################
if graph_update == 'Biplot':
lists.insert(0, trace2)
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif graph_update == 'Loadings':
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
@app.callback(
Output('size-second-target-container', 'children'),
[Input('size-scale-scores', 'value'),
Input('outlier-value-biplot', 'value'),
Input('csv-data', 'data')
]
)
def update_output(size, outlier, data):
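    """Display the [min, max] range (2 d.p.) of the variable chosen for marker
    size, after outlier removal when requested."""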
if not data:
return dash.no_update
if size is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
z_scores_dff_size = scipy.stats.zscore(dff)
abs_z_scores_dff_size = np.abs(z_scores_dff_size)
filtered_entries_dff_size = (abs_z_scores_dff_size < 3).all(axis=1)
dff_target_outlier_size = dff[filtered_entries_dff_size]
if outlier == 'Yes':
size_range = [round(dff_target_outlier_size[size].min(), 2), round(dff_target_outlier_size[size].max(), 2)]
elif outlier == 'No':
size_range = [round(dff[size].min(), 2), round(dff[size].max(), 2)]
return '{}'.format(size_range)
@app.callback(Output('cos2-plot', 'figure'),
[
Input('outlier-value-cos2', 'value'),
Input('feature-input', 'value'),
Input('all-custom-choice', 'value'),
Input("matrix-type-cos2", "value"),
Input('csv-data', 'data')
])
def update_cos2_plot(outlier, input, all_custom, matrix_type, data):
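    """Build the cos2 plot: loading vectors coloured by how well each variable is
    represented in the PC1-PC2 plane (cos2 = squared PC1 loading + squared PC2
    loading). Uses the same All/Custom, outlier and matrix-type handling as the
    biplot callback."""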
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
# x_scale = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale)
features1 = dff.columns
features = list(features1)
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# ORIGINAL DATA WITH OUTLIERS
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
Var_scale = pca_scale.explained_variance_ratio_
# calculating loading vector plot
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
loading_scale_df = pd.DataFrame(data=loading_scale[:, 0:2],
columns=["PC1", "PC2"])
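        # cos2 = squared PC1 loading + squared PC2 loading: how well each variable
        # is represented in the PC1-PC2 plane (used for the colour scale)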
loading_scale_df['cos2'] = (loading_scale_df["PC1"] ** 2) + (loading_scale_df["PC2"] ** 2)
line_group_scale_df = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff = pd.concat([loading_scale_df, line_group_scale_df], axis=1)
a = (len(features), 2)
zero_scale = np.zeros(a)
zero_scale_df = pd.DataFrame(data=zero_scale, columns=["PC1", "PC2"])
zero_scale_df_color = pd.DataFrame(data=loading_scale_df.iloc[:, 2], columns=['cos2'])
zero_scale_dff = pd.concat([zero_scale_df, zero_scale_df_color, line_group_scale_df], axis=1)
loading_scale_line_graph = pd.concat([loading_scale_dff, zero_scale_dff], axis=0)
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
# x_outlier_scale = MinMaxScaler().fit_transform(x_outlier_scale)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
#
# x_outlier_scale = rescale(x_outlier_scale, new_min=0, new_max=1)
        # PCA on the standardised data (i.e. correlation-matrix PCA)
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
Var_outlier_scale = pca_outlier_scale.explained_variance_ratio_
        # calculating loading vector plot
loading_outlier_scale = pca_outlier_scale.components_.T * np.sqrt(pca_outlier_scale.explained_variance_)
loading_outlier_scale_df = pd.DataFrame(data=loading_outlier_scale[:, 0:2],
columns=["PC1", "PC2"])
loading_outlier_scale_df["cos2"] = (loading_outlier_scale_df["PC1"] ** 2) + (
loading_outlier_scale_df["PC2"] ** 2)
line_group_df = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff = pd.concat([loading_outlier_scale_df, line_group_df], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale = np.zeros(a)
zero_outlier_scale_df = pd.DataFrame(data=zero_outlier_scale, columns=["PC1", "PC2"])
zero_outlier_scale_df_color = pd.DataFrame(data=loading_outlier_scale_df.iloc[:, 2], columns=['cos2'])
zero_outlier_scale_dff = pd.concat([zero_outlier_scale_df, zero_outlier_scale_df_color, line_group_df], axis=1)
loading_outlier_scale_line_graph = pd.concat([loading_outlier_scale_dff, zero_outlier_scale_dff], axis=0)
loading_scale_line_graph_sort = loading_scale_line_graph.sort_values(by='cos2')
loading_outlier_scale_line_graph_sort = loading_outlier_scale_line_graph.sort_values(by='cos2')
# COVARIANCE MATRIX
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
Var_scale_covar = pca_scale_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_covar = pca_scale_covar.components_.T * np.sqrt(pca_scale_covar.explained_variance_)
loading_scale_df_covar = pd.DataFrame(data=loading_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_df_covar['cos2'] = (loading_scale_df_covar["PC1"] ** 2) + (loading_scale_df_covar["PC2"] ** 2)
line_group_scale_df_covar = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff_covar = pd.concat([loading_scale_df_covar, line_group_scale_df_covar], axis=1)
a = (len(features), 2)
zero_scale_covar = np.zeros(a)
zero_scale_df_covar = pd.DataFrame(data=zero_scale_covar, columns=["PC1", "PC2"])
zero_scale_df_color_covar = pd.DataFrame(data=loading_scale_df_covar.iloc[:, 2], columns=['cos2'])
zero_scale_dff_covar = pd.concat([zero_scale_df_covar, zero_scale_df_color_covar, line_group_scale_df_covar],
axis=1)
loading_scale_line_graph_covar = pd.concat([loading_scale_dff_covar, zero_scale_dff_covar], axis=0)
loading_scale_line_graph_sort_covar = loading_scale_line_graph_covar.sort_values(by='cos2')
# COVARIANCE MATRIX WITH OUTLIERS
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier_scale_covar = pd.concat([outlier_names, principalDf_outlier_scale_covar], axis=1)
Var_outlier_scale_covar = pca_outlier_scale_covar.explained_variance_ratio_
        # calculating loading vector plot
loading_outlier_scale_covar = pca_outlier_scale_covar.components_.T * np.sqrt(
pca_outlier_scale_covar.explained_variance_)
loading_outlier_scale_df_covar = pd.DataFrame(data=loading_outlier_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_outlier_scale_df_covar["cos2"] = (loading_outlier_scale_df_covar["PC1"] ** 2) + (
loading_outlier_scale_df_covar["PC2"] ** 2)
line_group_df_covar = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff_covar = pd.concat([loading_outlier_scale_df_covar, line_group_df_covar], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale_covar = np.zeros(a)
zero_outlier_scale_df_covar = pd.DataFrame(data=zero_outlier_scale_covar, columns=["PC1", "PC2"])
zero_outlier_scale_df_color_covar = pd.DataFrame(data=loading_outlier_scale_df_covar.iloc[:, 2],
columns=['cos2'])
zero_outlier_scale_dff_covar = pd.concat([zero_outlier_scale_df_covar, zero_outlier_scale_df_color_covar,
line_group_df_covar], axis=1)
loading_outlier_scale_line_graph_covar = pd.concat(
[loading_outlier_scale_dff_covar, zero_outlier_scale_dff_covar], axis=0)
loading_outlier_scale_line_graph_sort_covar = loading_outlier_scale_line_graph_covar.sort_values(by='cos2')
        # select data and variance according to the outlier and matrix-type choices
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_line_graph_sort
variance = Var_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_outlier_scale_line_graph_sort
variance = Var_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_line_graph_sort_covar
variance = Var_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_outlier_scale_line_graph_sort_covar
variance = Var_outlier_scale_covar
N = len(data['cos2'].unique())
end_color = "#00264c" # dark blue
start_color = "#c6def5" # light blue
colorscale = [x.hex for x in list(Color(start_color).range_to(Color(end_color), N))]
counter = 0
counter_color = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf_all = data[data['line_group'] == i]
trace1_all = go.Scatter(x=dataf_all['PC1'], y=dataf_all['PC2'], mode='lines+text',
name=i, line=dict(color=colorscale[counter_color]),
# text=i,
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
textposition='bottom right', textfont=dict(size=12)
)
trace2_all = go.Scatter(x=[1, -1], y=[1, -1], mode='markers',
hoverinfo='skip',
marker=dict(showscale=True, opacity=0,
color=[data["cos2"].min(), data["cos2"].max()],
colorscale=colorscale,
colorbar=dict(title=dict(text="Cos2",
side='right'), ypad=0)
), )
lists[counter] = trace1_all
counter = counter + 1
counter_color = counter_color + 1
lists.append(trace2_all)
####################################################################################################
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2)), mirror=True,
ticks='outside', showline=True),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2)), mirror=True,
ticks='outside', showline=True),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif all_custom == "Custom":
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# OUTLIER DATA INPUT
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
# # x_scale_input = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
#
# x_scale_input = rescale(x_scale_input, new_min=0, new_max=1)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
Var_scale_input = pca_scale_input.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input = pca_scale_input.components_.T * np.sqrt(pca_scale_input.explained_variance_)
loading_scale_input_df = pd.DataFrame(data=loading_scale_input[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_df["cos2"] = (loading_scale_input_df["PC1"] ** 2) + (loading_scale_input_df["PC2"] ** 2)
line_group_scale_input_df = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff = pd.concat([loading_scale_input_df, line_group_scale_input_df],
axis=1)
a = (len(features_input), 2)
zero_scale_input = np.zeros(a)
zero_scale_input_df = pd.DataFrame(data=zero_scale_input, columns=["PC1", "PC2"])
zero_scale_input_df_color = pd.DataFrame(data=loading_scale_input_df.iloc[:, 2], columns=['cos2'])
zero_scale_input_dff = pd.concat([zero_scale_input_df, zero_scale_input_df_color, line_group_scale_input_df],
axis=1)
loading_scale_input_line_graph = pd.concat([loading_scale_input_dff, zero_scale_input_dff],
axis=0)
# INPUT DATA WITH REMOVING OUTLIERS
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
# # x_scale_input_outlier = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input_outlier)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_scale_input_outlier = rescale(x_scale_input_outlier, new_min=0, new_max=1)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
Var_scale_input_outlier = pca_scale_input_outlier.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier = pca_scale_input_outlier.components_.T * np.sqrt(
pca_scale_input_outlier.explained_variance_)
loading_scale_input_outlier_df = pd.DataFrame(data=loading_scale_input_outlier[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_outlier_df["cos2"] = (loading_scale_input_outlier_df["PC1"] ** 2) + \
(loading_scale_input_outlier_df["PC2"] ** 2)
line_group_scale_input_outlier_df = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff = pd.concat([loading_scale_input_outlier_df, line_group_scale_input_outlier_df],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier = np.zeros(a)
zero_scale_input_outlier_df = pd.DataFrame(data=zero_scale_input_outlier, columns=["PC1", "PC2"])
zero_scale_input_outlier_df_color = pd.DataFrame(data=loading_scale_input_outlier_df.iloc[:, 2],
columns=['cos2'])
zero_scale_input_outlier_dff = pd.concat([zero_scale_input_outlier_df, zero_scale_input_outlier_df_color,
line_group_scale_input_outlier_df],
axis=1)
loading_scale_input_outlier_line_graph = pd.concat(
[loading_scale_input_outlier_dff, zero_scale_input_outlier_dff],
axis=0)
loading_scale_input_line_graph_sort = loading_scale_input_line_graph.sort_values(by='cos2')
loading_scale_input_outlier_line_graph_sort = loading_scale_input_outlier_line_graph.sort_values(by='cos2')
# COVARIANCE MATRIX
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target], axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
Var_scale_input_covar = pca_scale_input_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_covar = pca_scale_input_covar.components_.T * np.sqrt(
pca_scale_input_covar.explained_variance_)
loading_scale_input_df_covar = pd.DataFrame(data=loading_scale_input_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_df_covar["cos2"] = (loading_scale_input_df_covar["PC1"] ** 2) + (
loading_scale_input_df_covar["PC2"] ** 2)
line_group_scale_input_df_covar = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff_covar = pd.concat([loading_scale_input_df_covar, line_group_scale_input_df_covar],
axis=1)
a = (len(features_input), 2)
zero_scale_input_covar = np.zeros(a)
zero_scale_input_df_covar = pd.DataFrame(data=zero_scale_input_covar, columns=["PC1", "PC2"])
zero_scale_input_df_color_covar = pd.DataFrame(data=loading_scale_input_df_covar.iloc[:, 2], columns=['cos2'])
zero_scale_input_dff_covar = pd.concat([zero_scale_input_df_covar, zero_scale_input_df_color_covar,
line_group_scale_input_df_covar],
axis=1)
loading_scale_input_line_graph_covar = pd.concat([loading_scale_input_dff_covar, zero_scale_input_dff_covar],
axis=0)
loading_scale_input_line_graph_sort_covar = loading_scale_input_line_graph_covar.sort_values(by='cos2')
# COVARIANCE MATRIX WITH OUTLIERS
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier_covar = dff_input_outlier.loc[:, ].values
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
Var_scale_input_outlier_covar = pca_scale_input_outlier_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier_covar = pca_scale_input_outlier_covar.components_.T * np.sqrt(
pca_scale_input_outlier_covar.explained_variance_)
loading_scale_input_outlier_df_covar = pd.DataFrame(data=loading_scale_input_outlier_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_outlier_df_covar["cos2"] = (loading_scale_input_outlier_df_covar["PC1"] ** 2) + \
(loading_scale_input_outlier_df_covar["PC2"] ** 2)
line_group_scale_input_outlier_df_covar = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff_covar = pd.concat([loading_scale_input_outlier_df_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier_covar = np.zeros(a)
zero_scale_input_outlier_df_covar = pd.DataFrame(data=zero_scale_input_outlier_covar, columns=["PC1", "PC2"])
zero_scale_input_outlier_df_color_covar = pd.DataFrame(data=loading_scale_input_outlier_df_covar.iloc[:, 2],
columns=['cos2'])
zero_scale_input_outlier_dff_covar = pd.concat([zero_scale_input_outlier_df_covar,
zero_scale_input_outlier_df_color_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
loading_scale_input_outlier_line_graph_covar = pd.concat(
[loading_scale_input_outlier_dff_covar, zero_scale_input_outlier_dff_covar],
axis=0)
loading_scale_input_outlier_line_graph_sort_covar = loading_scale_input_outlier_line_graph_covar.sort_values(
by='cos2')
####################################################################################################
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_input_line_graph_sort
variance = Var_scale_input
elif outlier == 'Yes' and matrix_type == "Correlation":
variance = Var_scale_input_outlier
data = loading_scale_input_outlier_line_graph_sort
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_input_line_graph_sort_covar
variance = Var_scale_input_covar
elif outlier == "Yes" and matrix_type == "Covariance":
variance = Var_scale_input_outlier_covar
data = loading_scale_input_outlier_line_graph_sort_covar
N = len(data['cos2'].unique())
end_color = "#00264c" # dark blue
start_color = "#c6def5" # light blue
colorscale = [x.hex for x in list(Color(start_color).range_to(Color(end_color), N))]
counter_color = 0
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf = data[data['line_group'] == i]
trace1 = go.Scatter(x=dataf['PC1'], y=dataf['PC2'], name=i, line=dict(color=colorscale[counter_color]),
mode='lines+text', textposition='bottom right', textfont=dict(size=12),
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
)
trace2_all = go.Scatter(x=[1, -1], y=[1, -1], mode='markers', hoverinfo='skip',
marker=dict(showscale=True, color=[data["cos2"].min(), data["cos2"].max()],
colorscale=colorscale, opacity=0,
colorbar=dict(title=dict(text="Cos2",
side='right'), ypad=0)
), )
lists[counter] = trace1
counter_color = counter_color + 1
counter = counter + 1
lists.append(trace2_all)
####################################################################################################
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2)),
mirror=True, ticks='outside', showline=True),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2)),
mirror=True, ticks='outside', showline=True),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
@app.callback(Output('contrib-plot', 'figure'),
[
Input('outlier-value-contrib', 'value'),
Input('feature-input', 'value'),
Input('all-custom-choice', 'value'),
Input("matrix-type-contrib", "value"),
Input('csv-data', 'data')
])
def update_contrib_plot(outlier, input, all_custom, matrix_type, data):
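    """Build the contribution plot: loading vectors coloured by the summed
    percentage contribution of each variable to PC1 and PC2. Uses the same
    All/Custom, outlier and matrix-type handling as the biplot callback."""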
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
features1 = dff.columns
features = list(features1)
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
Var_scale = pca_scale.explained_variance_ratio_
# calculating loading vector plot
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
loading_scale_df = pd.DataFrame(data=loading_scale[:, 0:2],
columns=["PC1", "PC2"])
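        # contribution of a variable to a PC = 100 * its cos2 on that PC divided by
        # the column-wise cos2 sum; the colour scale uses PC1 + PC2 contributions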
loading_scale_df["PC1_cos2"] = loading_scale_df["PC1"] ** 2
loading_scale_df["PC2_cos2"] = loading_scale_df["PC2"] ** 2
loading_scale_df["PC1_contrib"] = \
(loading_scale_df["PC1_cos2"] * 100) / (loading_scale_df["PC1_cos2"].sum(axis=0))
loading_scale_df["PC2_contrib"] = \
(loading_scale_df["PC2_cos2"] * 100) / (loading_scale_df["PC2_cos2"].sum(axis=0))
loading_scale_df["contrib"] = loading_scale_df["PC1_contrib"] + loading_scale_df["PC2_contrib"]
        # keep only PC1, PC2 and the summed contribution (colour scale) in a separate DataFrame
loading_scale_dataf = pd.concat([loading_scale_df.iloc[:, 0:2], loading_scale_df.iloc[:, 6]], axis=1)
line_group_scale_df = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff = pd.concat([loading_scale_dataf, line_group_scale_df], axis=1)
a = (len(features), 2)
zero_scale = np.zeros(a)
zero_scale_df = pd.DataFrame(data=zero_scale, columns=["PC1", "PC2"])
zero_scale_df_color = pd.DataFrame(data=loading_scale_dataf.iloc[:, 2], columns=['contrib'])
zero_scale_dff = pd.concat([zero_scale_df, zero_scale_df_color, line_group_scale_df], axis=1)
loading_scale_line_graph = pd.concat([loading_scale_dff, zero_scale_dff], axis=0)
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
Var_outlier_scale = pca_outlier_scale.explained_variance_ratio_
loading_outlier_scale = pca_outlier_scale.components_.T * np.sqrt(pca_outlier_scale.explained_variance_)
loading_outlier_scale_df = pd.DataFrame(data=loading_outlier_scale[:, 0:2],
columns=["PC1", "PC2"])
loading_outlier_scale_df["PC1_cos2"] = loading_outlier_scale_df["PC1"] ** 2
loading_outlier_scale_df["PC2_cos2"] = loading_outlier_scale_df["PC2"] ** 2
loading_outlier_scale_df["PC1_contrib"] = \
(loading_outlier_scale_df["PC1_cos2"] * 100) / (loading_outlier_scale_df["PC1_cos2"].sum(axis=0))
loading_outlier_scale_df["PC2_contrib"] = \
(loading_outlier_scale_df["PC2_cos2"] * 100) / (loading_outlier_scale_df["PC2_cos2"].sum(axis=0))
loading_outlier_scale_df["contrib"] = loading_outlier_scale_df["PC1_contrib"] + loading_outlier_scale_df[
"PC2_contrib"]
        # keep only PC1, PC2 and the summed contribution (colour scale) in a separate DataFrame
loading_outlier_scale_dataf = pd.concat(
[loading_outlier_scale_df.iloc[:, 0:2], loading_outlier_scale_df.iloc[:, 6]], axis=1)
line_group_df = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff = pd.concat([loading_outlier_scale_dataf, line_group_df], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale = np.zeros(a)
zero_outlier_scale_df = pd.DataFrame(data=zero_outlier_scale, columns=["PC1", "PC2"])
zero_outlier_scale_df_color = pd.DataFrame(data=loading_outlier_scale_dataf.iloc[:, 2], columns=['contrib'])
zero_outlier_scale_dff = pd.concat([zero_outlier_scale_df, zero_outlier_scale_df_color, line_group_df], axis=1)
loading_outlier_scale_line_graph = pd.concat([loading_outlier_scale_dff, zero_outlier_scale_dff], axis=0)
loading_scale_line_graph_sort = loading_scale_line_graph.sort_values(by='contrib')
loading_outlier_scale_line_graph_sort = loading_outlier_scale_line_graph.sort_values(by='contrib')
# COVARIANCE MATRIX
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
Var_scale_covar = pca_scale_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_covar = pca_scale_covar.components_.T * np.sqrt(pca_scale_covar.explained_variance_)
loading_scale_df_covar = pd.DataFrame(data=loading_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_df_covar["PC1_cos2"] = loading_scale_df_covar["PC1"] ** 2
loading_scale_df_covar["PC2_cos2"] = loading_scale_df_covar["PC2"] ** 2
loading_scale_df_covar["PC1_contrib"] = \
(loading_scale_df_covar["PC1_cos2"] * 100) / (loading_scale_df_covar["PC1_cos2"].sum(axis=0))
loading_scale_df_covar["PC2_contrib"] = \
(loading_scale_df_covar["PC2_cos2"] * 100) / (loading_scale_df_covar["PC2_cos2"].sum(axis=0))
loading_scale_df_covar["contrib"] = loading_scale_df_covar["PC1_contrib"] + loading_scale_df_covar[
"PC2_contrib"]
loading_scale_dataf_covar = pd.concat([loading_scale_df_covar.iloc[:, 0:2], loading_scale_df_covar.iloc[:, 6]],
axis=1)
line_group_scale_df_covar = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff_covar = pd.concat([loading_scale_dataf_covar, line_group_scale_df_covar], axis=1)
a = (len(features), 2)
zero_scale_covar = np.zeros(a)
zero_scale_df_covar = pd.DataFrame(data=zero_scale_covar, columns=["PC1", "PC2"])
zero_scale_df_color_covar = pd.DataFrame(data=loading_scale_dataf_covar.iloc[:, 2], columns=['contrib'])
zero_scale_dff_covar = pd.concat([zero_scale_df_covar, zero_scale_df_color_covar, line_group_scale_df_covar],
axis=1)
loading_scale_line_graph_covar = pd.concat([loading_scale_dff_covar, zero_scale_dff_covar], axis=0)
loading_scale_line_graph_sort_covar = loading_scale_line_graph_covar.sort_values(by='contrib')
# COVARIANCE MATRIX OUTLIERS REMOVED
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
finalDf_outlier_scale_covar = pd.concat([outlier_names, principalDf_outlier_scale_covar], axis=1)
Var_outlier_scale_covar = pca_outlier_scale_covar.explained_variance_ratio_
loading_outlier_scale_covar = pca_outlier_scale_covar.components_.T * np.sqrt(
pca_outlier_scale_covar.explained_variance_)
loading_outlier_scale_df_covar = pd.DataFrame(data=loading_outlier_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_outlier_scale_df_covar["PC1_cos2"] = loading_outlier_scale_df_covar["PC1"] ** 2
loading_outlier_scale_df_covar["PC2_cos2"] = loading_outlier_scale_df_covar["PC2"] ** 2
loading_outlier_scale_df_covar["PC1_contrib"] = \
(loading_outlier_scale_df_covar["PC1_cos2"] * 100) / (
loading_outlier_scale_df_covar["PC1_cos2"].sum(axis=0))
loading_outlier_scale_df_covar["PC2_contrib"] = \
(loading_outlier_scale_df_covar["PC2_cos2"] * 100) / (
loading_outlier_scale_df_covar["PC2_cos2"].sum(axis=0))
loading_outlier_scale_df_covar["contrib"] = loading_outlier_scale_df_covar["PC1_contrib"] + \
loading_outlier_scale_df_covar[
"PC2_contrib"]
        # keep only PC1, PC2 and the summed contribution (colour scale) in a separate DataFrame
loading_outlier_scale_dataf_covar = pd.concat(
[loading_outlier_scale_df_covar.iloc[:, 0:2], loading_outlier_scale_df_covar.iloc[:, 6]], axis=1)
line_group_df_covar = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff_covar = pd.concat([loading_outlier_scale_dataf_covar, line_group_df_covar], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale_covar = np.zeros(a)
zero_outlier_scale_df_covar = pd.DataFrame(data=zero_outlier_scale_covar, columns=["PC1", "PC2"])
zero_outlier_scale_df_color_covar = pd.DataFrame(data=loading_outlier_scale_dataf_covar.iloc[:, 2],
columns=['contrib'])
zero_outlier_scale_dff_covar = pd.concat(
[zero_outlier_scale_df_covar, zero_outlier_scale_df_color_covar, line_group_df_covar], axis=1)
loading_outlier_scale_line_graph_covar = pd.concat(
[loading_outlier_scale_dff_covar, zero_outlier_scale_dff_covar], axis=0)
loading_outlier_scale_line_graph_sort_covar = loading_outlier_scale_line_graph_covar.sort_values(by='contrib')
        # select data and variance according to the outlier and matrix-type choices
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_line_graph_sort
variance = Var_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_outlier_scale_line_graph_sort
variance = Var_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_line_graph_sort_covar
variance = Var_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_outlier_scale_line_graph_sort_covar
variance = Var_outlier_scale_covar
N = len(data['contrib'].unique())
end_color = "#00264c" # dark blue
start_color = "#c6def5" # light blue
colorscale = [x.hex for x in list(Color(start_color).range_to(Color(end_color), N))]
counter = 0
counter_color = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf_all = data[data['line_group'] == i]
trace1_all = go.Scatter(x=dataf_all['PC1'], y=dataf_all['PC2'], mode='lines+text',
name=i, line=dict(color=colorscale[counter_color]),
textposition='bottom right', textfont=dict(size=12),
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
)
trace2_all = go.Scatter(x=[1, -1], y=[1, -1], mode='markers', hoverinfo='skip',
marker=dict(showscale=True, opacity=0,
color=[data["contrib"].min(), data["contrib"].max()],
colorscale=colorscale,
colorbar=dict(title=dict(text="Contribution",
side='right'), ypad=0),
), )
lists[counter] = trace1_all
counter = counter + 1
counter_color = counter_color + 1
lists.append(trace2_all)
####################################################################################################
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2)),
mirror=True, ticks='outside', showline=True),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2)),
mirror=True, ticks='outside', showline=True),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif all_custom == "Custom":
# Drop the selected target variable(s) from the feature data
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# OUTLIER DATA INPUT
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
Var_scale_input = pca_scale_input.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input = pca_scale_input.components_.T * np.sqrt(pca_scale_input.explained_variance_)
loading_scale_input_df = pd.DataFrame(data=loading_scale_input[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_df["PC1_cos2"] = loading_scale_input_df["PC1"] ** 2
loading_scale_input_df["PC2_cos2"] = loading_scale_input_df["PC2"] ** 2
loading_scale_input_df["PC1_contrib"] = \
(loading_scale_input_df["PC1_cos2"] * 100) / (loading_scale_input_df["PC1_cos2"].sum(axis=0))
loading_scale_input_df["PC2_contrib"] = \
(loading_scale_input_df["PC2_cos2"] * 100) / (loading_scale_input_df["PC2_cos2"].sum(axis=0))
loading_scale_input_df["contrib"] = loading_scale_input_df["PC1_contrib"] + loading_scale_input_df[
"PC2_contrib"]
loading_scale_input_dataf = pd.concat(
[loading_scale_input_df.iloc[:, 0:2], loading_scale_input_df.iloc[:, 6]], axis=1)
line_group_scale_input_df = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff = pd.concat([loading_scale_input_dataf, line_group_scale_input_df],
axis=1)
a = (len(features_input), 2)
zero_scale_input = np.zeros(a)
zero_scale_input_df = pd.DataFrame(data=zero_scale_input, columns=["PC1", "PC2"])
zero_scale_input_df_color = pd.DataFrame(data=loading_scale_input_dataf.iloc[:, 2], columns=['contrib'])
zero_scale_input_dff = pd.concat([zero_scale_input_df, zero_scale_input_df_color, line_group_scale_input_df],
axis=1)
loading_scale_input_line_graph = pd.concat([loading_scale_input_dff, zero_scale_input_dff],
axis=0)
# INPUT DATA WITH OUTLIERS REMOVED
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
Var_scale_input_outlier = pca_scale_input_outlier.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier = pca_scale_input_outlier.components_.T * np.sqrt(
pca_scale_input_outlier.explained_variance_)
loading_scale_input_outlier_df = pd.DataFrame(data=loading_scale_input_outlier[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_outlier_df["PC1_cos2"] = loading_scale_input_outlier_df["PC1"] ** 2
loading_scale_input_outlier_df["PC2_cos2"] = loading_scale_input_outlier_df["PC2"] ** 2
loading_scale_input_outlier_df["PC1_contrib"] = \
(loading_scale_input_outlier_df["PC1_cos2"] * 100) / (
loading_scale_input_outlier_df["PC1_cos2"].sum(axis=0))
loading_scale_input_outlier_df["PC2_contrib"] = \
(loading_scale_input_outlier_df["PC2_cos2"] * 100) / (
loading_scale_input_outlier_df["PC2_cos2"].sum(axis=0))
loading_scale_input_outlier_df["contrib"] = loading_scale_input_outlier_df["PC1_contrib"] + \
loading_scale_input_outlier_df[
"PC2_contrib"]
loading_scale_input_outlier_dataf = pd.concat(
[loading_scale_input_outlier_df.iloc[:, 0:2], loading_scale_input_outlier_df.iloc[:, 6]], axis=1)
line_group_scale_input_outlier_df = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff = pd.concat(
[loading_scale_input_outlier_dataf, line_group_scale_input_outlier_df],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier = np.zeros(a)
zero_scale_input_outlier_df = pd.DataFrame(data=zero_scale_input_outlier, columns=["PC1", "PC2"])
zero_scale_input_outlier_df_color = pd.DataFrame(data=loading_scale_input_outlier_dataf.iloc[:, 2],
columns=['contrib'])
zero_scale_input_outlier_dff = pd.concat([zero_scale_input_outlier_df, zero_scale_input_outlier_df_color,
line_group_scale_input_outlier_df],
axis=1)
loading_scale_input_outlier_line_graph = pd.concat(
[loading_scale_input_outlier_dff, zero_scale_input_outlier_dff],
axis=0)
loading_scale_input_line_graph_sort = loading_scale_input_line_graph.sort_values(by='contrib')
loading_scale_input_outlier_line_graph_sort = loading_scale_input_outlier_line_graph.sort_values(by='contrib')
# COVARIANCE MATRIX
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target], axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
Var_scale_input_covar = pca_scale_input_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_covar = pca_scale_input_covar.components_.T * np.sqrt(
pca_scale_input_covar.explained_variance_)
loading_scale_input_df_covar = pd.DataFrame(data=loading_scale_input_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_df_covar["PC1_cos2"] = loading_scale_input_df_covar["PC1"] ** 2
loading_scale_input_df_covar["PC2_cos2"] = loading_scale_input_df_covar["PC2"] ** 2
loading_scale_input_df_covar["PC1_contrib"] = \
(loading_scale_input_df_covar["PC1_cos2"] * 100) / (loading_scale_input_df_covar["PC1_cos2"].sum(axis=0))
loading_scale_input_df_covar["PC2_contrib"] = \
(loading_scale_input_df_covar["PC2_cos2"] * 100) / (loading_scale_input_df_covar["PC2_cos2"].sum(axis=0))
loading_scale_input_df_covar["contrib"] = loading_scale_input_df_covar["PC1_contrib"] + \
loading_scale_input_df_covar[
"PC2_contrib"]
loading_scale_input_dataf_covar = pd.concat(
[loading_scale_input_df_covar.iloc[:, 0:2], loading_scale_input_df_covar.iloc[:, 6]], axis=1)
line_group_scale_input_df_covar = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff_covar = pd.concat([loading_scale_input_dataf_covar, line_group_scale_input_df_covar],
axis=1)
a = (len(features_input), 2)
zero_scale_input_covar = np.zeros(a)
zero_scale_input_df_covar = pd.DataFrame(data=zero_scale_input_covar, columns=["PC1", "PC2"])
zero_scale_input_df_color_covar = pd.DataFrame(data=loading_scale_input_dataf_covar.iloc[:, 2],
columns=['contrib'])
zero_scale_input_dff_covar = pd.concat([zero_scale_input_df_covar, zero_scale_input_df_color_covar,
line_group_scale_input_df_covar],
axis=1)
loading_scale_input_line_graph_covar = pd.concat([loading_scale_input_dff_covar, zero_scale_input_dff_covar],
axis=0)
loading_scale_input_line_graph_sort_covar = loading_scale_input_line_graph_covar.sort_values(by='contrib')
# COVARIANCE MATRIX WITH OUTLIERS REMOVED
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier_covar = dff_input_outlier.loc[:, ].values
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
Var_scale_input_outlier_covar = pca_scale_input_outlier_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier_covar = pca_scale_input_outlier_covar.components_.T * np.sqrt(
pca_scale_input_outlier_covar.explained_variance_)
loading_scale_input_outlier_df_covar = pd.DataFrame(data=loading_scale_input_outlier_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_outlier_df_covar["PC1_cos2"] = loading_scale_input_outlier_df_covar["PC1"] ** 2
loading_scale_input_outlier_df_covar["PC2_cos2"] = loading_scale_input_outlier_df_covar["PC2"] ** 2
loading_scale_input_outlier_df_covar["PC1_contrib"] = \
(loading_scale_input_outlier_df_covar["PC1_cos2"] * 100) / (
loading_scale_input_outlier_df_covar["PC1_cos2"].sum(axis=0))
loading_scale_input_outlier_df_covar["PC2_contrib"] = \
(loading_scale_input_outlier_df_covar["PC2_cos2"] * 100) / (
loading_scale_input_outlier_df_covar["PC2_cos2"].sum(axis=0))
loading_scale_input_outlier_df_covar["contrib"] = loading_scale_input_outlier_df_covar["PC1_contrib"] + \
loading_scale_input_outlier_df_covar[
"PC2_contrib"]
loading_scale_input_outlier_dataf_covar = pd.concat(
[loading_scale_input_outlier_df_covar.iloc[:, 0:2], loading_scale_input_outlier_df_covar.iloc[:, 6]],
axis=1)
line_group_scale_input_outlier_df_covar = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff_covar = pd.concat(
[loading_scale_input_outlier_dataf_covar, line_group_scale_input_outlier_df_covar],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier_covar = np.zeros(a)
zero_scale_input_outlier_df_covar = pd.DataFrame(data=zero_scale_input_outlier_covar, columns=["PC1", "PC2"])
zero_scale_input_outlier_df_color_covar = pd.DataFrame(data=loading_scale_input_outlier_dataf_covar.iloc[:, 2],
columns=['contrib'])
zero_scale_input_outlier_dff_covar = pd.concat(
[zero_scale_input_outlier_df_covar, zero_scale_input_outlier_df_color_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
loading_scale_input_outlier_line_graph_covar = pd.concat(
[loading_scale_input_outlier_dff_covar, zero_scale_input_outlier_dff_covar],
axis=0)
loading_scale_input_outlier_line_graph_sort_covar = loading_scale_input_outlier_line_graph_covar.sort_values(
by='contrib')
####################################################################################################
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_input_line_graph_sort
variance = Var_scale_input
elif outlier == 'Yes' and matrix_type == "Correlation":
variance = Var_scale_input_outlier
data = loading_scale_input_outlier_line_graph_sort
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_input_line_graph_sort_covar
variance = Var_scale_input_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_scale_input_outlier_line_graph_sort_covar
variance = Var_scale_input_outlier_covar
N = len(data['contrib'].unique())
end_color = "#00264c" # dark blue
start_color = "#c6def5" # light blue
colorscale = [x.hex for x in list(Color(start_color).range_to(Color(end_color), N))]
counter_color = 0
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf = data[data['line_group'] == i]
trace1 = go.Scatter(x=dataf['PC1'], y=dataf['PC2'], name=i, line=dict(color=colorscale[counter_color]),
mode='lines+text', textposition='bottom right', textfont=dict(size=12),
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
)
trace2_all = go.Scatter(x=[1, -1], y=[1, -1], mode='markers', hoverinfo='skip',
marker=dict(showscale=True, color=[data["contrib"].min(), data["contrib"].max()],
colorscale=colorscale, opacity=0,
colorbar=dict(title=dict(text="Contribution",
side='right'), ypad=0)
))
lists[counter] = trace1
counter_color = counter_color + 1
counter = counter + 1
lists.append(trace2_all)
####################################################################################################
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2)),
mirror=True, ticks='outside', showline=True),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2)),
mirror=True, ticks='outside', showline=True),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
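# Illustrative sketch (added; not part of the original app): the loading-line callbacks above
# repeatedly derive a per-variable "contribution" colour value from a fitted PCA. The
# hypothetical helper below distils that pattern on its own; the name `pca_contributions`
# and its signature are assumptions made purely for illustration.
def pca_contributions(pca, n_components=2):
    """Per-variable loadings for the first components plus their summed contribution (%)."""
    import numpy as np
    import pandas as pd
    loadings = pca.components_.T * np.sqrt(pca.explained_variance_)
    cos2 = loadings[:, :n_components] ** 2
    contrib = 100 * cos2 / cos2.sum(axis=0)      # per-component contribution in percent
    cols = ['PC' + str(i + 1) for i in range(n_components)]
    out = pd.DataFrame(loadings[:, :n_components], columns=cols)
    out['contrib'] = contrib.sum(axis=1)         # value used for the colour scale above
    return out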
@app.callback(Output('download-link', 'download'),
[Input('all-custom-choice', 'value'),
Input('eigenA-outlier', 'value'),
Input("matrix-type-data-table", 'value')])
def update_filename(all_custom, outlier, matrix_type):
if all_custom == 'All' and outlier == 'Yes' and matrix_type == "Correlation":
download = 'all_variables_correlation_matrix_outliers_removed_data.csv'
elif all_custom == 'All' and outlier == 'Yes' and matrix_type == "Covariance":
download = 'all_variables_covariance_matrix_outliers_removed_data.csv'
elif all_custom == 'All' and outlier == 'No' and matrix_type == "Correlation":
download = 'all_variables_correlation_matrix_data.csv'
elif all_custom == 'All' and outlier == 'No' and matrix_type == "Covariance":
download = 'all_variables_covariance_matrix_data.csv'
elif all_custom == 'Custom' and outlier == 'Yes' and matrix_type == "Correlation":
download = 'custom_variables_correlation_matrix_outliers_removed_data.csv'
elif all_custom == 'Custom' and outlier == 'Yes' and matrix_type == "Covariance":
download = 'custom_variables_covariance_matrix_outliers_removed_data.csv'
elif all_custom == 'Custom' and outlier == 'No' and matrix_type == "Correlation":
download = 'custom_variables_correlation_matrix_data.csv'
elif all_custom == 'Custom' and outlier == 'No' and matrix_type == "Covariance":
download = 'custom_variables_covariance_matrix_data.csv'
return download
@app.callback(Output('download-link', 'href'),
[Input('all-custom-choice', 'value'),
Input('feature-input', 'value'),
Input('eigenA-outlier', 'value'),
Input("matrix-type-data-table", 'value'),
Input('csv-data', 'data')])
def update_link(all_custom, input, outlier, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
features1 = dff.columns
features = list(features1)
if all_custom == 'All':
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# ORIGINAL DATA WITH OUTLIERS
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
# x_scale = rescale(x_scale, new_min=0, new_max=1)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining sample identifiers and principal components
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
dfff_scale = finalDf_scale.fillna(0)
# ORIGINAL DATA WITH OUTLIERS REMOVED
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
# combining sample identifiers and principal components
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
dfff_outlier_scale = finalDf_outlier_scale.fillna(0)
# COVARIANCE MATRIX
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining sample identifiers and principal components
finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
dfff_scale_covar = finalDf_scale_covar.fillna(0)
# COVARIANCE MATRIX WITH OUTLIERS REMOVED
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
finalDf_outlier_scale_covar = pd.concat([outlier_names, principalDf_outlier_scale_covar], axis=1)
dfff_outlier_scale_covar = finalDf_outlier_scale_covar.fillna(0)
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_outlier_scale_covar
elif all_custom == 'Custom':
# Drop the selected target variable(s) from the feature data
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# OUTLIER DATA INPUT
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
# INPUT DATA WITH OUTLIERS REMOVED
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
# COVARIANCE MATRIX
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target], axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
# COVARIANCE MATRIX OUTLIERS REMOVED
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier_covar = dff_input_outlier.loc[:, ].values
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale_input
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_scale_input_outlier
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_input_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_scale_input_outlier_covar
csv_string = dat.to_csv(index=False, encoding='utf-8')
csv_string = "data:text/csv;charset=utf-8,%EF%BB%BF" + urllib.parse.quote(csv_string)
return csv_string
@app.callback(Output('download-link-correlation', 'download'),
[Input('eigenA-outlier', 'value'),
])
def update_filename(outlier):
if outlier == 'Yes':
download = 'feature_correlation_removed_outliers_data.csv'
elif outlier == 'No':
download = 'feature_correlation_data.csv'
return download
@app.callback([Output('data-table-correlation', 'data'),
Output('data-table-correlation', 'columns'),
Output('download-link-correlation', 'href')],
[Input("eigenA-outlier", 'value'),
Input('csv-data', 'data')], )
def update_output(outlier, data):
if not data:
return dash.no_update, dash.no_update, dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No':
features1 = dff.columns
features = list(features1)
# correlation coefficient and coefficient of determination
correlation_dff = dff.corr(method='pearson', )
r2_dff_table = correlation_dff * correlation_dff
r2_dff_table.insert(0, 'Features', features)
data_frame = r2_dff_table
if outlier == 'Yes':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# correlation coefficient and coefficient of determination
correlation_dff_outlier = outlier_dff.corr(method='pearson', )
r2_dff_outlier_table = correlation_dff_outlier * correlation_dff_outlier
r2_dff_outlier_table.insert(0, 'Features', features_outlier)
data_frame = r2_dff_outlier_table
data = data_frame.to_dict('records')
columns = [{"name": i, "id": i, "deletable": True, "selectable": True, 'type': 'numeric',
'format': Format(precision=3, scheme=Scheme.fixed)} for i in data_frame.columns]
csv_string = data_frame.to_csv(index=False, encoding='utf-8')
csv_string = "data:text/csv;charset=utf-8,%EF%BB%BF" + urllib.parse.quote(csv_string)
return data, columns, csv_string
@app.callback(Output('download-link-eigenA', 'download'),
[Input("matrix-type-data-table", 'value'),
Input('eigenA-outlier', 'value')])
def update_filename(matrix_type, outlier):
if outlier == 'Yes' and matrix_type == "Correlation":
download = 'Eigen_Analysis_correlation_matrix_removed_outliers_data.csv'
elif outlier == 'Yes' and matrix_type == "Covariance":
download = 'Eigen_Analysis_covariance_matrix_removed_outliers_data.csv'
elif outlier == 'No' and matrix_type == "Correlation":
download = 'Eigen_Analysis_correlation_matrix_data.csv'
elif outlier == "No" and matrix_type == "Covariance":
download = 'Eigen_Analysis_covariance_matrix_data.csv'
return download
@app.callback([Output('data-table-eigenA', 'data'),
Output('data-table-eigenA', 'columns'),
Output('download-link-eigenA', 'href')],
[Input('all-custom-choice', 'value'),
Input("eigenA-outlier", 'value'),
Input('feature-input', 'value'),
Input("matrix-type-data-table", 'value'),
Input('csv-data', 'data')], )
def update_output(all_custom, outlier, input, matrix_type, data):
if not data:
return dash.no_update, dash.no_update, dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
if outlier == 'No' and matrix_type == "Correlation":
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining sample identifiers and principal components
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))],
columns=['Principal Component'])
PC_num = [float(i + 1) for i in range(len(features))]
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
PC_interp = np.interp(70, Var_dff['Cumulative Proportion of Explained Variance'], PC_num)
PC_interp_int = math.ceil(PC_interp)
eigenvalues = pca.explained_variance_
Eigen_df = pd.DataFrame(data=eigenvalues, columns=['Eigenvalues'])
Eigen_dff = pd.concat([PC_df, Eigen_df], axis=1)
Var_dfff = pd.concat([(Var_cumsum * 100)], axis=1)
Eigen_Analysis = pd.concat([PC_df.T, Eigen_df.T, Var_df.T, Var_dfff.T], axis=0)
Eigen_Analysis = Eigen_Analysis.rename(columns=Eigen_Analysis.iloc[0])
Eigen_Analysis = Eigen_Analysis.drop(Eigen_Analysis.index[0])
Eigen_Analysis.insert(loc=0, column="Principal Components",
value=["Eigenvalues", "Proportion of Explained Variance",
"Cumulative Proportion of Explained Variance (%)"])
data_frame_EigenA = Eigen_Analysis
elif outlier == 'Yes' and matrix_type == "Correlation":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# correlation coefficient and coefficient of determination
correlation_dff_outlier = outlier_dff.corr(method='pearson', )
r2_dff_outlier = correlation_dff_outlier * correlation_dff_outlier
x_outlier = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier = outlier_dff.loc[:, ].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
# combining sample identifiers and principal components
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
dfff_outlier = finalDf_outlier
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
loading_dff_outlier = loading_df_outlier.T
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier['Cumulative Proportion of Explained Variance'],
PC_num_outlier)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
eigenvalues_outlier = pca_outlier.explained_variance_
Eigen_df_outlier = pd.DataFrame(data=eigenvalues_outlier, columns=['Eigenvalues'])
Eigen_dff_outlier = pd.concat([PC_df_outlier, Eigen_df_outlier], axis=1)
Var_dfff_outlier = pd.concat([Var_cumsum_outlier * 100], axis=1)
Eigen_Analysis_Outlier = pd.concat(
[PC_df_outlier.T, Eigen_df_outlier.T, Var_df_outlier.T, Var_dfff_outlier.T],
axis=0)
Eigen_Analysis_Outlier = Eigen_Analysis_Outlier.rename(columns=Eigen_Analysis_Outlier.iloc[0])
Eigen_Analysis_Outlier = Eigen_Analysis_Outlier.drop(Eigen_Analysis_Outlier.index[0])
Eigen_Analysis_Outlier.insert(loc=0, column="Principal Components",
value=["Eigenvalues", "Proportion of Explained Variance",
"Cumulative Proportion of Explained Variance (%)"])
data_frame_EigenA = Eigen_Analysis_Outlier
elif outlier == "No" and matrix_type == "Covariance":
features1 = dff.columns
features = list(features1)
x_covar = dff.loc[:, features].values
pca_covar = PCA(n_components=len(features))
principalComponents_covar = pca_covar.fit_transform(x_covar)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining sample identifiers and principal components
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
dfff_covar = finalDf_covar
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
loading_dff_covar = loading_df_covar.T
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))],
columns=['Principal Component'])
PC_num_covar = [float(i + 1) for i in range(len(features))]
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
PC_interp_covar = np.interp(70, Var_dff_covar['Cumulative Proportion of Explained Variance'], PC_num_covar)
PC_interp_int_covar = math.ceil(PC_interp_covar)
eigenvalues_covar = pca_covar.explained_variance_
Eigen_df_covar = pd.DataFrame(data=eigenvalues_covar, columns=['Eigenvalues'])
Eigen_dff_covar = pd.concat([PC_df_covar, Eigen_df_covar], axis=1)
Var_dfff_covar = pd.concat([(Var_cumsum_covar * 100)], axis=1)
Eigen_Analysis_covar = pd.concat([PC_df_covar.T, Eigen_df_covar.T, Var_df_covar.T, Var_dfff_covar.T],
axis=0)
Eigen_Analysis_covar = Eigen_Analysis_covar.rename(columns=Eigen_Analysis_covar.iloc[0])
Eigen_Analysis_covar = Eigen_Analysis_covar.drop(Eigen_Analysis_covar.index[0])
Eigen_Analysis_covar.insert(loc=0, column="Principal Components",
value=["Eigenvalues", "Proportion of Explained Variance",
"Cumulative Proportion of Explained Variance (%)"])
data_frame_EigenA = Eigen_Analysis_covar
elif outlier == "Yes" and matrix_type == "Covariance":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier_covar = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier_covar = outlier_dff.loc[:, ].values
pca_outlier_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier_covar)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
,
columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
# combining sample identifiers and principal components
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
dfff_outlier_covar = finalDf_outlier_covar
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
loading_dff_outlier_covar = loading_df_outlier_covar.T
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier_covar = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
PC_interp_outlier_covar = np.interp(70,
Var_dff_outlier_covar['Cumulative Proportion of Explained Variance'],
PC_num_outlier_covar)
PC_interp_int_outlier_covar = math.ceil(PC_interp_outlier_covar)
eigenvalues_outlier_covar = pca_outlier_covar.explained_variance_
Eigen_df_outlier_covar = pd.DataFrame(data=eigenvalues_outlier_covar, columns=['Eigenvalues'])
Eigen_dff_outlier_covar = pd.concat([PC_df_outlier_covar, Eigen_df_outlier_covar], axis=1)
Var_dfff_outlier_covar = pd.concat([Var_cumsum_outlier_covar * 100], axis=1)
Eigen_Analysis_Outlier_covar = pd.concat(
[PC_df_outlier_covar.T, Eigen_df_outlier_covar.T, Var_df_outlier_covar.T, Var_dfff_outlier_covar.T],
axis=0)
Eigen_Analysis_Outlier_covar = Eigen_Analysis_Outlier_covar.rename(
columns=Eigen_Analysis_Outlier_covar.iloc[0])
Eigen_Analysis_Outlier_covar = Eigen_Analysis_Outlier_covar.drop(Eigen_Analysis_Outlier_covar.index[0])
Eigen_Analysis_Outlier_covar.insert(loc=0, column="Principal Components",
value=["Eigenvalues", "Proportion of Explained Variance",
"Cumulative Proportion of Explained Variance (%)"])
data_frame_EigenA = Eigen_Analysis_Outlier_covar
elif all_custom == "Custom":
if outlier == 'No' and matrix_type == "Correlation":
# Drop the selected target variable(s) from the feature data
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
# INPUT DATA WITH OUTLIERS
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
Var_scale_input = pca_scale_input.explained_variance_ratio_
eigenvalues_scale_input = pca_scale_input.explained_variance_
Eigen_df_scale_input = pd.DataFrame(data=eigenvalues_scale_input, columns=["Eigenvalues"])
PC_df_scale_input = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_input))],
columns=['Principal Component'])
Var_df_scale_input = pd.DataFrame(data=Var_scale_input,
columns=['Cumulative Proportion of Explained Ratio'])
Var_cumsum_scale_input = Var_df_scale_input.cumsum()
Var_dfff_scale_input = pd.concat([Var_cumsum_scale_input * 100], axis=1)
Eigen_Analysis_scale_input = pd.concat([PC_df_scale_input.T, Eigen_df_scale_input.T,
Var_df_scale_input.T, Var_dfff_scale_input.T], axis=0)
Eigen_Analysis_scale_input = Eigen_Analysis_scale_input.rename(columns=Eigen_Analysis_scale_input.iloc[0])
Eigen_Analysis_scale_input = Eigen_Analysis_scale_input.drop(Eigen_Analysis_scale_input.index[0])
Eigen_Analysis_scale_input.insert(loc=0, column="Principal Components",
value=["Eigenvalues", "Proportion of Explained Variance",
"Cumulative Proportion of Explained Variance (%)"])
data_frame_EigenA = Eigen_Analysis_scale_input
elif outlier == "Yes" and matrix_type == "Correlation":
dff_input = dff.drop(columns=dff[input])
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
dff_target = dff[input]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
# INPUT DATA WITH OUTLIERS REMOVED
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
Var_scale_input_outlier = pca_scale_input_outlier.explained_variance_ratio_
eigenvalues_scale_input_outlier = pca_scale_input_outlier.explained_variance_
Eigen_df_scale_input_outlier = pd.DataFrame(data=eigenvalues_scale_input_outlier, columns=["Eigenvalues"])
PC_df_scale_input_outlier = pd.DataFrame(
data=['PC' + str(i + 1) for i in range(len(features_input_outlier))],
columns=['Principal Component'])
Var_df_scale_input_outlier = pd.DataFrame(data=Var_scale_input_outlier,
columns=['Cumulative Proportion of Explained '
'Ratio'])
Var_cumsum_scale_input_outlier = Var_df_scale_input_outlier.cumsum()
Var_dfff_scale_input_outlier = pd.concat([Var_cumsum_scale_input_outlier * 100], axis=1)
Eigen_Analysis_scale_input_outlier = pd.concat([PC_df_scale_input_outlier.T, Eigen_df_scale_input_outlier.T,
Var_df_scale_input_outlier.T,
Var_dfff_scale_input_outlier.T], axis=0)
Eigen_Analysis_scale_input_outlier = Eigen_Analysis_scale_input_outlier.rename(
columns=Eigen_Analysis_scale_input_outlier.iloc[0])
Eigen_Analysis_scale_input_outlier = Eigen_Analysis_scale_input_outlier.drop(
Eigen_Analysis_scale_input_outlier.index[0])
Eigen_Analysis_scale_input_outlier.insert(loc=0, column="Principal Components",
value=["Eigenvalues", "Proportion of Explained Variance",
"Cumulative Proportion of Explained Variance (%)"])
data_frame_EigenA = Eigen_Analysis_scale_input_outlier
elif outlier == "No" and matrix_type == "Covariance":
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
x_scale_input_covar = dff_input.loc[:, features_input].values
# INPUT DATA WITH OUTLIERS
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target],
axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
Var_scale_input_covar = pca_scale_input_covar.explained_variance_ratio_
eigenvalues_scale_input_covar = pca_scale_input_covar.explained_variance_
Eigen_df_scale_input_covar = pd.DataFrame(data=eigenvalues_scale_input_covar, columns=["Eigenvalues"])
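# Illustrative sketch (added; not from the original source): every Eigen_Analysis table
# assembled in the callback above follows the same recipe -- eigenvalues, proportion of
# explained variance, and the cumulative proportion. A hypothetical standalone version of
# that recipe (function name and table layout assumed for illustration) looks like:
def eigen_table(X):
    """Eigenvalues, explained-variance proportion, and cumulative proportion for X."""
    import numpy as np
    import pandas as pd
    from sklearn.decomposition import PCA
    pca = PCA(n_components=X.shape[1]).fit(X)
    return pd.DataFrame(
        {"Eigenvalues": pca.explained_variance_,
         "Proportion of Explained Variance": pca.explained_variance_ratio_,
         "Cumulative Proportion of Explained Variance (%)":
             np.cumsum(pca.explained_variance_ratio_) * 100},
        index=['PC' + str(i + 1) for i in range(X.shape[1])])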
#!/usr/bin/env python3
__author__ = "<EMAIL>"
import os
import os.path
import sys
import subprocess
import argparse
import datetime
import epiweeks
import pandas as pd
import numpy as np
def load_data(assemblies_tsv, collab_tsv, min_unambig, min_date, max_date):
df_assemblies = pd.read_csv(assemblies_tsv, sep='\t').dropna(how='all')
if collab_tsv and os.path.isfile(collab_tsv) and os.path.getsize(collab_tsv):
collab_ids = pd.read_csv(collab_tsv, sep='\t').dropna(how='all')[list(['external_id', 'collaborator_id'])]
collab_ids.columns = ['sample', 'collaborator_id']
else:
collab_ids = None
# format dates properly
df_assemblies = df_assemblies.loc[
~df_assemblies['run_date'].isna() &
~df_assemblies['collection_date'].isna() &
(df_assemblies['run_date'] != 'missing') &
(df_assemblies['collection_date'] != 'missing')]
df_assemblies = df_assemblies.astype({'collection_date':'datetime64[D]','run_date':'datetime64[D]'})
# fix vadr_num_alerts
df_assemblies = df_assemblies.astype({'vadr_num_alerts':'Int64'})
# remove columns with File URIs
cols_unwanted = [
'assembly_fasta','coverage_plot','aligned_bam','replicate_discordant_vcf',
'variants_from_ref_vcf','nextclade_tsv','nextclade_json',
'pangolin_csv','vadr_tgz','vadr_alerts',
]
cols_unwanted = list(c for c in cols_unwanted if c in df_assemblies.columns)
df_assemblies.drop(columns=cols_unwanted, inplace=True)
# subset by date range
if min_date:
df_assemblies = df_assemblies.loc[~df_assemblies['run_date'].isna() & (np.datetime64(min_date) <= df_assemblies['run_date'])]
if max_date:
df_assemblies = df_assemblies.loc[~df_assemblies['run_date'].isna() & (df_assemblies['run_date'] <= np.datetime64(max_date))]
# fix missing data in purpose_of_sequencing
df_assemblies.loc[:,'purpose_of_sequencing'] = df_assemblies.loc[:,'purpose_of_sequencing'].fillna('Missing').replace('', 'Missing')
# derived column: genome_status
if 'genome_status' not in df_assemblies.columns:
df_assemblies.loc[:,'genome_status'] = list(
'failed_sequencing' if df_assemblies.loc[id, 'assembly_length_unambiguous'] < min_unambig
else 'failed_annotation' if df_assemblies.loc[id, 'vadr_num_alerts'] > 0
else 'submittable'
for id in df_assemblies.index)
# derived columns: geo_country, geo_state, geo_locality
if 'geo_country' not in df_assemblies.columns:
df_assemblies.loc[:,'geo_country'] = list(g.split(': ')[0] if not pd.isna(g)
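# Illustrative sketch (added; not part of the original script): the genome_status rule above
# is a simple three-way threshold on unambiguous length and VADR alerts. On a toy frame,
# with an assumed min_unambig of 24000, it behaves like this:
def toy_genome_status(min_unambig=24000):
    import pandas as pd
    toy = pd.DataFrame({'assembly_length_unambiguous': [29000, 10000, 29500],
                        'vadr_num_alerts': [0, 0, 3]})
    toy['genome_status'] = [
        'failed_sequencing' if length < min_unambig
        else 'failed_annotation' if alerts > 0
        else 'submittable'
        for length, alerts in zip(toy['assembly_length_unambiguous'],
                                  toy['vadr_num_alerts'])]
    return toy  # statuses: submittable, failed_sequencing, failed_annotation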
from datetime import timedelta
import operator
from typing import Any, Callable, List, Optional, Sequence, Union
import numpy as np
from pandas._libs.tslibs import (
NaT,
NaTType,
frequencies as libfrequencies,
iNaT,
period as libperiod,
)
from pandas._libs.tslibs.fields import isleapyear_arr
from pandas._libs.tslibs.period import (
DIFFERENT_FREQ,
IncompatibleFrequency,
Period,
get_period_field_arr,
period_asfreq_arr,
)
from pandas._libs.tslibs.timedeltas import Timedelta, delta_to_nanoseconds
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import (
_TD_DTYPE,
ensure_object,
is_datetime64_dtype,
is_float_dtype,
is_period_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import PeriodDtype
from pandas.core.dtypes.generic import (
ABCIndexClass,
ABCPeriodArray,
ABCPeriodIndex,
ABCSeries,
)
from pandas.core.dtypes.missing import isna, notna
import pandas.core.algorithms as algos
from pandas.core.arrays import datetimelike as dtl
import pandas.core.common as com
from pandas.tseries import frequencies
from pandas.tseries.offsets import DateOffset, Tick, _delta_to_tick
def _field_accessor(name, alias, docstring=None):
def f(self):
base, mult = libfrequencies.get_freq_code(self.freq)
result = get_period_field_arr(alias, self.asi8, base)
return result
f.__name__ = name
f.__doc__ = docstring
return property(f)
class PeriodArray(dtl.DatetimeLikeArrayMixin, dtl.DatelikeOps):
"""
Pandas ExtensionArray for storing Period data.
Users should use :func:`period_array` to create new instances.
Parameters
----------
values : Union[PeriodArray, Series[period], ndarray[int], PeriodIndex]
The data to store. These should be arrays that can be directly
converted to ordinals without inference or copy (PeriodArray,
ndarray[int64]), or a box around such an array (Series[period],
PeriodIndex).
freq : str or DateOffset
The `freq` to use for the array. Mostly applicable when `values`
is an ndarray of integers, when `freq` is required. When `values`
is a PeriodArray (or box around), it's checked that ``values.freq``
matches `freq`.
dtype : PeriodDtype, optional
A PeriodDtype instance from which to extract a `freq`. If both
`freq` and `dtype` are specified, then the frequencies must match.
copy : bool, default False
Whether to copy the ordinals before storing.
Attributes
----------
None
Methods
-------
None
See Also
--------
period_array : Create a new PeriodArray.
PeriodIndex : Immutable Index for period data.
Notes
-----
There are two components to a PeriodArray
- ordinals : integer ndarray
- freq : pd.tseries.offsets.Offset
The values are physically stored as a 1-D ndarray of integers. These are
called "ordinals" and represent some kind of offset from a base.
The `freq` indicates the span covered by each element of the array.
All elements in the PeriodArray have the same `freq`.
"""
# array priority higher than numpy scalars
__array_priority__ = 1000
_typ = "periodarray" # ABCPeriodArray
_scalar_type = Period
_recognized_scalars = (Period,)
_is_recognized_dtype = is_period_dtype
# Names others delegate to us
_other_ops: List[str] = []
_bool_ops = ["is_leap_year"]
_object_ops = ["start_time", "end_time", "freq"]
_field_ops = [
"year",
"month",
"day",
"hour",
"minute",
"second",
"weekofyear",
"weekday",
"week",
"dayofweek",
"dayofyear",
"quarter",
"qyear",
"days_in_month",
"daysinmonth",
]
_datetimelike_ops = _field_ops + _object_ops + _bool_ops
_datetimelike_methods = ["strftime", "to_timestamp", "asfreq"]
# --------------------------------------------------------------------
# Constructors
def __init__(self, values, freq=None, dtype=None, copy=False):
freq = validate_dtype_freq(dtype, freq)
if freq is not None:
freq = Period._maybe_convert_freq(freq)
if isinstance(values, ABCSeries):
values = values._values
if not isinstance(values, type(self)):
raise TypeError("Incorrect dtype")
elif isinstance(values, ABCPeriodIndex):
values = values._values
if isinstance(values, type(self)):
if freq is not None and freq != values.freq:
raise raise_on_incompatible(values, freq)
values, freq = values._data, values.freq
values = np.array(values, dtype="int64", copy=copy)
self._data = values
if freq is None:
raise ValueError("freq is not specified and cannot be inferred")
self._dtype = PeriodDtype(freq)
@classmethod
def _simple_new(cls, values: np.ndarray, freq=None, **kwargs):
# alias for PeriodArray.__init__
assert isinstance(values, np.ndarray) and values.dtype == "i8"
return cls(values, freq=freq, **kwargs)
@classmethod
def _from_sequence(
cls,
scalars: Sequence[Optional[Period]],
dtype: Optional[PeriodDtype] = None,
copy: bool = False,
) -> ABCPeriodArray:
if dtype:
freq = dtype.freq
else:
freq = None
if isinstance(scalars, cls):
validate_dtype_freq(scalars.dtype, freq)
if copy:
scalars = scalars.copy()
return scalars
periods = np.asarray(scalars, dtype=object)
if copy:
periods = periods.copy()
freq = freq or libperiod.extract_freq(periods)
ordinals = libperiod.extract_ordinals(periods, freq)
return cls(ordinals, freq=freq)
@classmethod
def _from_sequence_of_strings(cls, strings, dtype=None, copy=False):
return cls._from_sequence(strings, dtype, copy)
@classmethod
def _from_datetime64(cls, data, freq, tz=None):
"""
Construct a PeriodArray from a datetime64 array
Parameters
----------
data : ndarray[datetime64[ns], datetime64[ns, tz]]
freq : str or Tick
tz : tzinfo, optional
Returns
-------
PeriodArray[freq]
"""
data, freq = dt64arr_to_periodarr(data, freq, tz)
return cls(data, freq=freq)
@classmethod
def _generate_range(cls, start, end, periods, freq, fields):
periods = dtl.validate_periods(periods)
if freq is not None:
freq = Period._maybe_convert_freq(freq)
field_count = len(fields)
if start is not None or end is not None:
if field_count > 0:
raise ValueError(
"Can either instantiate from fields or endpoints, but not both"
)
subarr, freq = _get_ordinal_range(start, end, periods, freq)
elif field_count > 0:
subarr, freq = _range_from_fields(freq=freq, **fields)
else:
raise ValueError("Not enough parameters to construct Period range")
return subarr, freq
# -----------------------------------------------------------------
# DatetimeLike Interface
def _unbox_scalar(self, value: Union[Period, NaTType]) -> int:
if value is NaT:
return value.value
elif isinstance(value, self._scalar_type):
if not isna(value):
self._check_compatible_with(value)
return value.ordinal
else:
raise ValueError(f"'value' should be a Period. Got '{value}' instead.")
def _scalar_from_string(self, value: str) -> Period:
return Period(value, freq=self.freq)
def _check_compatible_with(self, other, setitem: bool = False):
if other is NaT:
return
if self.freqstr != other.freqstr:
raise raise_on_incompatible(self, other)
# --------------------------------------------------------------------
# Data / Attributes
@cache_readonly
def dtype(self):
return self._dtype
# error: Read-only property cannot override read-write property [misc]
@property # type: ignore
def freq(self):
"""
Return the frequency object for this PeriodArray.
"""
return self.dtype.freq
def __array__(self, dtype=None) -> np.ndarray:
# overriding DatetimelikeArray
return np.array(list(self), dtype=object)
def __arrow_array__(self, type=None):
"""
Convert myself into a pyarrow Array.
"""
import pyarrow
from pandas.core.arrays._arrow_utils import ArrowPeriodType
if type is not None:
if pyarrow.types.is_integer(type):
return pyarrow.array(self._data, mask=self.isna(), type=type)
elif isinstance(type, ArrowPeriodType):
# ensure we have the same freq
if self.freqstr != type.freq:
raise TypeError(
"Not supported to convert PeriodArray to array with different "
f"'freq' ({self.freqstr} vs {type.freq})"
)
else:
raise TypeError(
f"Not supported to convert PeriodArray to '{type}' type"
)
period_type = ArrowPeriodType(self.freqstr)
storage_array = pyarrow.array(self._data, mask=self.isna(), type="int64")
return pyarrow.ExtensionArray.from_storage(period_type, storage_array)
# --------------------------------------------------------------------
# Vectorized analogues of Period properties
year = _field_accessor(
"year",
0,
"""
The year of the period.
""",
)
month = _field_accessor(
"month",
3,
"""
The month as January=1, December=12.
""",
)
day = _field_accessor(
"day",
4,
"""
The days of the period.
""",
)
hour = _field_accessor(
"hour",
5,
"""
The hour of the period.
""",
)
minute = _field_accessor(
"minute",
6,
"""
The minute of the period.
""",
)
second = _field_accessor(
"second",
7,
"""
The second of the period.
""",
)
weekofyear = _field_accessor(
"week",
8,
"""
The week ordinal of the year.
""",
)
week = weekofyear
dayofweek = _field_accessor(
"dayofweek",
10,
"""
The day of the week with Monday=0, Sunday=6.
""",
)
weekday = dayofweek
dayofyear = day_of_year = _field_accessor(
"dayofyear",
9,
"""
The ordinal day of the year.
""",
)
quarter = _field_accessor(
"quarter",
2,
"""
The quarter of the date.
""",
)
qyear = _field_accessor("qyear", 1)
days_in_month = _field_accessor(
"days_in_month",
11,
"""
The number of days in the month.
""",
)
daysinmonth = days_in_month
@property
def is_leap_year(self):
"""
Logical indicating if the date belongs to a leap year.
"""
return isleapyear_arr(np.asarray(self.year))
@property
def start_time(self):
return self.to_timestamp(how="start")
@property
def end_time(self):
return self.to_timestamp(how="end")
def to_timestamp(self, freq=None, how="start"):
"""
Cast to DatetimeArray/Index.
Parameters
----------
freq : str or DateOffset, optional
Target frequency. The default is 'D' for week or longer,
'S' otherwise.
how : {'s', 'e', 'start', 'end'}
Whether to use the start or end of the time period being converted.
Returns
-------
DatetimeArray/Index
"""
from pandas.core.arrays import DatetimeArray
how = libperiod._validate_end_alias(how)
end = how == "E"
if end:
if freq == "B":
# roll forward to ensure we land on B date
adjust = Timedelta(1, "D") - Timedelta(1, "ns")
return self.to_timestamp(how="start") + adjust
else:
adjust = Timedelta(1, "ns")
return (self + self.freq).to_timestamp(how="start") - adjust
if freq is None:
base, mult = libfrequencies.get_freq_code(self.freq)
freq = libfrequencies.get_to_timestamp_base(base)
else:
freq = Period._maybe_convert_freq(freq)
base, mult = libfrequencies.get_freq_code(freq)
new_data = self.asfreq(freq, how=how)
new_data = libperiod.periodarr_to_dt64arr(new_data.asi8, base)
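# Illustrative sketch (added; not part of the pandas source): the how='start' / how='end'
# handling implemented above, seen on a small monthly PeriodIndex:
import pandas as pd
_pi = pd.period_range('2000-01', periods=2, freq='M')
_starts = _pi.to_timestamp(how='start')   # midnight on the first day of each month
_ends = _pi.to_timestamp(how='end')       # one nanosecond before the next period begins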
"""\
Data structures for expt.
The "Experiment" data is structured like a 4D array, i.e.
Experiment := [hypothesis_name, run_index, index, column]
The data is structured in the following ways (from higher to lower level):
Experiment (~= Dict[str, List[DataFrame]]):
An experiment consists of one or multiple Hypotheses (e.g. different
hyperparameters or algorithms) that can be compared with one another.
Hypothesis (~= List[DataFrame], or RunGroup):
A Hypothesis consists of several `Run`s that share an
identical experimental setups (e.g. hyperparameters).
This usually corresponds to one single curve for each model.
It may also contain additional metadata of the experiment.
Run (~= DataFrame == [index, column]):
Contains a pandas DataFrame (a table-like structure, str -> Series)
as well as more metadata (e.g. path, seed, etc.)
Note that one can also manage a collection of Experiments (e.g. the same set
of hypotheses or algorithms applied over different environments or dataset).
"""
import collections
import fnmatch
import itertools
import os.path
import re
import sys
import types
from dataclasses import dataclass # for python 3.6, backport needed
from multiprocessing.pool import Pool as MultiprocessPool
from multiprocessing.pool import ThreadPool
from typing import (Any, Callable, Dict, Generator, Iterable, Iterator, List,
Mapping, MutableMapping, Optional, Sequence, Set, Tuple,
TypeVar, Union)
import numpy as np
import pandas as pd
from pandas.core.accessor import CachedAccessor
from pandas.core.groupby.generic import DataFrameGroupBy
from typeguard import typechecked
from . import plot as _plot
from . import util
from .path_util import exists, glob, isdir, open
T = TypeVar('T')
try:
from tqdm.auto import tqdm
except:
tqdm = util.NoopTqdm
#########################################################################
# Data Classes
#########################################################################
@dataclass
class Run:
"""Represents a single run, containing one pd.DataFrame object
as well as other metadata (path, etc.)
"""
path: str
df: pd.DataFrame
@classmethod
def of(cls, o):
"""A static factory method."""
if isinstance(o, Run):
return Run(path=o.path, df=o.df)
elif isinstance(o, pd.DataFrame):
return cls.from_dataframe(o)
raise TypeError("Unknown type {}".format(type(o)))
@classmethod
@typechecked
def from_dataframe(cls, df: pd.DataFrame):
run = cls(path='', df=df)
if hasattr(df, 'path'):
run.path = df.path
return run
def __repr__(self):
return 'Run({path!r}, df with {rows} rows)'.format(
path=self.path, rows=len(self.df))
@property
def columns(self) -> Sequence[str]:
"""Returns all column names."""
return list(self.df.columns) # type: ignore
@property
def name(self) -> str:
"""Returns the last segment of the path."""
path = self.path.rstrip('/')
return os.path.basename(path)
def to_hypothesis(self) -> 'Hypothesis':
"""Create a new `Hypothesis` consisting of only this run."""
return Hypothesis.of(self)
def plot(self, *args, subplots=True, **kwargs):
return self.to_hypothesis().plot(*args, subplots=subplots, **kwargs)
def hvplot(self, *args, subplots=True, **kwargs):
return self.to_hypothesis().hvplot(*args, subplots=subplots, **kwargs)
class RunList(Sequence[Run]):
"""A (immutable) list of Run objects, but with some useful utility
methods such as filtering, searching, and handy format conversion."""
def __init__(self, runs: Iterable[Run]):
runs = self._validate_type(runs)
self._runs = list(runs)
@classmethod
def of(cls, runs: Iterable[Run]):
if isinstance(runs, cls):
return runs # do not make a copy
else:
return cls(runs) # RunList(runs)
def _validate_type(self, runs) -> List[Run]:
if not isinstance(runs, Iterable):
raise TypeError(f"`runs` must be an Iterable, but given {type(runs)}")
if isinstance(runs, Mapping):
raise TypeError(f"`runs` should not be a dictionary, given {type(runs)} "
" (forgot to wrap with pd.DataFrame?)")
runs = list(runs)
if not all(isinstance(r, Run) for r in runs):
raise TypeError("`runs` must be an iterable of Run, "
"but given {}".format([type(r) for r in runs]))
return runs
def __getitem__(self, index_or_slice):
o = self._runs[index_or_slice]
if isinstance(index_or_slice, slice):
o = RunList(o)
return o
def __next__(self):
# This is a hack to prevent pandas' pprint_thing() from converting
# this RunList into a sequence of Runs.
raise TypeError("'RunList' object is not an iterator.")
def __len__(self):
return len(self._runs)
def __repr__(self):
return "RunList([\n " + "\n ".join(repr(r) for r in self._runs) + "\n]"
def extend(self, more_runs: Iterable[Run]):
self._runs.extend(more_runs)
def to_list(self) -> List[Run]:
"""Create a new copy of list containing all the runs."""
return list(self._runs)
def to_dataframe(self) -> pd.DataFrame:
"""Return a DataFrame consisting of columns `name` and `run`."""
return pd.DataFrame({
'name': [r.name for r in self._runs],
'run': self._runs,
})
def filter(self, fn: Union[Callable[[Run], bool], str]) -> 'RunList':
"""Apply a filter function (Run -> bool) and return the filtered runs
as another RunList. If a string is given, we convert it as a matcher
function (see fnmatch) that matches `run.name`."""
if isinstance(fn, str):
pat = str(fn)
fn = lambda run: fnmatch.fnmatch(run.name, pat)
return RunList(filter(fn, self._runs))
def grep(self, regex: Union[str, 're.Pattern'], flags=0):
"""Apply a regex-based filter on the path of `Run`, and return the
matched `Run`s as a RunList."""
if isinstance(regex, str):
regex = re.compile(regex, flags=flags)
return self.filter(lambda r: bool(regex.search(r.path)))
def map(self, func: Callable[[Run], Any]) -> List:
"""Apply func for each of the runs. Return the transformation
as a plain list."""
return list(map(func, self._runs))
def to_hypothesis(self, name: str) -> 'Hypothesis':
"""Create a new Hypothesis instance containing all the runs
as the current RunList instance."""
return Hypothesis.of(self, name=name)
def groupby(
self,
by: Callable[[Run], T],
*,
name: Callable[[T], str] = str,
) -> Iterator[Tuple[T, 'Hypothesis']]:
r"""Group runs into hypotheses with the key function `by` (Run -> key).
This will enumerate tuples (`group_key`, Hypothesis) where `group_key`
is the result of the key function for each group, and a Hypothesis
object (with name `name(group_key)`) will consist of all the runs
mapped to the same group.
Args:
by: a key function for groupby operation. (Run -> Key)
name: a function that maps the group (Key) into Hypothesis name (str).
Example:
>>> key_func = lambda run: re.search("algo=(\w+),lr=([.0-9]+)", run.name).group(1, 2)
>>> for group_name, hypothesis in runs.groupby(key_func):
>>> ...
"""
series = pd.Series(self._runs)
groupby = series.groupby(lambda i: by(series[i]))
group: T
for group, runs_in_group in groupby:
yield group, Hypothesis.of(runs_in_group, name=name(group))
def extract(self, pat: str, flags: int = 0) -> pd.DataFrame:
r"""Extract capture groups in the regex pattern `pat` as columns.
Example:
>>> runs[0].name
"ppo-halfcheetah-seed0"
>>> df = runs.extract(r"(?P<algo>[\w]+)-(?P<env_id>[\w]+)-seed(?P<seed>[\d]+)")
>>> assert list(df.columns) == ['algo', 'env_id', 'seed', 'run']
"""
df: pd.DataFrame = self.to_dataframe()
df = df['name'].str.extract(pat, flags=flags)
df['run'] = list(self._runs)
return df
@dataclass
class Hypothesis(Iterable[Run]):
name: str
runs: RunList
def __init__(self, name: str, runs: Union[Run, Iterable[Run]]):
if isinstance(runs, Run) or isinstance(runs, pd.DataFrame):
if not isinstance(runs, Run):
runs = Run.of(runs)
runs = [runs] # type: ignore
self.name = name
self.runs = RunList(runs)
def __iter__(self) -> Iterator[Run]:
return iter(self.runs)
@classmethod
def of(cls,
runs: Union[Run, Iterable[Run]],
*,
name: Optional[str] = None) -> 'Hypothesis':
"""A static factory method."""
if isinstance(runs, Run):
name = name or runs.path
return cls(name=name or '', runs=runs)
def __getitem__(self, k):
if isinstance(k, int):
return self.runs[k]
if k not in self.columns:
raise KeyError(k)
return pd.DataFrame({r.path: r.df[k] for r in self.runs})
def __repr__(self) -> str:
return f"Hypothesis({self.name!r}, <{len(self.runs)} runs>)"
def __len__(self) -> int:
return len(self.runs)
def __hash__(self):
return hash(id(self))
def __next__(self):
# This is a hack to prevent pandas' pprint_thing() from converting
# this Hypothesis into a sequence of Runs.
raise TypeError("'Hypothesis' object is not an iterator.")
def describe(self) -> pd.DataFrame:
"""Report a descriptive statistics as a DataFrame,
after aggregating all runs (e.g., mean)."""
return self.mean().describe()
def summary(self) -> pd.DataFrame:
"""Return a DataFrame that summarizes the current hypothesis."""
return Experiment(self.name, [self]).summary()
# see module expt.plot
plot = CachedAccessor("plot", _plot.HypothesisPlotter)
plot.__doc__ = _plot.HypothesisPlotter.__doc__
hvplot = CachedAccessor("hvplot", _plot.HypothesisHvPlotter)
hvplot.__doc__ = _plot.HypothesisHvPlotter.__doc__
@property
def grouped(self) -> DataFrameGroupBy:
return pd.concat(self._dataframes, sort=False).groupby(level=0)
def empty(self) -> bool:
sentinel = object()
return next(iter(self.grouped), sentinel) is sentinel # O(1)
@property
def _dataframes(self) -> List[pd.DataFrame]:
"""Get all dataframes associated with all the runs."""
def _get_df(o):
if isinstance(o, pd.DataFrame):
return o
else:
return o.df
return [_get_df(r) for r in self.runs]
@property
def columns(self) -> Iterable[str]:
return util.merge_list(*[df.columns for df in self._dataframes])
def rolling(self, *args, **kwargs):
return self.grouped.rolling(*args, **kwargs)
def mean(self, *args, **kwargs) -> pd.DataFrame:
g = self.grouped
return g.mean(*args, **kwargs)
def std(self, *args, **kwargs) -> pd.DataFrame:
g = self.grouped
return g.std(*args, **kwargs)
def min(self, *args, **kwargs) -> pd.DataFrame:
g = self.grouped
return g.min(*args, **kwargs)
def max(self, *args, **kwargs) -> pd.DataFrame:
g = self.grouped
return g.max(*args, **kwargs)
class Experiment(Iterable[Hypothesis]):
@typechecked
def __init__(
self,
name: Optional[str] = None,
hypotheses: Optional[Iterable[Hypothesis]] = None,
):
self._name = name if name is not None else ""
self._hypotheses: MutableMapping[str, Hypothesis]
self._hypotheses = collections.OrderedDict()
if isinstance(hypotheses, np.ndarray):
hypotheses = list(hypotheses)
for h in (hypotheses or []):
if not isinstance(h, Hypothesis):
raise TypeError("An element of hypotheses contains a wrong type: "
"expected {}, but given {} ".format(
Hypothesis, type(h)))
if h.name in self._hypotheses:
raise ValueError(f"Duplicate hypothesis name: `{h.name}`")
self._hypotheses[h.name] = h
@classmethod
def from_dataframe(
cls,
df: pd.DataFrame,
by: Optional[Union[str, List[str]]] = None,
*,
run_column: str = 'run',
hypothesis_namer: Callable[..., str] = str,
name: Optional[str] = None,
) -> 'Experiment':
"""Constructs a new Experiment object from a DataFrame instance
structured as per the convention.
Args:
by (str, List[str]): The column name to group by. If None (default),
it will try to automatically determine from the dataframe if there
is only one column other than `run_column`.
run_column (str): The column name that contains `Run` objects.
See also `RunList.to_dataframe()`.
hypothesis_namer: This is a mapping that transforms the group key
(a str or tuple) that pandas groupby produces into hypothesis name.
This function should take one positional argument for the group key.
name: The name for the produced `Experiment`.
"""
if by is None:
# Automatically determine the column from df.
by_columns = list(sorted(set(df.columns).difference([run_column])))
if len(by_columns) != 1:
raise ValueError("Cannot automatically determine the column to "
"group by. Candidates: {}".format(by_columns))
by = next(iter(by_columns))
ex = Experiment(name=name)
for hypothesis_key, runs_df in df.groupby(by):
hypothesis_name = hypothesis_namer(hypothesis_key)
runs = RunList(runs_df[run_column])
h = runs.to_hypothesis(name=hypothesis_name)
ex.add_hypothesis(h)
return ex
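# Example (a sketch following the RunList.extract()/to_dataframe() convention
# described above; the regex pattern and the column name 'algo' are illustrative):
#   df = runs.extract(r"(?P<algo>[\w]+)-seed(?P<seed>[\d]+)")
#   ex = Experiment.from_dataframe(df, by='algo', name='comparison')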
def add_runs(
self,
hypothesis_name: str,
runs: List[Union[Run, Tuple[str, pd.DataFrame], pd.DataFrame]],
*,
color=None,
linestyle=None,
) -> Hypothesis:
def check_runs_type(runs) -> List[Run]:
if isinstance(runs, types.GeneratorType):
runs = list(runs)
if runs == []:
return []
if isinstance(runs, Run):
runs = [runs]
return [Run.of(r) for r in runs]
_runs = check_runs_type(runs)
d = Hypothesis.of(name=hypothesis_name, runs=_runs)
return self.add_hypothesis(d, extend_if_conflict=True)
@typechecked
def add_hypothesis(
self,
h: Hypothesis,
extend_if_conflict=False,
) -> Hypothesis:
if h.name in self._hypotheses:
if not extend_if_conflict:
raise ValueError(f"Hypothesis named {h.name} already exists!")
d: Hypothesis = self._hypotheses[h.name]
d.runs.extend(h.runs)
else:
self._hypotheses[h.name] = h
return self._hypotheses[h.name]
@property
def name(self) -> str:
return self._name
@property
def title(self) -> str:
return self._name
def keys(self) -> Iterable[str]:
"""Return all hypothesis names."""
return self._hypotheses.keys()
@property
def hypotheses(self) -> Sequence[Hypothesis]:
return tuple(self._hypotheses.values())
def select_top(
self,
key,
k=None,
descending=True,
) -> Union[Hypothesis, Sequence[Hypothesis]]:
"""Choose a hypothesis that has the largest value on the specified column.
Args:
key: str (y_name) or Callable(Hypothesis -> number).
k: If None, the top-1 hypothesis will be returned. Otherwise (integer),
top-k hypotheses will be returned as a tuple.
descending: If True, the hypothesis with largest value in key will be
chosen. If False, the hypothesis with smallest value will be chosen.
Returns: the top-1 hypothesis (if `k` is None) or a tuple of k hypotheses
in the order specified by `key`.
"""
if k is not None and k <= 0:
raise ValueError("k must be greater than 0.")
if k is not None and k > len(self._hypotheses):
raise ValueError("k must be smaller than the number of "
"hypotheses ({})".format(len(self._hypotheses)))
if isinstance(key, str):
y = str(key) # make a copy for closure
if descending:
key = lambda h: h.mean()[y].max()
else:
key = lambda h: h.mean()[y].min()
elif callable(key):
pass # key: Hypothesis -> scalar.
else:
raise TypeError(
f"`key` must be a str or a callable, but got: {type(key)}")
candidates = sorted(self.hypotheses, key=key, reverse=descending)
assert isinstance(candidates, list)
if k is None:
return candidates[0]
else:
return candidates[:k]
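# Example (a sketch; it assumes the runs' DataFrames contain a 'return' column,
# which this module does not guarantee):
#   best = ex.select_top('return')        # single best Hypothesis
#   top3 = ex.select_top('return', k=3)   # top-3 hypotheses, best first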
def __iter__(self) -> Iterator[Hypothesis]:
return iter(self._hypotheses.values())
def __repr__(self) -> str:
return (
f"Experiment('{self.name}', {len(self._hypotheses)} hypotheses: [ \n " +
'\n '.join([repr(exp) for exp in self.hypotheses]) + "\n])")
def __getitem__(
self,
key: Union[str, Tuple],
) -> Union[Hypothesis, np.ndarray, Run, pd.DataFrame]:
"""Return self[key].
`key` can be one of the following:
- str: The hypothesis's name to retrieve.
- int: An index [0, len(self)) in all hypothesis. A numpy-style fancy
indexing is supported.
- Tuple(hypo_key: str|int, column: str):
- The first axis is the same as previous (hypothesis' name or index)
- The second one is the column name. The return value will be same
as self[hypo_key][column].
"""
if isinstance(key, str):
name = key
return self._hypotheses[name]
elif isinstance(key, int):
try:
_keys = self._hypotheses.keys()
name = next(itertools.islice(_keys, key, None))
except StopIteration:
raise IndexError("out of range: {} (should be < {})".format(
key, len(self._hypotheses)))
return self._hypotheses[name]
elif isinstance(key, tuple):
hypo_key, column = key
hypos = self[hypo_key]
if isinstance(hypos, list):
raise NotImplementedError("2-dim fancy indexing is not implemented") # yapf: disable
return hypos[column] # type: ignore
elif isinstance(key, Iterable):
key = list(key)
if all(isinstance(k, bool) for k in key):
# fancy indexing through bool
if len(key) != len(self._hypotheses):
raise IndexError("boolean index did not match indexed array along"
" dimension 0; dimension is {} but corresponding "
" boolean dimension is {}".format(
len(self._hypotheses), len(key)))
r = np.empty(len(key), dtype=object)
r[:] = list(self._hypotheses.values())
return r[key]
else:
# fancy indexing through int? # TODO: support str
hypo_keys = list(self._hypotheses.keys())
to_key = lambda k: k if isinstance(k, str) else hypo_keys[k]
return [self._hypotheses[to_key(k)] for k in key]
else:
raise ValueError("Unsupported index: {}".format(key))
def __setitem__(
self,
name: str,
hypothesis_or_runs: Union[Hypothesis, List[Run]],
) -> Hypothesis:
"""An dict-like method for adding hypothesis or runs."""
if isinstance(hypothesis_or_runs, Hypothesis):
if hypothesis_or_runs in self._hypotheses:
raise ValueError(f"A hypothesis named {name} already exists")
self._hypotheses[name] = hypothesis_or_runs
else:
# TODO metadata (e.g. color)
self.add_runs(name, hypothesis_or_runs) # type: ignore
return self._hypotheses[name]
@property
def columns(self) -> Iterable[str]:
# merge and uniquify all columns but preserving the order.
return util.merge_list(*[h.columns for h in self._hypotheses.values()])
@staticmethod
def AGGREGATE_MEAN_LAST(portion: float):
return (lambda series: series.rolling(max(1, int(len(series) * portion))
).mean().iloc[-1]) # yapf: disable
def summary(self, columns=None, aggregate=None) -> pd.DataFrame:
"""Return a DataFrame that summarizes the current experiments,
whose rows are all hypothesis.
Args:
columns: The list of columns to show. Defaults to `self.columns` plus
`"index"`.
aggregate: A function or a dict of functions ({column_name: ...})
specifying a strategy to aggregate a `Series`. Defaults to take the
average of the last 10% of the series.
Example Usage:
>>> pd.set_option('display.max_colwidth', 2000) # hypothesis name can be long!
>>> df = ex.summary(columns=['index', 'loss', 'return'])
>>> df.style.background_gradient(cmap='viridis')
"""
columns = columns or (['index'] + list(self.columns))
aggregate = aggregate or self.AGGREGATE_MEAN_LAST(0.1)
df = pd.DataFrame({'hypothesis': [h.name for h in self.hypotheses]})
hypo_means = [
(h.mean() if not all(len(df) == 0 for df in h._dataframes) \
else pd.DataFrame()
#!/usr/bin/env python
# coding: utf-8
# Best viewed locally in a Jupyter notebook or online in <a href="https://nbviewer.jupyter.org/github/codykingham/noun_semantics/blob/master/analysis.ipynb">Jupyter Notebook Viewer</a>
#
# # Analysis of Noun Semantics in the Hebrew Bible
# ## <NAME>
#
# In this notebook, I compare the syntactic contexts of the top 200 most frequent nouns in the Hebrew Bible. This notebook essentially walks through my process and includes limited commentary throughout. Full descriptions borrowed from the paper will soon be transferred to here as well.
# In[1]:
get_ipython().system(' echo "last updated:"; date')
# In[3]:
# ETCBC's BHSA data
from tf.fabric import Fabric
from tf.app import use
# stats & data-containers
import collections, math, re, random, csv
import pandas as pd
import numpy as np
import scipy.stats as stats
from sklearn.decomposition import PCA
from sklearn.metrics.pairwise import pairwise_distances
from kneed import KneeLocator # https://github.com/arvkevi/kneed
# data visualizations
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import rcParams
rcParams['font.serif'] = ['Times New Roman']
from IPython.display import HTML, display, Image
from adjustText import adjust_text # fixes overlapping scatterplot annotations
# custom modules
#from pyscripts.contextcount import ContextCounter, ContextTester
from pyscripts.contextparameters import deliver_params
from pyscripts.deliver_data import deliver_data
# prep the Hebrew syntax data
name = 'noun_semantics'
hebrew_data = ['~/github/etcbc/{}/tf/c'.format(direc) for direc in ('bhsa','lingo/heads', 'heads', 'phono')] # data dirs
load_features = '''
typ phono lex_utf8 lex
voc_lex_utf8 voc_lex gloss
freq_lex pdp sp ls
language
rela number function
vs vt
code label
head obj_prep sem_set nhead
heads noun_heads
'''
# TF load statements
TF = Fabric(locations=hebrew_data)
api = TF.load(load_features)
B = use('bhsa', api=api, hoist=globals(), silent=True) # Bhsa functions for search and visualizing text
# In[4]:
def reverse_hb(heb_text):
'''
Reverses order of left-to-right text
for good matplotlib formatting.
'''
return ''.join(reversed(heb_text))
def show_word_list(word_nodes, joiner=' |', title=''):
'''
Displays Hebrew for a pipe-separated list of word nodes
Good for seeing lexemes without taking up screen space.
'''
formatted = joiner.join(T.text(node) for node in word_nodes)
display(HTML(formatted))
def show_subphrases(phrase, direction=L.d):
'''
A simple function to print subphrases
and their relations to each other.
'''
for sp in direction(phrase, 'subphrase'):
mother = E.mother.f(sp)[0] if E.mother.f(sp) else ''
mother_text = T.text(mother)
print('-'*7 + str(sp) + '-'*16)
print()
print(f'{T.text(sp)} -{F.rela.v(sp)}-> {mother_text}')
print(f'nodes: {sp} -{F.rela.v(sp)}-> {mother}')
print(f'slots: {L.d(sp, "word")} -{F.rela.v(sp)}-> {L.d(mother or 0, "word")}')
print('-'*30)
# ## Corpus Size
#
# Below is the number of words included in the corpus of BHSA.
# In[5]:
len(list(F.otype.s('word')))
# ## Demonstrating the Collocational Principle
#
# Here is a query for all nouns that serve as the object to the verb אכל "to eat". This query demonstrates how the collocation patterns of syntactic context can be informative for semantic meaning. This is the driving principle behind this project.
# In[6]:
eat_obj = '''
clause
phrase function=Pred
word pdp=verb lex=>KL[
phrase function=Objc
<head- w1:word pdp=subs
lex
w2:word
w1 = w2
'''
eat_obj = B.search(eat_obj)
eaten_lexs = collections.Counter(T.text(r[5]) for r in eat_obj)
for word, count in eaten_lexs.most_common(10):
print(f'{count}\t{word}')
# ## Define a Target Noun Set
#
# *Insert discussion about the semantic relationship between iconicity and frequency with regards to the most frequent noun lexemes in the HB.*
# In[7]:
raw_search = '''
lex language=Hebrew sp=subs
'''
raw_nouns = B.search(raw_search)
# Now we order the results on the basis of lexeme frequency.
# In[8]:
raw_terms_ordered = sorted(((F.freq_lex.v(res[0]), res[0]) for res in raw_nouns), reverse=True)
# Below we have a look at the top 50 terms from the selected set. Pay attention to the feature `ls`, i.e. "lexical set." This feature gives us some rudimentary semantic information about the nouns and their usual functions, and it suggests that some additional restrictions are necessary for the noun selection procedure. Note especially that several of these nouns are used in adjectival or prepositional roles (e.g. כל ,אחד, אין, תחת).
# In[9]:
raw_nnodes = [res[1] for res in raw_terms_ordered] # isolate the word nodes of the sample
B.displaySetup(extraFeatures={'ls', 'freq_lex'}) # config B to display ls and freq_lex
# display lexeme data
# for i, node in enumerate(raw_nnodes[:50]):
# B.prettyTuple((node,), seq=i)
# Based on the nouns that are present, we should make some key exclusions. Many substantives have more functional or adjectival roles. Undesirable categories include copulative nouns (`nmcp`, e.g. אין), cardinal numbers (`card`), potential prepositions (`ppre`, e.g. תחת). The `ls` category of potential adverb (`padv`) contains desirable nouns like יום, but also more functionally adverbial-nouns like עוד. Thus we can see that there is a range of adverbial tendencies found in this category. Due to the potentially interesting possibility of seeing these tendencies play out in the data, we can decide to keep these instances.
#
# To be sure, the very phenomenon of "functional" versus "nominal" is worthy of further, quantitative investigation. The `ls` feature is an experimental and incomplete feature in the ETCBC, and this is precisely the kind of shortcoming this present work seeks to address. Nouns and adverbs likely sit along a sliding scale of adverbial tendencies, with adverbs nearly always functioning in such a role, and nouns exhibiting various statistical tendencies. But due to the scope of this investigation, we limit ourselves to mainly nominal words with a small inclusion of some adverbial-like substantives.
#
# We can eliminate more functional nouns by restricting the possible lexical set (`ls`) values. Below we apply those restrictions to the search template. In the case of certain quantifiers such as כל there is an `ls` feature of distributive noun (`nmdi`), yet this feature is likewise applied to nouns such as אח ("brother"). So it is undesirable to exclude all of these cases. Thus we depend, instead, on an additional filter list that excludes quantifiers.
#
# A few terms such as דרך and עבר are eliminated because the ETCBC labels it as a potential preposition. This is a speculative classification. So we define a seperate parameter in the template that saves this instance.
# In[10]:
exclude = '|'.join(('KL/', 'M<V/', 'JTR/', 'M<FR/', 'XYJ/')) # exclude quantifiers
include = '|'.join(('padv', 'nmdi')) # ok ls features
keep = '|'.join(('DRK/', '<BR/'))
'''
Below is a TF search query for three cases:
One is a lexeme with included ls features.
The second is a lexeme with a null ls feature.
The third is lexemes we want to prevent from being excluded.
For all cases we exclude excluded lexemes.
'''
select_noun_search = f'''
lex language=Hebrew
/with/
sp=subs ls={include} lex#{exclude}
/or/
sp=subs ls# lex#{exclude}
/or/
sp=subs lex={keep}
/-/
'''
select_nouns = B.search(select_noun_search)
noun_dat_ordered = sorted(((F.freq_lex.v(res[0]), res[0]) for res in select_nouns), reverse=True)
nnodes_ordered = list(noun_dat[1] for noun_dat in noun_dat_ordered)
filtered_lexs = list(node for node in raw_nnodes if node not in nnodes_ordered)
print(f'\t{len(raw_nouns) - len(select_nouns)} results filtered out of raw noun list...')
print('\tfiltered lexemes shown below:')
show_word_list(filtered_lexs)
# ### Plot the Nouns in Order of Frequency
#
# Now that we have obtained a filtered noun-set, we must decide a cut-off point at which to limit the present analysis. Below we plot the attested nouns and their respective frequencies.
# In[14]:
# plot data
y_freqs = [lex_data[0] for lex_data in noun_dat_ordered]
x_rank = [i+1 for i in range(0, len(y_freqs))]
title = 'Noun Frequencies in the Hebrew Bible'
xlabel = 'Noun Rank'
ylabel = 'Noun Frequency'
# first plot
plt.figure(figsize=(8, 4))
plt.plot(x_rank, y_freqs, color='black', linewidth=1)
plt.title(title + f' (ranks 1-{len(x_rank)})', size=10)
plt.xlabel(xlabel, size=10)
plt.ylabel(ylabel, size=10)
plt.plot()
plt.show()
# We zoom in closer to view ranks 1-1000...
#
# *Consider using a subplot here with 4 different zooms*
# In[16]:
# second plot
plt.figure(figsize=(8, 4))
plt.plot(x_rank[:1000], y_freqs[:1000], color='black', linewidth=1)
plt.xlabel(xlabel, size=10)
plt.ylabel(ylabel, size=10)
plt.axvline(200, color='red', linewidth=0.8, linestyle='--')
plt.savefig('results/plots/noun_frequencies1-1000.svg', format='svg', bbox_inches='tight') # save the plot (without title)
plt.title(title + f' (ranks 1-1000)', size=10)
plt.show()
# This curve is typical of Zipf's law:
#
# > Zipf's law states that given some corpus of natural language utterances, the frequency of any word is inversely proportional to its rank in the frequency table ([wikipedia](https://en.wikipedia.org/wiki/Zipf%27s_law))
#
# The curve sharply "elbows" at around rank 15. Between ranks 50-100 there is still an appreciable drop-off. The curve starts to significantly flatten after 200. We thus decide an arbitrary cut-off point at rank 200, based on the fact that the curve does not show any significant leveling after this point.
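# As a rough sanity check on this Zipf-like shape (illustrative only — Zipf's law is an idealization, so the rank × frequency products below will be only approximately stable), we can multiply rank by frequency at a few sample ranks:
# In[ ]:
zipf_products = [(rank, rank * freq) for rank, freq in zip(x_rank, y_freqs) if rank in (10, 50, 100, 200, 500)]
zipf_products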
# In[17]:
target_nouns = nnodes_ordered[:200]
tnoun_instances = set(word for lex in target_nouns for word in L.d(lex, 'word'))
show_word_list(target_nouns) # temporary comment out while bug is fixed
print(f'\n{len(tnoun_instances)} nouns ready for searches')
# In[18]:
nouns_text_freqs = sorted(
((F.voc_lex_utf8.v(L.d(noun,'word')[0]), F.freq_lex.v(noun))
for noun in target_nouns), key=lambda k: k[-1], reverse=True
)
# In[19]:
', '.join(f'{noun}' for noun, freq in nouns_text_freqs)
# ## Strategy for Context Selection
#
# See [pyscripts/contextparameters.py](pyscripts/contextparameters.py) for the full delineation of these patterns and to see how they've been selected and tokenized.
# In[20]:
contexts = deliver_params(tnoun_instances, tf=api)
# In[21]:
data = deliver_data(contexts, tf=TF)
# Let's have a look at the first example...
# In[22]:
data[0]
# Now we put the data into a dataframe. We also export the dataframe for reference.
# In[23]:
data_df = pd.DataFrame(data)
data_df.set_index('clause', inplace=True)
data_df.to_csv('dataset.csv') # export dataset
data_df.head()
# #### Random Samples of the Data
# In[15]:
# randomized = [r for r in counts.search2result['T.const→ lex (with article separation)']]
# random.shuffle(randomized)
# In[16]:
# B.show(randomized, end=50, condenseType='phrase', withNodes=True, extraFeatures={'sem_set'})
# <hr>
#
# ### Excursus: Checking Context Tags and Gathering Examples
#
# In this section I will inspect the tokens that are generated and counted, as well as pull out some examples and their counts for the presentation.
# In[17]:
# patterns = {'funct.-> st.verb.lex': '\D*\.-> \D*\.\D*\[',
# 'funct.prep-> st.verb.lex': '\D*\.\D+\-> \D*\.\D*\['}
# token_examps = collections.defaultdict(list)
# for token in counts.data.index:
# for query, pattern in patterns.items():
# if re.match(pattern, token):
# token_examps[query].append(token)
# for query in token_examps:
# random.shuffle(token_examps[query])
# examples = token_examps[query][:10]
# targets = list()
# # get example target nouns
# for ex in examples:
# ex_target = counts.data.loc[ex].sort_values(ascending=False).index[0]
# targets.append(ex_target)
# show_random = [f'target: {target} \t {ex}' for target, ex in zip(targets, examples)]
# print('QUERY: ', query)
# print('-'*5)
# print('\n'.join(show_random))
# print('-'*20, '\n')
# Now some more specific counts...
# In[18]:
counts.data['לב.n1']['T.Objc→ זכה.v1.piel'].sum()
# In[19]:
counts.data['פתח.n1']['T.Cmpl→ עמד.v1.qal'].sum()
# In[20]:
counts.data['אישׁ.n1']['T.Subj→ פקד.v1.hit'].sum()
# In[21]:
counts.data['שׁער.n1']['T.Loca→ שׁית.v1.qal'].sum()
# In[22]:
counts.data['גוי.n1']['T.ב.Adju→ אמר.v1.qal'].sum()
# In[23]:
counts.data['יד.n1']['T.מן.Cmpl→ ישׁע.v1.hif'].sum()
# In[24]:
counts.data['עת.n1']['T.ב.Time→ נתן.v1.nif'].sum()
# In[25]:
counts.data['דרך.n1']['T.ל.Cmpl→ פנה.v1.qal'].sum()
# <hr>
# #### Examining the Dataset
#
# Below we look at the number of dimensions in the data:
# In[26]:
counts.data.shape
# And a sample of the data is below, sorted on the results of אלהים in order to bring up interesting examples.
# In[27]:
counts.data.sort_values(ascending=False, by='אלהים.n1').head(10)
# Next we look at a few example counts:
# In[28]:
pd.DataFrame(counts.data['אלהים.n1'][counts.data['אלהים.n1'] > 0].sort_values(ascending=False)).head(15)
# This gives a good idea of the content of the co-occurrence counts.
# #### Various Tag Searches Below
#
# Below I isolate a few tags of interest to serve as examples in the paper.
#
# **TODO:** Extract and display all the exact examples.
# In[29]:
prec = [tag for tag in counts.data.index if 'PreC' in tag and 'אישׁ.n1' in tag]
prec
# In[30]:
target = 'עלה.n1'
target_counts = counts.data[target][counts.data[target]>0].sort_values(ascending=False)
prec_contexts = target_counts[target_counts.index.str.contains('ל.PreC')]
prec_contexts
# ## Adjusting the Counts
#
# We will apply two primary adjustments:
#
# 1. We drop co-occurrences that are unique to a noun. The dropped observations will thus be considered outliers. While these items are useful for describing the uniqueness of a given lexeme, they are unhelpful for drawing comparisons between our sets.
# 2. We convert the counts into a measure of statistical significance. For this we use Fisher's exact test, which is ideal for datasets that have counts that are less than 5. Our matrix is likely to have many such counts. The resulting p-values, of which <0.05 represents a statistically significant colexeme, will be log-transformed. Values that fall below expected frequencies will be negatively transformed.
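# Before applying the test to the full matrix below, here is a single-cell illustration of the adjustment just described (the counts are invented, not drawn from the dataset): a 2x2 contingency table is passed to scipy.stats.fisher_exact, and the resulting p-value is log10-transformed, with a positive sign when the observed count meets or exceeds the expected count and a negative sign otherwise.
# In[ ]:
obs_a, obs_b, obs_c, obs_d = 10, 40, 20, 930  # hypothetical contingency cells
_, example_p = stats.fisher_exact([[obs_a, obs_b], [obs_c, obs_d]])
example_expected = (obs_a + obs_b) * (obs_a + obs_c) / (obs_a + obs_b + obs_c + obs_d)
example_attraction = -np.log10(example_p) if obs_a >= example_expected else np.log10(example_p)
example_p, round(example_expected, 2), round(example_attraction, 2)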
# ### Remove Co-occurrence Outliers
#
# We will remove colexemes/bases that occur with only one target noun. This is done by subtracting the row total from each item in the row. Any 0 value in a row means that that row has a unique colexeme that only occurs with one target noun (we will call that a `hapax_colex` here). We will remove these rows further down.
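# A tiny synthetic illustration of this rule (the values are invented): in the row of a context that occurs with only one noun, that noun's cell equals the row total, so subtracting the row total leaves a zero somewhere in the row; shared contexts leave no zeros.
# In[ ]:
toy_counts = pd.DataFrame({'noun_a': [3, 2], 'noun_b': [0, 5]}, index=['context_unique_to_a', 'context_shared'])
toy_counts.sub(toy_counts.sum(1), axis=0)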
# In[31]:
colex_counts = counts.data.sum(1)
remaining_counts = counts.data.sub(colex_counts, axis=0) # subtract colex_counts
hapax_colex = remaining_counts[(remaining_counts == 0).any(1)] # select rows that have a 0 value anywhere
# Below is an example just to make sure we've selected the right indices. The value has been manually chosen from `hapax_colex`.
# In[32]:
counts.data.loc['T.Adju→ אכל.v1.pual'].sort_values(ascending=False).head()
# Indeed this context tag is only attested with חרב, thus it is not useful for drawing meaningful comparisons to this noun. Below we see that there are `8191` other such basis elements. We remove these data points in the next cell and name the new dataset `data`.
# In[33]:
hapax_colex.shape
# In[34]:
data = counts.data.drop(labels=hapax_colex.index, axis=0)
print(f'New data dimensions: {data.shape}')
print(f'New total observations: {data.sum().sum()}')
print(f'Observations removed: {counts.data.sum().sum() - data.sum().sum()}')
# Random example to make sure there are no unique colexemes in the new dataset:
# In[35]:
data.loc['T.Adju→ בוא.v1.hif'].sort_values(ascending=False).head(5)
# #### Check for Orphaned Target Nouns
#
# I want to see if any target nouns in the dataset now have 0 basis observations (i.e. "orphaned") as a result of our data pruning. The test below shows that there is no columns in the table with a sum of 0.
# In[36]:
data.loc[:, (data == 0).all(0)].shape
# ### How many zero counts are there?
#
# The raw count matrix has a lot of sparsity. Here's how many zeros there are. We also count other values.
# In[37]:
unique_values, value_counts = np.unique(data.values, return_counts=True)
unique_counts = pd.DataFrame.from_dict(dict(zip(unique_values, value_counts)), orient='index', columns=['count'])
display(HTML('<h5>Top 10 Unique Values and Their Counts in Dataset</h5>'))
unique_counts.head(10)
# In[38]:
zero = unique_counts.loc[0.0][0]
non_zero = unique_counts[unique_counts.index > 0].sum()[0]
non_zero_ratio, zero_ratio = non_zero / (non_zero+zero), zero / (non_zero+zero)
print(f'Number of zero count variables: {zero} ({round(zero_ratio, 2)})')
print(f'Number of non-zero count variables: {non_zero} ({round(non_zero_ratio, 2)})')
# Below the number of observed counts is given:
# In[39]:
data.sum().sum()
# ### Apply Fisher's Exact Test
#
# Now we apply the Fisher's exact test to the data set. This involves supplying values to a 2x2 contingency table that is fed to `scipy.stats.fisher_exact`
# #### Number of Datapoints To Iterate Over
#
# The Fisher's exact test takes some time to run. That is because it must iterate over a lot of pairs. The number is printed below.
# In[40]:
print(data.shape[0]*data.shape[1])
# #### Apply the Tests
#
# The whole run takes 5.5-6.0 minutes on a 2017 Macbook pro.
# In[41]:
# data for contingency tables
target_obs = data.apply(lambda col: col.sum(), axis=0, result_type='broadcast') # total target lexeme observations
colex_obs = data.apply(lambda col: col.sum(), axis=1, result_type='broadcast') # total colexeme/basis observations
total_obs = data.sum().sum() # total observations
# preprocess parts of contingency formula;
# NB: a_matrix = data
b_matrix = target_obs.sub(data)
c_matrix = colex_obs.sub(data)
d_matrix = pd.DataFrame.copy(data, deep=True)
d_matrix[:] = total_obs
d_matrix = d_matrix.sub(data+b_matrix+c_matrix)
fisher_transformed = collections.defaultdict(lambda: collections.defaultdict())
i = 0 # counter for messages
indent(reset=True) # TF utility for timed messages
info('applying Fisher\'s test to dataset...')
indent(level=1, reset=True)
for lex in data.columns:
for colex in data.index:
a = data[lex][colex]
b = b_matrix[lex][colex]
c = c_matrix[lex][colex]
d = d_matrix[lex][colex]
contingency = np.array([[a, b], [c, d]])
oddsratio, pvalue = stats.fisher_exact(contingency)
fisher_transformed[lex][colex] = pvalue
i += 1
if i % 100000 == 0: # update message every 100,000 iterations
info(f'finished iteration {i}...')
indent(level=0)
info(f'DONE at iteration {i}!')
fisherdata = pd.DataFrame(fisher_transformed)
# In[42]:
fisherdata.head(10)
# ### log10 transformation
# In[43]:
expectedfreqs = (data+b_matrix) * (data+c_matrix) / (data+b_matrix+c_matrix+d_matrix)
fishertransf = collections.defaultdict(lambda: collections.defaultdict())
indent(reset=True)
info('applying log10 transformation to Fisher\'s data...')
for lex in data.columns:
for colex in data.index:
observed_freq = data[lex][colex]
exp_freq = expectedfreqs[lex][colex]
pvalue = fisherdata[lex][colex]
if observed_freq < exp_freq:
logv = np.log10(pvalue)
fishertransf[lex][colex] = logv
else:
logv = -np.log10(pvalue)
fishertransf[lex][colex] = logv
info('finished transformations!')
fishertransf = pd.DataFrame(fishertransf)
# The Fisher's test has produced zero values, indicating a very high degree of attraction between lexemes and a colexemes. A log-transformed zero equals `infinity`. Below those values are isolated.
# In[44]:
display(HTML('<h5>contexts x nouns with a p-value of 0 :</h5>'))
inf_nouns = fishertransf.columns[(fishertransf == np.inf).any()]
inf_data = [] # inf data contains column/index information needed to assign the new values
for inf_noun in inf_nouns:
inf_noun2context = pd.DataFrame(fishertransf[inf_noun][fishertransf[inf_noun] == np.inf])
inf_data.append(inf_noun2context)
display(inf_noun2context)
# In this case the Fisher's has returned a zero value. A p-value of 0 means that the likelihood אלהים and יהוה are *not* dependent variables is essentially null. We can thus reject the null hypothesis that the two values are not related. There is, rather, a maximum level of confidence that these two values *are* interrelated. The `np.inf` value that resulted from `log10(0)` is not viable for calculating vector distances. Thus, we need to substitute an arbitrary, but appropriate value. Below we access the lowest non-zero p-values in the dataset.
# In[45]:
minimum_pvalues = fisherdata.min()[fisherdata.min() > 0].sort_values()
minmin_noun = minimum_pvalues.index[0]
minmin_context = fisherdata[minimum_pvalues.index[0]].sort_values().index[0]
minimum_pvalues.head(10)
# The minimum noun x context score is shown below.
# In[46]:
minmin_noun
# In[47]:
minmin_context
# The small pvalue listed above is used to substitute the infinitive values below.
# In[48]:
# make the substitutions
for inf_dat in inf_data:
for noun in inf_dat.columns:
for context in inf_dat.index:
print(f'adjusting infinite score for {noun}')
new_pvalue, new_transf = fisherdata[minmin_noun][minmin_context], fishertransf[minmin_noun][minmin_context]
fisherdata[noun][context] = new_pvalue
print(f'\tpvalue updated to {new_pvalue}')
fishertransf[noun][context] = new_transf
print(f'\ttransformed pvalue updated to {new_transf}')
# Below we double to check to ensure that all infinitive values have been removed. The test should read `False`.
# In[49]:
# infinites in dataset?
bool(len(fishertransf[(fishertransf == np.inf).any(1)].index))
# ### Comparing Raw and Adjusted Counts
#
# What kinds of counts are "upvoted" and "downvoted" in the adjusted numbers? This information is helpful for gaining insight into the adjustment process and the efficacy of its results.
#
# Below I isolate and compare counts for a set of key lexemes: מלך "king", עיר "city", and חכמה "wisdom". The counts are analyzed by comparing context tag rankings and looking for those contexts which are most affected (i.e. have the most absolute differences) by the changes.
# In[50]:
examine_nouns = ['מלך.n1', 'עיר.n1', 'חכמה.n1']
context_rankings = {}
# gather context rankings into dataframes
for noun in examine_nouns:
# make raw context DF, sorted, with columns count and rank
rawcounts = pd.DataFrame(data[noun].values,
columns=['count'],
index=data.index).sort_values(ascending=False, by='count')
rawcounts['rank'] = np.arange(len(rawcounts))+1 # add column "rank"
# make adjusted context DF, sorted, with columns count and rank
adjcounts = pd.DataFrame(fishertransf[noun].values,
columns=['count'],
index=fishertransf.index).sort_values(ascending=False, by='count')
adjcounts['rank'] = np.arange(len(adjcounts))+1
# put both DFs into dict mapped to noun
context_rankings[noun]={'raw':rawcounts, 'adj':adjcounts}
# print for each noun a report on top up/downgrades
for noun, rankset in context_rankings.items():
raw, adj = rankset['raw'], rankset['adj']
upgrades = pd.DataFrame((raw['rank']-adj['rank']).sort_values(ascending=False))
downgrades = pd.DataFrame((raw['rank']-adj['rank']).sort_values())
upgrades.columns, downgrades.columns = [['difference']]*2
upgrades['previous rank'], downgrades['previous rank'] = [raw['rank']]*2
upgrades['new rank'], downgrades['new rank'] = [adj['rank']]*2
display(HTML(f'<h3>{noun}</h3>'))
print('top 10 raw counts:')
display(raw.head(10))
print('top 10 adjusted counts:')
display(adj.head(10))
print('top 10 rank upgrades')
display(upgrades.head(10))
print('top 10 rank downgrades')
display(downgrades.head(10))
print('-'*40)
print()
# #### Export Data for מלך for Paper
# In[51]:
context_rankings['מלך.n1']['raw'].head(10).to_csv('spreadsheets/king_top10_raw.csv')
round(context_rankings['מלך.n1']['adj'].head(10), 2).to_csv('spreadsheets/king_top10_adj.csv')
# #### Extracting Specific Examples for the Paper (on מלך) to Illustrate Count Adjustments
#
# Below the four separate parts of the contingency table are extracted for מלך "king". These were previously calculated above
# In[52]:
data['מלך.n1']['T.Objc→ נתן.v1.qal'] # A
# In[53]:
b_matrix['מלך.n1']['T.Objc→ נתן.v1.qal'] # B
# In[54]:
c_matrix['מלך.n1']['T.Objc→ נתן.v1.qal'] # C
# In[55]:
d_matrix['מלך.n1']['T.Objc→ נתן.v1.qal'] # D
# Where do the 10 cases happen?
# In[56]:
passages = []
for res in counts.target2basis2result['מלך.n1']['T.Objc→ נתן.v1.qal']:
passages.append('{} {}:{}'.format(*T.sectionFromNode(res[0])))
print('; '.join(passages))
# What is the result of the Fisher's test?
# In[57]:
round(fisherdata['מלך.n1']['T.Objc→ נתן.v1.qal'], 4)
# What is the value of the expected count?
# In[58]:
round(expectedfreqs['מלך.n1']['T.Objc→ נתן.v1.qal'], 2)
# In[59]:
round(fishertransf['מלך.n1']['T.Objc→ נתן.v1.qal'], 2)
# How has the rank changed?
# In[60]:
context_rankings['מלך.n1']['raw'].loc['T.Objc→ נתן.v1.qal']
# In[61]:
context_rankings['מלך.n1']['adj'].loc['T.Objc→ נתן.v1.qal']
# <hr>
#
# #### Excursus: A Random Sample Examined
#
# We saw that the model seems to be succeeding at isolating intuitive associations with קול. Let's look at another example at random, in this case the noun ארץ ("land"). Below are the transformed p-values for that noun.
# In[62]:
fishertransf['ארץ.n1'].sort_values(ascending=False).head(10)
# The most associated variables include cases where ארץ is an object to the verb ירשׁ, where ארץ serves as the complement from which something is brought (hifil of יצא and hifil of עלה), frequently in construct to עם "people"), the participle of ישב "inhabitant(s)"), and ממלכה, "kingdom", as well as other satisfying and expected occasions of use. These examples show that the model is working well.
# <hr>
# ## Comparing the Nouns
#
# The nouns are now ready to be compared. I will do so in two ways.
#
# 1. Principal Component Analysis — We have a semantic space with 4,218 dimensions. That is a lot of potential angles from which to compare the vectors. One method that is commonly used in semantic space analysis is principal component analysis or **PCA**. PCA is a dimensionality reduction method that projects each multi-dimensional noun vector onto the two directions that capture the most variance between the nouns. We can visualize the resulting space by plotting these two components on an X and Y axis.
# 2. Cosine Similarity — This measure allows us to compare the vectors on the basis of their trajectories. This method is particularly well-suited for semantic spaces because it ignores differences in frequency and compares, rather, the closeness of relationship between two sets of frequencies.
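# As a minimal sketch of the second measure (illustrative only; it reuses the already-imported pairwise_distances on the adjusted fishertransf table and is not part of the analysis proper):
# In[ ]:
cosine_sim_sketch = pd.DataFrame(1 - pairwise_distances(fishertransf.T.values, metric='cosine'), index=fishertransf.columns, columns=fishertransf.columns)
cosine_sim_sketch.iloc[:5, :5]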
# ### PCA Analysis
#
# We want to apply PCA in order to plot nouns in an imaginary space. The goal is to use the visualization to identify patterns and groups amongst the 199 target nouns. Nouns that are more similar should fall within the same general areas relative to the origin (0, 0). PCA identifies the directions of maximum variance in the vector space.
# In[63]:
pca = PCA(10) # PCA with 3 principal components
noun_fit = pca.fit(fishertransf.T.values) # get coordinates
pca_nouns = noun_fit.transform(fishertransf.T.values)
plt.figure(figsize=(8, 6))
sns.barplot(x=np.arange(10)+1, y=noun_fit.explained_variance_ratio_[:10])
plt.xlabel('Principal Component', size=20)
plt.ylabel('Ratio of Explained Variance', size=20)
plt.title('Ratio of Explained Variance for Principal Components 1-10 (Scree Plot)', size=20)
plt.show()
# Variance accounted for by PC1 and PC2:
# In[64]:
noun_fit.explained_variance_ratio_[0]+noun_fit.explained_variance_ratio_[1]
# The plot above, also called a scree plot, tells us that the first two principal components only account for 12% of the total variance in the dataset. Thus the PCA noun space is rather noisy. This may be explained by the fact that we are combining many different kinds of syntactic contexts into one dataset. It may also be due to the rather dispersed nature of lexical data.
#
# Below we extract the top 25 features which are most influential for the first two principal components.
# In[65]:
loadings = noun_fit.components_.T * np.sqrt(noun_fit.explained_variance_)
loadings = pd.DataFrame(loadings.T, index=np.arange(10)+1, columns=data.index)
# In[66]:
pc1_loadings = pd.DataFrame(loadings.loc[1].sort_values(ascending=False))
pc2_loadings = pd.DataFrame(loadings.loc[2].sort_values(ascending=False))
pc1_loadings_above0 = pc1_loadings[pc1_loadings[1] > 0.1] # isolate loadings > 0
# automatically detect elbow in graph:
elbow = KneeLocator(x=np.arange(pc1_loadings_above0.shape[0]),
y=pc1_loadings_above0[1].values,
curve='convex',
direction='decreasing').knee
# plot it all
plt.figure(figsize=(8, 6))
plt.plot(pc1_loadings_above0.values)
plt.title('Loading Scores >0 by Rank for Principal Component 1', size=20)
plt.ylabel('Loading Score', size=20)
plt.xlabel('Rank', size=20)
plt.xticks(np.arange(pc1_loadings_above0.shape[0], step=20), size=20)
plt.yticks(size=20)
plt.axvline(elbow, color='red') # plot elbow with red line
plt.show()
# #### Top PCX Loadings and Scores (for data exploration)
# In[67]:
# pcx_loadings = pd.DataFrame(loadings.loc[4].sort_values(ascending=False)) # for experiments
# pcx_loadings.head(25)
# #### Top 25 PC1 Loadings and Scores
# In[68]:
pc1_loadings.round(2).head(25).to_csv('spreadsheets/PC1_loadings.csv')
pc1_loadings.head(25)
# #### PC1 Verb Contexts and Loadings
# In[69]:
pc1_loadings[pc1_loadings.index.str.contains('v1')].round(2).head(15).to_csv('spreadsheets/top15_animate_verbs.csv')
top_pc1_loadings = pc1_loadings[pc1_loadings[1] >= 0.30]
pc1_loadings[pc1_loadings.index.str.contains('v1')].head(15)
# #### Looking at T.ל.Cmpl→ לקח.v1.qal
#
# This is an interesting top verbal context. Is it related to marriage situations?
# In[70]:
take_contexts = [r for r in counts.basis2result['T.ל.Cmpl→ לקח.v1.qal']]
random.seed(213214) # shuffle random, preserve state
random.shuffle(take_contexts)
B.show(take_contexts, condenseType='clause', withNodes=True, end=5)
display(HTML(f'<h4>...{len(take_contexts)-5} other results cutoff...'))
# In[71]:
'; '.join(['{} {}:{}'.format(*T.sectionFromNode(r[0])) for r in sorted(take_contexts)])
# In[72]:
len(take_contexts)
# #### PC2 Loadings, top 25
# In[73]:
pc2_loadings.head(25)
# In[74]:
def plot_PCA(pca_nouns,
zoom=tuple(),
noun_xy_dict=False,
save='',
annotate=True,
title='',
components=(pca_nouns[:,0], pca_nouns[:,1])):
'''
Plots a PCA noun space.
Function is useful for presenting various zooms on the data.
'''
x, y = components
# plot coordinates
plt.figure(figsize=(12, 10))
plt.scatter(x, y)
if zoom:
xmin, xmax, ymin, ymax = zoom
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
if title:
plt.title(title, size=18)
plt.xlabel('PC1', size=18)
plt.ylabel('PC2', size=18)
plt.axhline(color='red', linestyle=':')
plt.axvline(color='red', linestyle=':')
# annotate points
if annotate:
noun_xy = {} # for noun_dict
noun_lexs = [f'{reverse_hb(F.voc_lex_utf8.v(counts.target2lex[n]))}' for n in fishertransf.columns]
for i, noun in enumerate(noun_lexs):
noun_x, noun_y = x[i], y[i]
noun_xy[fishertransf.columns[i]] = (noun_x, noun_y)
if zoom: # to avoid annotating outside of field of view (makes plot small)
if any([noun_x < xmin, noun_x > xmax, noun_y < ymin, noun_y > ymax]):
continue # skip noun
plt.annotate(noun, xy=(noun_x, noun_y), size='18')
if save:
plt.savefig(save, dpi=300, bbox_inches='tight')
plt.show()
if noun_xy_dict:
return noun_xy
test_components = (pca_nouns[:,0], pca_nouns[:,1])
# #### Whole PCA Space
# In[75]:
pca_nouns_xy = plot_PCA(pca_nouns, noun_xy_dict=True, save='plots/PCA_whole.png', components=test_components)
# We can already see some interesting tendencies in the data. קול and דבר are grouped in the same quadrant. In the upper right quadrant we see בן and בת. The lower left quadrant presents a particularly interesting match: יד "hand" and אלהים "God".
#
# We zoom in closer below to have a better look at the tendencies.
# #### Main Cluster of PCA space
# In[76]:
plot_PCA(pca_nouns, zoom=((-3, 3, -2.5, 1)), save='plots/PCA_main.png')
# ### ~Animate Nouns
# Note that nouns in the lower right quadrant tend to be people, while on the lower left there are primarily things.
#
# The plot below shows person nouns.
# In[77]:
plot_PCA(pca_nouns, zoom=((-0.1, 5, -2.5, 0.1)), save='plots/PCA_~animates')
# Let's see what nouns to the right of the y axis have most in common. This could corroborate the intuition that the nouns on the right are personal.
#
# First we isolate the nouns with a x-axis value > 0. Those are shown below, they are obviously personal nouns.
# In[78]:
nouns_xy = pd.DataFrame.from_dict(pca_nouns_xy, orient='index', columns=['x', 'y'])
possibly_animate = pd.DataFrame(nouns_xy[nouns_xy.x > 0])
possibly_animate['gloss'] = [F.gloss.v(counts.target2lex[targ]) for targ in possibly_animate.index]
possibly_animate = possibly_animate.reindex(['gloss', 'x', 'y'], axis=1)
# In[79]:
x_animate = pd.DataFrame(possibly_animate.drop('y', axis=1).sort_values(ascending=False, by='x'))
round(x_animate,2).to_csv('spreadsheets/animate_x.csv')
print(f'total number of ~animate nouns {x_animate.shape[0]}')
x_animate
# #### Why בגד?
#
# Why has בגד "garment" made it into the set? We compare the top loading scores against the top scores for בגד.
# In[80]:
def cf_PC_Noun(pc_loadings, noun_counts, noun, pc_name='PC1', ascending=False):
'''
Compares PC loadings and noun counts.
Returns a DF containing the top common
counts sorted on the PC.
'''
top_cts = noun_counts[noun][noun_counts[noun]>0] # isolate non-zero counts
pc_word = pc_loadings.copy() # make copy of PC loadings for modifications
pc_word.columns = [pc_name] # rename col to PCX
pc_word[noun] = top_cts[[i for i in top_cts.index if i in pc_word.index]] # add new column for noun
pc_word = pc_word[pc_word[noun] > 0].sort_values(by='PC1', ascending=ascending) # remove zero counts completely, sort
return pc_word
bgd_pc1 = cf_PC_Noun(pc1_loadings, fishertransf, 'בגד.n1')
bgd_pc1[bgd_pc1.PC1 >= 0.3].round(2).to_csv('spreadsheets/BGD_pc1.csv')
bgd_pc1[bgd_pc1.PC1 >= 0.3]
# Show passages for coord relations for paper:
# In[81]:
etcbc2sbl = {
'Genesis': 'Gen', 'Exodus': 'Exod', 'Leviticus': 'Lev', 'Numbers': 'Num',
'Deuteronomy': 'Deut', 'Joshua': 'Josh', 'Judges': 'Judg', '1_Samuel': '1 Sam', '2_Samuel': '2 Sam',
'1_Kings': '1 Kgs', '2_Kings': '2 Kgs', 'Isaiah': 'Isa', 'Jeremiah': 'Jer', 'Ezekiel': 'Ezek',
'Hosea': 'Hos', 'Joel': 'Joel', 'Amos': 'Amos', 'Obadiah': 'Obad', 'Jonah': 'Jonah', 'Micah': 'Mic',
'Nahum': 'Nah', 'Habakkuk': 'Hab', 'Zephaniah': 'Zeph', 'Haggai': 'Hag', 'Zechariah': 'Zech',
'Malachi': 'Mal', 'Psalms': 'Ps', 'Job': 'Job', 'Proverbs': 'Prov', 'Ruth': 'Ruth',
'Song_of_songs': 'Song', 'Ecclesiastes': 'Eccl', 'Lamentations': 'Lam', 'Esther': 'Esth',
'Daniel': 'Dan', 'Ezra': 'Ezra', 'Nehemiah': 'Neh', '1_Chronicles': '1 Chr', '2_Chronicles': '2 Chr'}
def formatPassages(resultslist):
'''
Formats biblical passages with SBL style
for a list of results.
'''
book2ch2vs = collections.defaultdict(lambda: collections.defaultdict(set))
for result in resultslist:
book, chapter, verse = T.sectionFromNode(result[0])
book = etcbc2sbl[book]
book2ch2vs[book][chapter].add(str(verse))
# assemble in to readable passages list
passages = []
for book, chapters in book2ch2vs.items():
ch_verses = []
for chapter, verses in chapters.items():
verses = ', '.join(f'{chapter}:{verse}' for verse in sorted(verses))
ch_verses.append(verses)
passage = f'{book} {", ".join(ch_verses)}'
passages.append(passage)
return '; '.join(passages)
def collectPassages(contextslist, targetnoun):
'''
Collects and returns neatly
formatted passages
for use in the paper.
'''
# map the passages with dicts to avoid repeats
results = sorted(res for context in contextslist for res in counts.target2basis2result[targetnoun][context])
return formatPassages(results)
bgd_mixContexts = ['']
collectPassages(bgd_pc1.head(4).index[bgd_pc1.head(4).index.str.contains('coord')], 'בגד.n1')
# In[82]:
# B.show(counts.target2basis2result['בגד.n1']['T.coord→ אהרן.n1'], condenseType='phrase', withNodes=True)
# Now we find the context tags that are highest in the set. We pull the 75th percentile (upper quartile) of the context-tag scores to see which ones are most shared across these nouns.
# In[83]:
animate_context = fishertransf[possibly_animate.index].quantile(0.75, axis=1).sort_values(ascending=False)
pd.DataFrame(animate_context.head(15))
# #### PCA Space: Focus on Bordering ~Animate Nouns
# In[84]:
plot_PCA(pca_nouns, zoom=((-0.5, 0.5, -1.5, -1)), save='plots/PCA_~animate_border')
# In[85]:
nouns_xy[(nouns_xy.x < 0) & (nouns_xy.x > -0.4)].sort_values(ascending=False, by='x')
# Verbs are the greatest distinguishing factor here, with אמר, בוא,נתן, לקח and others serving a big role. מות "die" also plays a role. These are definitely contexts we could expect with animate nouns.
# ### ~Inanimate Nouns
#
# The nouns to the left of the y axis appear to be mostly inanimate.
# In[86]:
plot_PCA(pca_nouns, zoom=((-2, 0, -2.5, 0)), title='PCA Space: ~Inanimate Noun Cluster')
# Below we pull the tendencies for the nouns with a PC1 < 0. These nouns appear to be impersonal in nature.
# In[87]:
possibly_inanimate = pd.DataFrame(nouns_xy[(nouns_xy.x < 0) & (nouns_xy.y < 0)])
possibly_inanimate['gloss'] = [F.gloss.v(counts.target2lex[targ]) for targ in possibly_inanimate.index]
possibly_inanimate = possibly_inanimate.reindex(['gloss', 'x', 'y'], axis=1)
x_inanimate = pd.DataFrame(possibly_inanimate.drop('y', axis=1).sort_values(by='x'))
round(x_inanimate,2).head(x_animate.shape[0]).to_csv('spreadsheets/inanimate_x.csv')
print(f'Number of total ~inanimates: {x_inanimate.shape[0]}')
print(f'Top ~inanimates: ')
x_inanimate.head(x_animate.shape[0])
# ### Top Influencing ~inanimate Contexts
# In[88]:
pc1_loadings.tail(25).sort_values(by=1).round(2).to_csv('spreadsheets//PC1_loadings_negative.csv')
pc1_loadings.tail(25).sort_values(by=1)
# #### What about מלאך?
#
# Why is מלאך rated in this list of mostly "inanimates"?
# In[89]:
pc_mlak = cf_PC_Noun(pc1_loadings, fishertransf, 'מלאך.n1', ascending=True)
pc_mlak[pc_mlak.PC1 <= -0.2].round(2).to_csv('spreadsheets/MLAK_pc1.csv')
pc_mlak.head(10)
# Note that several of the top 4 contexts are related to אלהים. We pull a few examples with אלהים out for use in the paper.
# In[90]:
collectPassages(['T.אחר.n1.Cmpl→ הלך.v1.qal'], 'אלהים.n1')
# In[91]:
collectPassages(['T.אחר.n1.Cmpl→ הלך.v1.qal'], 'מלאך.n1')
# In[92]:
collectPassages(['אחר.n2.atr→ T'], 'מלאך.n1')
# In[93]:
collectPassages(['T.appo→ אלהים.n1'], 'מלאך.n1')
# The next plot shows nouns to the left of the y-origin. Note especially the terms between y(-0.5) and y(0.0.). These are more conceptual nouns. This same trajectory extends up into the far parts of the upper left quadrant through דבר and קול.
# Here is a closer look at the larger cluster near the left side of the y-origin.
# In[94]:
plot_PCA(pca_nouns, zoom=((-0.5, -0.1, -1.5, -1)))
# Moving over one more notch:
# In[95]:
plot_PCA(pca_nouns, zoom=((-1, -0.5, -2, -0.5)))
# ### ~Perception Nouns?
#
# The first quadrant contains a number of interesting terms that appear to be mostly abstract. These nouns appear to be related in some sense to perceptions:
# In[96]:
plot_PCA(pca_nouns, zoom=((-2, 0.05, -0.05, 1)), save='plots/PCA_~perception_nouns')
# Below are the most common contexts for these nouns.
# In[97]:
perceptions = nouns_xy[(nouns_xy.x < 0) & (nouns_xy.y > 0)]
perception_contexts = fishertransf[perceptions.index].quantile(0.75, axis=1).sort_values(ascending=False).head(15)
pd.DataFrame(perception_contexts)
# Many perception-related contexts can be seen here, namely when the noun is a direct object to verbs such as שׁמע "hear", ידע "know", ראה "see", מצא "find", and שׁכח "forget".
# ## Experiment in Metaphor Detection
#
# If the contexts of the animate nouns are queried against the inanimate nouns, is it possible to detect cases of metaphorical extension in the dataset?
# In[98]:
# get top 25 animate verbal contexts with Subj roles:
animate_verbal_contexts = pc1_loadings[pc1_loadings.index.str.contains('v') & pc1_loadings.index.str.contains('Subj')].head(25)
print(f'number of verbal contexts searched: {animate_verbal_contexts.shape[0]}')
metaphors = [] # metaphor data here
for i, ia_noun in enumerate(x_inanimate[1:].head(40).index): # go through top 40
# skip these nouns:
if ia_noun in {'אלהים.n1', 'מלאך.n1'}:
continue
# find attested, common contexts
contexts = cf_PC_Noun(animate_verbal_contexts, fishertransf, ia_noun)
if contexts.shape[0]: # a match is found
# gather row data with columns of [noun, context, hits, passages, example]
for context in contexts.index:
results = counts.target2basis2result[ia_noun][context] # get results from searches
hits = len(results)
passages = formatPassages(results)
example = T.text(results[0][0])
metaphors.append([ia_noun, context, hits, passages, example, f'({formatPassages([(results[0][0],)])})'])
metaphors = pd.DataFrame(metaphors, columns=['noun', 'context', 'hits', 'passages', 'example', ' '])
# -*- coding: utf-8 -*-
"""Test losses"""
import datetime
import numpy as np
import pandas as pd
from conftest import assert_series_equal
from pvlib.soiling import hsu, kimber
from pvlib.iotools import read_tmy3
from conftest import requires_scipy, DATA_DIR
import pytest
@pytest.fixture
def expected_output():
# Sample output (calculated manually)
dt = pd.date_range(start=pd.Timestamp(2019, 1, 1, 0, 0, 0),
end=pd.Timestamp(2019, 1, 1, 23, 59, 0), freq='1h')
expected_no_cleaning = pd.Series(
data=[0.96998483, 0.94623958, 0.92468139, 0.90465654, 0.88589707,
0.86826366, 0.85167258, 0.83606715, 0.82140458, 0.80764919,
0.79476875, 0.78273241, 0.77150951, 0.76106905, 0.75137932,
0.74240789, 0.73412165, 0.72648695, 0.71946981, 0.7130361,
0.70715176, 0.70178307, 0.69689677, 0.69246034],
index=dt)
return expected_no_cleaning
@pytest.fixture
def expected_output_1():
dt = pd.date_range(start=pd.Timestamp(2019, 1, 1, 0, 0, 0),
end=pd.Timestamp(2019, 1, 1, 23, 59, 0), freq='1h')
expected_output_1 = pd.Series(
data=[0.98484972, 0.97277367, 0.96167471, 0.95119603, 1.,
0.98484972, 0.97277367, 0.96167471, 1., 1.,
0.98484972, 0.97277367, 0.96167471, 0.95119603, 0.94118234,
0.93154854, 0.922242, 0.91322759, 0.90448058, 0.89598283,
0.88772062, 0.87968325, 0.8718622, 0.86425049],
index=dt)
return expected_output_1
@pytest.fixture
def expected_output_2():
dt = pd.date_range(start=pd.Timestamp(2019, 1, 1, 0, 0, 0),
end=pd.Timestamp(2019, 1, 1, 23, 59, 0), freq='1h')
expected_output_2 = pd.Series(
data=[0.95036261, 0.91178179, 0.87774818, 0.84732079, 1.,
1., 1., 0.95036261, 1., 1.,
1., 1., 0.95036261, 0.91178179, 0.87774818,
0.84732079, 0.8201171, 1., 1., 1.,
1., 0.95036261, 0.91178179, 0.87774818],
index=dt)
return expected_output_2
@pytest.fixture
def expected_output_3():
dt = pd.date_range(start=pd.Timestamp(2019, 1, 1, 0, 0, 0),
end=pd.Timestamp(2019, 1, 1, 23, 59, 0), freq='1h')
timedelta = [0, 0, 0, 0, 0, 30, 0, 30, 0, 30, 0, -30,
-30, -30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    dt_new = dt + pd.to_timedelta(timedelta, 'm')
import pandas as pd
import pytest
import featuretools as ft
from featuretools.entityset import EntitySet
from featuretools.utils.gen_utils import import_or_none
ks = import_or_none('databricks.koalas')
@pytest.mark.skipif('not ks')
def test_single_table_ks_entityset():
primitives_list = ['absolute', 'is_weekend', 'year', 'day', 'num_characters', 'num_words']
ks_es = EntitySet(id="ks_es")
df = pd.DataFrame({"id": [0, 1, 2, 3],
"values": [1, 12, -34, 27],
"dates": [pd.to_datetime('2019-01-10'),
pd.to_datetime('2019-02-03'),
pd.to_datetime('2019-01-01'),
pd.to_datetime('2017-08-25')],
"strings": ["I am a string",
"23",
"abcdef ghijk",
""]})
values_dd = ks.from_pandas(df)
vtypes = {
"id": ft.variable_types.Id,
"values": ft.variable_types.Numeric,
"dates": ft.variable_types.Datetime,
"strings": ft.variable_types.NaturalLanguage
}
ks_es.entity_from_dataframe(entity_id="data",
dataframe=values_dd,
index="id",
variable_types=vtypes)
ks_fm, _ = ft.dfs(entityset=ks_es,
target_entity="data",
trans_primitives=primitives_list)
pd_es = ft.EntitySet(id="pd_es")
pd_es.entity_from_dataframe(entity_id="data",
dataframe=df,
index="id",
variable_types={"strings": ft.variable_types.NaturalLanguage})
fm, _ = ft.dfs(entityset=pd_es,
target_entity="data",
trans_primitives=primitives_list)
ks_computed_fm = ks_fm.to_pandas().set_index('id').loc[fm.index][fm.columns]
# NUM_WORDS(strings) is int32 in koalas for some reason
pd.testing.assert_frame_equal(fm, ks_computed_fm, check_dtype=False)
@pytest.mark.skipif('not ks')
def test_single_table_ks_entityset_ids_not_sorted():
primitives_list = ['absolute', 'is_weekend', 'year', 'day', 'num_characters', 'num_words']
ks_es = EntitySet(id="ks_es")
df = pd.DataFrame({"id": [2, 0, 1, 3],
"values": [1, 12, -34, 27],
"dates": [pd.to_datetime('2019-01-10'),
pd.to_datetime('2019-02-03'),
pd.to_datetime('2019-01-01'),
pd.to_datetime('2017-08-25')],
"strings": ["I am a string",
"23",
"abcdef ghijk",
""]})
values_dd = ks.from_pandas(df)
vtypes = {
"id": ft.variable_types.Id,
"values": ft.variable_types.Numeric,
"dates": ft.variable_types.Datetime,
"strings": ft.variable_types.NaturalLanguage
}
ks_es.entity_from_dataframe(entity_id="data",
dataframe=values_dd,
index="id",
variable_types=vtypes)
ks_fm, _ = ft.dfs(entityset=ks_es,
target_entity="data",
trans_primitives=primitives_list)
pd_es = ft.EntitySet(id="pd_es")
pd_es.entity_from_dataframe(entity_id="data",
dataframe=df,
index="id",
variable_types={"strings": ft.variable_types.NaturalLanguage})
fm, _ = ft.dfs(entityset=pd_es,
target_entity="data",
trans_primitives=primitives_list)
# Make sure both indexes are sorted the same
pd.testing.assert_frame_equal(fm, ks_fm.to_pandas().set_index('id').loc[fm.index], check_dtype=False)
@pytest.mark.skipif('not ks')
def test_single_table_ks_entityset_with_instance_ids():
primitives_list = ['absolute', 'is_weekend', 'year', 'day', 'num_characters', 'num_words']
instance_ids = [0, 1, 3]
ks_es = EntitySet(id="ks_es")
df = pd.DataFrame({"id": [0, 1, 2, 3],
"values": [1, 12, -34, 27],
"dates": [pd.to_datetime('2019-01-10'),
pd.to_datetime('2019-02-03'),
                                 pd.to_datetime('2019-01-01'),
import pandas as pd
import pytest
from feature_engine.encoding import OneHotEncoder
def test_encode_categories_in_k_binary_plus_select_vars_automatically(df_enc_big):
# test case 1: encode all categories into k binary variables, select variables
# automatically
encoder = OneHotEncoder(top_categories=None, variables=None, drop_last=False)
X = encoder.fit_transform(df_enc_big)
# test init params
assert encoder.top_categories is None
assert encoder.variables is None
assert encoder.drop_last is False
# test fit attr
transf = {
"var_A_A": 6,
"var_A_B": 10,
"var_A_C": 4,
"var_A_D": 10,
"var_A_E": 2,
"var_A_F": 2,
"var_A_G": 6,
"var_B_A": 10,
"var_B_B": 6,
"var_B_C": 4,
"var_B_D": 10,
"var_B_E": 2,
"var_B_F": 2,
"var_B_G": 6,
"var_C_A": 4,
"var_C_B": 6,
"var_C_C": 10,
"var_C_D": 10,
"var_C_E": 2,
"var_C_F": 2,
"var_C_G": 6,
}
assert encoder.variables_ == ["var_A", "var_B", "var_C"]
assert encoder.variables_binary_ == []
assert encoder.n_features_in_ == 3
assert encoder.encoder_dict_ == {
"var_A": ["A", "B", "C", "D", "E", "F", "G"],
"var_B": ["A", "B", "C", "D", "E", "F", "G"],
"var_C": ["A", "B", "C", "D", "E", "F", "G"],
}
# test transform output
assert X.sum().to_dict() == transf
assert "var_A" not in X.columns
def test_encode_categories_in_k_minus_1_binary_plus_list_of_variables(df_enc_big):
# test case 2: encode all categories into k-1 binary variables,
# pass list of variables
encoder = OneHotEncoder(
top_categories=None, variables=["var_A", "var_B"], drop_last=True
)
X = encoder.fit_transform(df_enc_big)
# test init params
assert encoder.top_categories is None
assert encoder.variables == ["var_A", "var_B"]
assert encoder.drop_last is True
# test fit attr
transf = {
"var_A_A": 6,
"var_A_B": 10,
"var_A_C": 4,
"var_A_D": 10,
"var_A_E": 2,
"var_A_F": 2,
"var_B_A": 10,
"var_B_B": 6,
"var_B_C": 4,
"var_B_D": 10,
"var_B_E": 2,
"var_B_F": 2,
}
assert encoder.variables_ == ["var_A", "var_B"]
assert encoder.variables_binary_ == []
assert encoder.n_features_in_ == 3
assert encoder.encoder_dict_ == {
"var_A": ["A", "B", "C", "D", "E", "F"],
"var_B": ["A", "B", "C", "D", "E", "F"],
}
# test transform output
for col in transf.keys():
assert X[col].sum() == transf[col]
assert "var_B" not in X.columns
assert "var_B_G" not in X.columns
assert "var_C" in X.columns
def test_encode_top_categories():
# test case 3: encode only the most popular categories
df = pd.DataFrame(
{
"var_A": ["A"] * 5
+ ["B"] * 11
+ ["C"] * 4
+ ["D"] * 9
+ ["E"] * 2
+ ["F"] * 2
+ ["G"] * 7,
"var_B": ["A"] * 11
+ ["B"] * 7
+ ["C"] * 4
+ ["D"] * 9
+ ["E"] * 2
+ ["F"] * 2
+ ["G"] * 5,
"var_C": ["A"] * 4
+ ["B"] * 5
+ ["C"] * 11
+ ["D"] * 9
+ ["E"] * 2
+ ["F"] * 2
+ ["G"] * 7,
}
)
encoder = OneHotEncoder(top_categories=4, variables=None, drop_last=False)
X = encoder.fit_transform(df)
# test init params
assert encoder.top_categories == 4
# test fit attr
transf = {
"var_A_D": 9,
"var_A_B": 11,
"var_A_A": 5,
"var_A_G": 7,
"var_B_A": 11,
"var_B_D": 9,
"var_B_G": 5,
"var_B_B": 7,
"var_C_D": 9,
"var_C_C": 11,
"var_C_G": 7,
"var_C_B": 5,
}
# test fit attr
assert encoder.variables_ == ["var_A", "var_B", "var_C"]
assert encoder.variables_binary_ == []
assert encoder.n_features_in_ == 3
assert encoder.encoder_dict_ == {
"var_A": ["B", "D", "G", "A"],
"var_B": ["A", "D", "B", "G"],
"var_C": ["C", "D", "G", "B"],
}
# test transform output
for col in transf.keys():
assert X[col].sum() == transf[col]
assert "var_B" not in X.columns
assert "var_B_F" not in X.columns
def test_error_if_top_categories_not_integer():
with pytest.raises(ValueError):
OneHotEncoder(top_categories=0.5)
def test_error_if_drop_last_not_bool():
with pytest.raises(ValueError):
OneHotEncoder(drop_last=0.5)
def test_raises_error_if_df_contains_na(df_enc_big, df_enc_big_na):
# test case 4: when dataset contains na, fit method
with pytest.raises(ValueError):
encoder = OneHotEncoder()
encoder.fit(df_enc_big_na)
# test case 4: when dataset contains na, transform method
with pytest.raises(ValueError):
encoder = OneHotEncoder()
encoder.fit(df_enc_big)
encoder.transform(df_enc_big_na)
def test_encode_numerical_variables(df_enc_numeric):
encoder = OneHotEncoder(
top_categories=None,
variables=None,
drop_last=False,
ignore_format=True,
)
X = encoder.fit_transform(df_enc_numeric[["var_A", "var_B"]])
# test fit attr
transf = {
"var_A_1": [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
"var_A_2": [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
"var_A_3": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
"var_B_1": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
"var_B_2": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
"var_B_3": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
}
transf = pd.DataFrame(transf).astype("int32")
    X = pd.DataFrame(X)
from time import time
from os import path, listdir
from datetime import timedelta
from datetime import date as dt_date
from datetime import datetime as dt
from numpy import cumprod
from pandas import DataFrame, read_sql_query, read_csv, concat
from functions import psqlEngine
class Investments():
def __init__(self, path = '../investments/', name = 'get_investments', **kwargs):
self.kwargs = kwargs
self.path = path
self.hyperparameters()
self.get_engine()
self.get_dollar()
self.get_all_assets()
self.domestic_bond_returns()
self.get_benchmarks()
self.portfolio_domestic_stocks = self.get_quotas('domestic_stocks')
self.portfolio_international_stocks = self.get_quotas('international_stocks')
self.portfolio_crypto = self.get_quotas('crypto')
# self.portfolio_domestic_options = self.get_quotas('domestic_options')
self.portfolio_domestic_funds = self.get_quotas('domestic_funds')
self.get_portfolio()
self.get_aggregate()
self.get_time_series()
self.dispose_engine()
def __call__(self, flag = 'assets'):
if flag == 'dollar':
return self.dollar
if flag == 'bonds':
return self.domestic_bonds, self.interests
if flag == 'stocks':
return self.domestic_tickers, self.international_tickers
if flag == 'crypto':
return self.crypto, self.fractions
if flag == 'portfolio':
return self.portfolio, self.portfolio_aggregate.round(2)
if flag == 'save':
rounded = self.portfolio.round(2)
rounded2 = self.portfolio_aggregate.round(2)
engine = psqlEngine(self.database)
connection = engine.connect()
rounded.to_sql('portfolio', connection, if_exists = 'replace', index = False)
rounded2.to_sql('aggregate', connection, if_exists = 'replace', index = False)
connection.close()
engine.dispose()
if flag == 'time_series':
return self.portfolio_time_series.round(2)
def hyperparameters(self):
self.database = self.kwargs.get('database', 'database.ini')
self.benchmark_database = self.kwargs.get('benchmarks_database', 'benchmarks')
self.domestic_stocks_database = self.kwargs.get('domestic_database', 'brazil_stocks')
self.domestic_options_database = self.kwargs.get('domestic_database', 'brazil_options')
self.international_database = self.kwargs.get('international_database', 'usa_stocks')
self.currency_database = self.kwargs.get('currency_database', 'currencies')
self.domestic_bonds_path = '{}bonds/'.format(self.path)
self.crypto_path = '{}crypto/'.format(self.path)
self.domestic_stocks_path = '{}stocks/domestic/'.format(self.path)
self.international_stocks_path = '{}stocks/international/'.format(self.path)
self.domestic_options_path = '{}options/domestic/'.format(self.path)
self.domestic_funds_path = '{}funds/domestic/'.format(self.path)
self.list_paths = [
self.domestic_bonds_path,
self.crypto_path,
self.domestic_stocks_path,
self.international_stocks_path,
self.domestic_options_path,
self.domestic_funds_path,
]
self.dates_min = DataFrame()
def get_engine(self):
self.engine = psqlEngine(self.database)
self.connection = self.engine.connect()
def dispose_engine(self):
self.connection.close()
self.engine.dispose()
def get_dollar(self):
currency = 'BRLUSD'
self.dollar = float(read_sql_query("SELECT * FROM {} WHERE ticker = '{}'".format(self.benchmark_database, currency), self.connection).iloc[0].close)
self.dollar_full = read_sql_query("SELECT date, close FROM {} WHERE ticker = '{}' ORDER BY date".format(self.benchmark_database, currency), self.connection)
self.dollar_full.drop_duplicates('date', inplace = True)
self.dollar_full = self.insert_weekends(self.dollar_full)
self.dollar_full.rename(columns = {'close': 'dollar_close'}, inplace = True)
self.dollar_full['dollar_close'] = self.dollar_full.dollar_close.astype('float')
def get_benchmarks(self):
self.spy = read_sql_query("SELECT date, adjusted_close as close FROM {} WHERE ticker = 'SPY' ORDER BY date".format(self.benchmark_database), self.connection)
self.bova = read_sql_query("SELECT date, adjusted_close as close FROM {} WHERE ticker = 'BOVA11' ORDER BY date".format(self.benchmark_database), self.connection)
self.spy.drop_duplicates('date', inplace = True)
self.bova.drop_duplicates('date', inplace = True)
self.spy = self.insert_weekends(self.spy)
self.spy['close'] = self.spy.close.astype('float')
self.bova = self.insert_weekends(self.bova)
self.bova = self.bova.merge(self.dollar_full, on = 'date')
self.bova['close'] = self.bova.close.astype('float')
self.bova['close_dollar'] = (self.bova.close * self.bova.dollar_close).to_list()
def get_all_assets(self):
self.interests, self.fractions = list(), list()
self.domestic_tickers, self.international_tickers = list(), list()
self.domestic_options_tickers = list()
self.domestic_funds_tickers = list()
for directory in self.list_paths:
list_files = list()
for filename in listdir(directory):
if filename.endswith('.csv'):
list_files.append(path.join(directory, filename))
if directory == self.domestic_bonds_path:
self.interests.append(filename.replace('.csv', '').upper())
if directory == self.crypto_path:
self.fractions.append(filename.replace('.csv', '').upper())
if directory == self.domestic_stocks_path:
self.domestic_tickers.append(filename.replace('.csv', '').upper())
if directory == self.international_stocks_path:
self.international_tickers.append(filename.replace('.csv', '').upper())
if directory == self.domestic_options_path:
self.domestic_options_tickers.append(filename.replace('.csv', '').upper())
if directory == self.domestic_funds_path:
self.domestic_funds_tickers.append(filename.replace('.csv', '').upper())
dictionary = dict()
if directory == self.domestic_bonds_path:
for filename, interest in zip(list_files, self.interests):
df = read_csv(filename)
dictionary[interest] = df
if dictionary:
self.domestic_bonds = concat(dictionary)
self.domestic_bonds = self.domestic_bonds.rename(columns = {'pct_cdi': 'share'})
self.domestic_bonds = self.domestic_bonds.merge(self.dollar_full, on = 'date')
self.domestic_bonds['purchase_price_dollar'] = (self.domestic_bonds.purchase_price.astype('float') * self.domestic_bonds.dollar_close.astype('float')).to_list()
else:
if directory == self.crypto_path:
symbols = self.fractions
if directory == self.domestic_stocks_path:
symbols = self.domestic_tickers
if directory == self.international_stocks_path:
symbols = self.international_tickers
if directory == self.domestic_options_path:
symbols = self.domestic_options_tickers
if directory == self.domestic_funds_path:
symbols = self.domestic_funds_tickers
for filename, ticker in zip(list_files, symbols):
df = read_csv(filename)
if ticker in self.domestic_funds_tickers:
df.set_index('date', inplace = True)
df['purchase_price'] = df.purchase_price.diff()
df = df.dropna()
df.reset_index(inplace = True)
if (ticker in self.domestic_tickers) or (ticker in self.domestic_options_tickers) or (ticker in self.domestic_funds_tickers):
df = df.merge(self.dollar_full, on = 'date')
df['purchase_price'] = df.purchase_price.astype('float') * df.dollar_close.astype('float')
dictionary[ticker] = df
df['cum_share'] = df.share.cumsum()
df['price_share'] = (df.purchase_price / df.share)
df['cum_price_share'] = df.price_share.expanding().mean()
dictionary[ticker] = df
if dictionary:
self.stocks = concat(dictionary)
if directory == self.crypto_path:
self.crypto = concat(dictionary)
if directory == self.domestic_stocks_path:
self.domestic_stocks = concat(dictionary)
if directory == self.international_stocks_path:
self.international_stocks = concat(dictionary)
if directory == self.domestic_options_path:
self.domestic_options = concat(dictionary)
if directory == self.domestic_funds_path:
self.domestic_funds = concat(dictionary)
def get_quotas(self, asset):
quotas = dict()
domestic = False
if asset == 'crypto':
list_tickers = self.fractions
db = self.currency_database
if asset == 'domestic_stocks':
list_tickers = self.domestic_tickers
db = self.domestic_stocks_database
domestic = True
if asset == 'international_stocks':
list_tickers = self.international_tickers
db = self.international_database
if asset == 'domestic_options':
list_tickers = self.domestic_options_tickers
db = self.domestic_options_database
domestic = True
if asset == 'domestic_funds':
list_tickers = self.domestic_funds_tickers
domestic = True
for ticker in list_tickers:
key = ticker.upper()
if asset == 'crypto':
quotas[key] = self.crypto.loc[ticker].cum_share.iloc[-1]
if asset == 'domestic_stocks':
quotas[key] = self.domestic_stocks.loc[ticker].cum_share.iloc[-1]
if asset == 'international_stocks':
quotas[key] = self.international_stocks.loc[ticker].cum_share.iloc[-1]
if asset == 'domestic_options':
quotas[key] = self.domestic_options.loc[ticker].cum_share.iloc[-1]
if asset == 'domestic_funds':
quotas[key] = 1.
portfolio = DataFrame({
'asset': list(quotas.keys()),
'quotas': list(quotas.values())
})
portfolio.sort_values(by = ['asset'], inplace = True)
if asset == 'domestic_funds':
value_usd, value_brl = list(), list()
for asset in list_tickers:
close_price = read_csv(self.domestic_funds_path + '{}.csv'.format(asset.lower())).share.iloc[-1]
value_usd.append(close_price * quotas.get(asset) * self.dollar)
value_brl.append(close_price * quotas.get(asset))
portfolio['value_usd'] = value_usd
portfolio['value_brl'] = value_brl
else:
if domestic == False:
close_price = read_sql_query("SELECT date, ticker, close FROM (SELECT date, ticker, close, MAX(date) OVER (PARTITION BY ticker) AS max_date FROM {}) x WHERE date = max_date".format(db), self.connection)
else:
close_price = read_sql_query("SELECT date, ticker, close FROM (SELECT date, ticker, adjusted_close as close, MAX(date) OVER (PARTITION BY ticker) AS max_date FROM {}) x WHERE date = max_date".format(db), self.connection)
close_price['close'] = close_price.close.astype('float')
close_price = close_price.loc[close_price.ticker.isin(portfolio.asset.to_list())]
self.dates_min = self.dates_min.append(close_price[['date', 'ticker']])
close_price['quota'] = close_price.ticker.apply(lambda x: quotas.get(x))
if domestic == False:
portfolio['value_usd'] = (close_price.close * close_price.quota).to_list()
portfolio['value_brl'] = (close_price.close * close_price.quota / self.dollar).to_list()
else:
portfolio['value_usd'] = (close_price.close * close_price.quota * self.dollar).to_list()
portfolio['value_brl'] = (close_price.close * close_price.quota).to_list()
portfolio.sort_values(by = ['value_usd'], ascending = False, inplace = True)
return portfolio
def get_portfolio(self):
self.portfolio = dict()
self.portfolio['domestic bonds'] = self.portfolio_bonds
self.portfolio['domestic stocks'] = self.portfolio_domestic_stocks
self.portfolio['international stocks'] = self.portfolio_international_stocks
self.portfolio['crypto'] = self.portfolio_crypto
# self.portfolio['domestic options'] = self.portfolio_domestic_options
self.portfolio['domestic funds'] = self.portfolio_domestic_funds
self.portfolio = concat(self.portfolio)
self.portfolio = self.portfolio.loc[self.portfolio.quotas >= 1e-10]
def get_aggregate(self):
assets = list(self.portfolio.index.unique(level = 0))
value_brl, value_usd = list(), list()
for asset in assets:
value_brl.append(self.portfolio.loc[asset].sum().value_brl)
value_usd.append(self.portfolio.loc[asset].sum().value_usd)
self.portfolio_aggregate = DataFrame({
'asset': assets,
'value_brl': value_brl,
'value_usd': value_usd,
})
def insert_weekends(self, df, asset = 'stock'):
df.set_index('date', inplace = True)
start, end = df.index[0], df.index[-1]
start = dt.strptime(start, '%Y-%m-%d').date()
end = dt.strptime(end, '%Y-%m-%d').date()
dates = [str(start + timedelta(days = x)) for x in range(0, (end - start).days + 1, 1)]
df = df.reindex(dates, fill_value = 0)
df.reset_index(inplace = True)
close = list()
if asset == '6040':
for value in df.interest:
if value != 0:
close.append(value)
if value == 0:
close.append(1.)
df['interest'] = close
if asset == 'bond':
for value in df.portfolio:
if value != 0:
close.append(value)
if value == 0:
close.append(close[-1])
df['portfolio'] = close
if asset == 'crypto':
for value in df.close:
if value != 0:
close.append(value)
if value == 0:
close.append(close[-1])
df['close'] = close
if asset == 'stock':
for value in df.close:
if value != 0:
close.append(value)
if value == 0:
close.append(close[-1])
df['close'] = close
return df
def get_concat_dataframe(self, columns, options = True):
columns_bonds = list()
for elem in columns:
if elem == 'share':
columns_bonds.append('purchase_price')
elif elem == 'purchase_price':
columns_bonds.append('purchase_price_dollar')
else:
columns_bonds.append(elem)
domestic_bonds = dict()
domestic_bonds['CDB'] = self.domestic_bonds[columns_bonds].rename(columns = {'purchase_price_dollar': 'purchase_price'})
domestic_bonds = concat(domestic_bonds)
if options == True:
df = concat([domestic_bonds, self.domestic_stocks[columns], self.international_stocks[columns], self.crypto[columns], self.domestic_funds[columns], self.domestic_options[columns]])
else:
            df = concat([domestic_bonds, self.domestic_stocks[columns], self.international_stocks[columns], self.crypto[columns], self.domestic_funds[columns]])
import pandas as pd
import re
data_path = ""
data_path_ectf = "ECTF_dataset/"
###############
## read data ##
###############
# read all data of dataset covid_fake_new
train_df = pd.read_csv(data_path+"Constraint_Train.csv").drop(columns=["id"],axis = 1) # tweet, label
val_df = pd.read_csv(data_path+"Constraint_Val.csv").drop(columns=["id"],axis = 1)# tweet, label
test_with_label_df = pd.read_csv(data_path+"english_test_with_labels.csv").drop(columns=["id"],axis = 1) # tweet, label
all_df = pd.concat([train_df, val_df, test_with_label_df])
# read all data of dataset ECTF
fake_df = pd.read_csv(data_path_ectf+"fake.csv", index_col=[0]).drop(columns=["id"],axis = 1)
fake_df["label"] = "fake"
genuine_df = pd.read_csv(data_path_ectf+"genuine.csv", index_col=[0])
'''
This script uses Google cloud storage to transcribe wav file
'''
import os
import pandas as pd
from pydub.utils import mediainfo
from google.cloud import speech_v1p1beta1 as speech
from scipy.io.wavfile import read as read_wav
import Python.Data_Preprocessing.config.dir_config as cfg
def transcribe_gcs(bucket_name, audio_id, parallel_run_settings):
'''
This function asynchronously transcribes the audio file.
:param bucket_name: bucket_name in google cloud
:param audio_id: wav file
:return: word and utterance transcripts
'''
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "./data/secrets/<KEY>"
# info = mediainfo(os.path.join(parallel_run_settings['wav_path'], audio_id))
# sample_rate = info['sample_rate']
sample_rate, data = read_wav(os.path.join(parallel_run_settings['wav_path'], audio_id))
# channels = info['channels']
client = speech.SpeechClient()
gcs_uri = "gs://" + bucket_name + "/" + audio_id
audio = speech.types.RecognitionAudio(uri=gcs_uri)
config = speech.types.RecognitionConfig(
encoding=speech.enums.RecognitionConfig.AudioEncoding.LINEAR16,
sample_rate_hertz=sample_rate,
language_code='en-US',
enable_speaker_diarization=True,
diarization_speaker_count=1,
audio_channel_count=1,# channels,
enable_separate_recognition_per_channel=True)
operation = client.long_running_recognize(config, audio)
print('\nWaiting for operation to complete...')
response = operation.result(timeout=10000)
print('\nCompleted!')
row_list = []
sub_row_list = []
for result in response.results:
row_i = {'transcript': result.alternatives[0].transcript,
'confidence': result.alternatives[0].confidence}
for alternative in result.alternatives:
for word_info in alternative.words:
sub_row_i = {'word': word_info.word,
'start_time': (word_info.start_time.seconds +
word_info.start_time.nanos*1e-9)}
sub_row_list.append(sub_row_i)
row_list.append(row_i)
    df_word = pd.DataFrame(sub_row_list)
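# Hypothetical usage sketch (comments only, not part of the original module). The bucket and
# file names below are placeholders, and it assumes the function goes on to return the word-
# and utterance-level transcripts described in its docstring:
#
#   run_settings = {'wav_path': '/data/wav'}  # placeholder settings dict
#   words_df, utterances_df = transcribe_gcs('my-audio-bucket', 'interview_01.wav', run_settings)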
from bs4 import BeautifulSoup
import bs4
import pandas as pd
import numpy as np
import time
total_df = pd.DataFrame(columns=["title", "url","case_num", "content"])
file = open('rescue_20191030.txt', 'r', encoding='utf-8')
soup = BeautifulSoup(file, "html.parser")
post = soup.findAll('item')
str1 = """<td height="33" style="padding-left:20px"><img alt="로고" src="/img/hpgonggo/logo_scourt.gif"/></td>"""
str2 = """<td height="27"><img alt="종료" border="0" onclick="window.close();" src="/img/hpgonggo/btn_close.gif" style="cursor:hand"/><img alt="공백" height="10" src="/img/hpgonggo/blank.gif" width="10"/></td>"""
i = 0
for p in post:
title = p.find('title').text.strip()
content = p.find('content:encoded').find(text=lambda tag: isinstance(tag, bs4.CData)).string.strip()
content = content.replace(str1,"").replace(str2,"")
url = p.find('guid').text
soup2 = BeautifulSoup(content, "html.parser")
try:
code = soup2.findAll('font')[3].text.strip()
except:
code = soup2.findAll("span", {"style":"color: 145192; font-size: small;"})[1].text
print(i,"번째: ",code)
total_df = total_df.append({"title" : title,
"case_num" : code,
"url" : url,
"content" : content}, ignore_index=True)
i +=1
total_df.to_excel('./rescue_detail.xlsx', index=False)
def remove_tag(data):
try:
soup = BeautifulSoup(data, "html.parser")
ret = soup.find('a').text.strip()
except AttributeError:
ret = data
return ret
def get_url(data):
try:
soup = BeautifulSoup(data, "html.parser")
ret = soup.find('a')
url = ret['href']
except AttributeError:
url = data
except TypeError:
url = data
return url
def catch_address(html):
try:
soup = BeautifulSoup(html, "html.parser")
addr = soup.select('p')[2].text.split(' ')[4].strip()
except TypeError:
print(html)
addr = None
except IndexError:
print(soup.select('p')[2])
addr = None
return addr
data1 = pd.read_csv('./rescue.csv', engine='python', encoding='utf-8')
data1.shape
data2 = pd.read_excel('./rescue_detail.xlsx')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import ahocorasick
import math
import os
import re
import sys
import shutil
import glob
import xlsxwriter
import subprocess
from functools import partial
from itertools import product, combinations
from subprocess import DEVNULL
from multiprocessing import Pool
from threading import Timer
import random
import pandas as pd
import tqdm
import primer3
from Bio import SeqIO, Seq
from Bio.Alphabet import IUPAC
from Bio.SeqRecord import SeqRecord
__author__ = "<NAME>"
__copyright__ = "Copyright 2019, <NAME>"
__license__ = "MIT"
__maintainer__ = "<NAME>"
__version__ = "1.0"
__email__ = "<EMAIL>"
__status__ = "Production"
class PRIMeval:
def __init__(self, run_id, max_primer_mismatches, max_probe_mismatches, max_product_size, cross_check, probes_only, method, dimer_check,
primerMonovalentCations, primerDivalentCations, primerDNTPs, primerConcentration, primerAnnealingTemp,
probeMonovalentCations, probeDivalentCations, probeDNTPs, probeConcentration, probeAnnealingTemp, prebuilt = ""):
# parameters
self.run_id = run_id
self.max_primer_mismatches = int(max_primer_mismatches)
self.max_probe_mismatches = int(max_probe_mismatches)
self.max_product_size = int(max_product_size)
self.max_mismatches = max(max_primer_mismatches, max_probe_mismatches)
if self.max_mismatches == 0:
self.l, self.e, self.qcov, self.perciden = 5, 10, 100, 100
elif self.max_mismatches == 1:
self.l, self.e, self.qcov, self.perciden = 5, 40, 90, 90
elif self.max_mismatches == 2:
self.l, self.e, self.qcov, self.perciden = 5, 70, 85, 85
else:
self.l, self.e, self.qcov, self.perciden = 5, 100, 80, 80
self.prebuilt = str(prebuilt)
self.bowtie_dbs = ["list_of_prebuilt_dbs_in_prebuilt_folder"]
self.bowtie_runs = []
self.blast_db_name = "user_db"
self.bowtie_index_name = "bindex"
self.num_threads = 48
self.method = method
if dimer_check == "True":
self.dimer_check = True
else:
self.dimer_check = False
if cross_check == "True":
self.same_package = False
else:
self.same_package = True
if probes_only == "True":
self.probes_only = True
else:
self.probes_only = False
# Cross dimer check
self.primer_monovalent_cations = str(primerMonovalentCations)
self.primer_divalent_cations = str(primerDivalentCations)
self.primer_dntps = str(primerDNTPs)
self.primer_annealing_oligo = str(primerConcentration)
self.primer_annealing_temp = str(primerAnnealingTemp)
self.probe_monovalent_cations = str(probeMonovalentCations)
self.probe_divalent_cations = str(probeDivalentCations)
self.probe_dntps = str(probeDNTPs)
self.probe_annealing_oligo = str(probeConcentration)
self.probe_annealing_temp = str(probeAnnealingTemp)
self.cross_dimer_dfs = []
self.cross_dimer_dfs_dg = []
self.hairpin_dfs = []
# Aho-Corasick Automaton
self.aho = ahocorasick.Automaton()
# folders
self.base_folder = os.getcwd() + "/"
self.run_folder = self.base_folder + "runs/" + str(self.run_id) + "/"
self.input_folder = self.run_folder + "input/"
self.output_folder = self.run_folder + "output/"
self.tmp_folder = self.run_folder + "tmp/"
self.input_contigs = self.run_folder + "input/contigs/"
self.primer_input_folder = self.run_folder + "input/primers/"
self.probes_input_folder = self.run_folder + "input/probes/"
self.blast_db_folder = self.run_folder + "tmp/blastdb/"
self.prebuilt_genomes = self.base_folder + "prebuilt/genomes/"
self.prebuilt_bowtie = self.base_folder + "prebuilt/bowtie/"
# files
self.output_contigs = self.run_folder + "tmp/merged_contigs.fasta"
self.blast_output_tmp_file = self.run_folder + "tmp/blast_tmp_results.txt"
self.blast_output_file = self.run_folder + "tmp/blast_results.txt"
self.bowtie_output_tmp_file = self.run_folder + "tmp/bowtie_tmp_results.txt"
self.bowtie_output_file = self.run_folder + "tmp/bowtie_results.txt"
self.bowtie_index_folder = self.run_folder + "tmp/bowtie_index_folder/"
self.oligo_file = self.run_folder + "output/oligos.fasta"
self.results_all = self.run_folder + "output/results.csv"
self.results_wob = self.run_folder + "output/results_wobbled.csv"
self.results_dimers = self.run_folder + "output/results_dimers.xlsx"
# settings
self.blastdb_cmd = "/path/to/makeblastdb"
self.bowtie_build_cmd = "/path/to/bowtie-build"
self.blast_cmd = "/path/to/blastn"
self.bowtie_cmd = "/path/to/bowtie"
self.faidx_cmd = "/path/to/samtools faidx "
self.pd_col_hits = ["Sequence", "Type", "Name", "Package", "StartPos", "EndPos", "MismatchesTotal",
"Strand", "HitSequence", "Tm", "dG"]
self.pd_col_results = ["Sequence", "Contig", "Primer1", "Primer2", "Probe", "Primer1Package",
"Primer2Package", "ProbePackage", "StartPos1", "EndPos1", "StartPos2", "EndPos2",
"StartPos3", "EndPos3", "Primer1Tm", "Primer2Tm", "ProbeTm", "Primer1dG", "Primer2dG", "ProbedG", "ProductSize", "ProductTm", "NoMismatchesLeft", "NoMismatchesRight",
"NoMismatchesProbe", "MismatchesLeft", "MismatchesRight", "MismatchesProbe",
"Comment", "Product"]
self.blast_txt_params = "\"6 qseqid sseqid nident qlen length mismatch qstart qend sstart sseq sstrand " \
"send\""
self.blast_txt_fields = ["qseqid", "sseqid", "nident", "qlen", "length", "mismatch", "qstart", "qend",
"sstart", "sseq", "sstrand", "send"]
# return list of all possible sequences given an ambiguous DNA input
def _extend_ambiguous_dna(self, seq):
d = Seq.IUPAC.IUPACData.ambiguous_dna_values
return list(map("".join, product(*map(d.get, seq))))
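    # Illustrative example (comment added for clarity, not in the original source): for an
    # oligo such as "ACYG" (Y = C or T), the Cartesian product over the IUPAC table yields
    # ["ACCG", "ACTG"]; unambiguous bases simply map to themselves.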
def _get_sequence(self, contig_file, wanted_contig, start, end, strand=1):
try:
command = self.faidx_cmd + contig_file + " '" + wanted_contig + ":" + str(start) + "-" + str(end) + "'"
call = subprocess.check_output(command, shell=True, stderr=subprocess.DEVNULL).decode().split("\n", 1)[1]
except:
try:
contig_file = self.prebuilt_genomes + wanted_contig.split("__contigname__", 1)[0] + ".fasta"
command = self.faidx_cmd + contig_file + " '" + wanted_contig + ":" + str(start) + "-" + str(end) + "'"
call = subprocess.check_output(command, shell=True, stderr=subprocess.DEVNULL).decode().split("\n", 1)[1]
except:
sys.exit("Failed retrieving: " + command)
call = re.sub("\n|\r", "", call)
sequence = Seq.Seq(call)
if strand == 1:
return sequence.upper()
else:
return sequence.reverse_complement().upper()
# Get a visual representation of mismatches between two sequences
def _mismatch_visualization(self, seq_a, seq_b):
seq_a, seq_b = seq_a.upper(), seq_b.upper()
mismatches = ""
if (len(seq_a) - len(seq_b) != 0):
return "Error"
for pos in range(0, len(seq_a)):
if seq_a[pos] != seq_b[pos]:
mismatches += "(" + seq_a[pos] + "/" + seq_b[pos] + ")"
else:
mismatches += "="
return mismatches
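    # Illustrative example (comment added for clarity, not in the original source):
    # _mismatch_visualization("ACGT", "ACTT") returns "==(G/T)=" -- identical positions
    # collapse to "=", mismatching positions show both bases as "(a/b)".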
def _prepare_folders(self):
if os.path.exists(self.output_folder):
shutil.rmtree(self.output_folder)
if os.path.exists(self.tmp_folder):
shutil.rmtree(self.tmp_folder)
# Create output and tmp folders
if not os.path.exists(self.output_folder):
os.makedirs(self.output_folder)
if not os.path.exists(self.tmp_folder):
os.makedirs(self.tmp_folder)
if not os.path.exists(self.bowtie_index_folder):
os.makedirs(self.bowtie_index_folder)
def _clean_up_folders(self):
# if os.path.exists(self.input_folder):
# shutil.rmtree(self.input_folder)
if os.path.exists(self.tmp_folder):
shutil.rmtree(self.tmp_folder)
# Rename primer and probes, create new sequences without IUPAC codes and save in file
# Used for dimer check: self.packages, packages, package, oligo_name
def _import_oligos(self, folder, oligotype):
packages = {}
primer_records = []
allowed_chars = "[^0-9a-zA-Z()'_\+-]+"
for file in os.listdir(folder):
if file.endswith(".fasta"):
package = file.rsplit(".fasta", 1)[0]
packages[package] = {}
sequences = SeqIO.parse(open(folder + file), "fasta")
for fasta in sequences:
m = re.search("[M,R,W,S,Y,K,V,H,D,B,N]", str(fasta.seq))
if m:
sequence_mutations = self._extend_ambiguous_dna(str(fasta.seq))
mutation_count = 0
for mutation in sequence_mutations:
mutation_count += 1
oligo_name = re.sub(allowed_chars, "_", fasta.description) + "_mut" + str(mutation_count)
packages[package][oligo_name] = str(mutation)
if oligotype == "probe":
rec = SeqRecord(Seq.Seq(mutation, IUPAC),
id=package + "^" + re.sub(allowed_chars, "_",
fasta.description) + "_mut" + str(
mutation_count) + "_probe", description="")
else:
rec = SeqRecord(Seq.Seq(mutation, IUPAC),
id=package + "^" + re.sub(allowed_chars, "_",
fasta.description) + "_mut" + str(
mutation_count), description="")
primer_records.append(rec)
else:
oligo_name = re.sub(allowed_chars, "_", fasta.description)
packages[package][oligo_name] = str(fasta.seq)
if oligotype == "probe":
rec = SeqRecord(fasta.seq, id=package + "^" + re.sub(allowed_chars, "_",
fasta.description) + "_probe",
description="")
else:
rec = SeqRecord(fasta.seq,
id=package + "^" + re.sub(allowed_chars, "_", fasta.description),
description="")
primer_records.append(rec)
output_handle = open(self.oligo_file, "a")
SeqIO.write(primer_records, output_handle, "fasta")
output_handle.close()
if oligotype == "primer":
self.primer_packages = packages
else:
self.probe_packages = packages
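    # Resulting structure (descriptive comment, not in the original source): a nested dict keyed
    # first by package (input file name without ".fasta"), then by sanitized oligo name, e.g.
    # {"packageA": {"oligo_fwd": "ACGT...", "oligo_rev_mut1": "TTGC..."}} -- the names here are
    # illustrative only.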
# Rename and merge contigs
def _import_contigs(self):
seq_records = []
for file in os.listdir(self.input_contigs):
# CHANGE: other file endings should also be possible (see with Django upload permitted filenames)
if file.endswith(".fasta"):
base_contig_name = file.replace(".fasta", "")
for entry in SeqIO.parse(self.input_contigs + file, "fasta"):
my_new_id = base_contig_name + "__contigname__" + entry.id
seq_records.append(SeqRecord(entry.seq, id=my_new_id, description=""))
output_handle = open(self.output_contigs, "w")
SeqIO.write(seq_records, output_handle, "fasta")
output_handle.close()
command = self.faidx_cmd + self.output_contigs
subprocess.call(command, shell=True)
def _import_sequences(self):
if self.probes_only == False:
self._import_oligos(self.primer_input_folder, "primer")
self._import_oligos(self.probes_input_folder, "probe")
self._import_contigs()
def _create_blast_db(self):
command = self.blastdb_cmd + " -in " + self.output_contigs + " -dbtype nucl -out " + self.blast_db_folder + self.blast_db_name
subprocess.call(command, shell=True)
def _create_bowtie_index(self):
command = self.bowtie_build_cmd + " --threads " + str(
self.num_threads) + " -f " + self.output_contigs + " " + self.bowtie_index_folder + self.bowtie_index_name
subprocess.call(command, shell=True)
def _blast_call(self):
command = self.blast_cmd + " -db " + self.blast_db_folder + self.blast_db_name + " -query " + self.oligo_file + " -out " + \
self.blast_output_tmp_file + " -outfmt " + self.blast_txt_params + " -num_threads " + str(
self.num_threads) + " -evalue 200000 " \
"-qcov_hsp_perc " + str(self.qcov) + " -perc_identity " + str(self.perciden) + " -max_target_seqs 2000000 -word_size 4 -ungapped"
subprocess.call(command, shell=True)
with open(self.blast_output_file, "a") as out_file:
with open(self.blast_output_tmp_file) as in_file:
out_file.write(in_file.read())
def _bowtie_call(self, index_folder = "", index_name = ""):
mismatches = self.max_primer_mismatches if self.max_primer_mismatches >= self.max_probe_mismatches else self.max_probe_mismatches
if index_folder == "" and index_name == "":
if os.path.getsize(self.output_contigs) == 0:
return
index_folder = self.bowtie_index_folder
index_name = self.bowtie_index_name
command = self.bowtie_cmd + " -f -a -p " + str(self.num_threads) + " -n " + str(
mismatches) + " -l " + str(self.l) + " -e " + str(self.e) + " " + index_folder + index_name + " " + self.oligo_file + " " + self.bowtie_output_tmp_file
subprocess.call(command, shell=True)
with open(self.bowtie_output_file, "a") as out_file:
with open(self.bowtie_output_tmp_file) as in_file:
out_file.write(in_file.read())
def _specificity_calls(self):
for db in self.bowtie_runs:
self._bowtie_call(self.prebuilt_bowtie, db)
def _multiprocess_convert_bowtie_to_blast(self):
# in case no hits are returned
try:
df = pd.read_csv(self.bowtie_output_file, sep="\t", header=None)
except pd.errors.EmptyDataError:
df = pd.DataFrame(columns = ["", "", "", "", "", "", "", "", "", "", "", ""])
split_df = self.splitDataFrameIntoChunks(df)
func = partial(self._convert_bowtie_to_blast)
with Pool(self.num_threads) as p:
multiprocessing_results = list(tqdm.tqdm(p.imap(func, split_df), total=len(split_df)))
self.df_bowtie = pd.DataFrame(
columns=["qseqid", "sseqid", "nident", "qlen", "length", "mismatch", "qstart", "qend", "sstart", "sseq",
"sstrand", "send"])
self.df_bowtie = pd.concat(multiprocessing_results, ignore_index=True)
def _convert_bowtie_to_blast(self, df):
df_bowtie = pd.DataFrame(
columns=["qseqid", "sseqid", "nident", "qlen", "length", "mismatch", "qstart", "qend", "sstart", "sseq",
"sstrand", "send"])
for index, line in df.iterrows():
mismatch = str(line[7]).count(":")
if line[0].endswith("_probe") and mismatch > self.max_probe_mismatches:
continue
if not line[0].endswith("_probe") and mismatch > self.max_primer_mismatches:
continue
sstrand = "plus" if line[1] == "+" else "minus"
qseqid = line[0]
sseqid = line[2]
qlen = len(line[4])
length = qlen
qstart = 1
qend = qlen
sstart = int(line[3]) + 1
send = sstart + qlen - 1
nident = qlen - mismatch
if sstrand == "minus":
temp_swap = sstart
sstart = send
send = temp_swap
if mismatch == 0:
sseq = str(Seq.Seq(line[4]).reverse_complement())
else:
sseq = self._resolve_bowtie_mismtches(line[4], line[7], -1)
else:
if mismatch == 0:
sseq = line[4]
else:
sseq = self._resolve_bowtie_mismtches(line[4], line[7], 1)
df_bowtie.loc[len(df_bowtie)] = [str(qseqid), str(sseqid), str(nident), str(qlen), str(length),
str(mismatch), str(qstart),
str(qend), str(sstart), str(sseq), str(sstrand), str(send)]
return df_bowtie
def _resolve_bowtie_mismtches(self, sequence, mismatches, strand):
sequence = Seq.Seq(sequence) if strand == 1 else Seq.Seq(sequence).reverse_complement()
mismatches = mismatches.split(",")
for mismatch in mismatches:
position, base = mismatch.split(":", 1)
position = int(position)
base = base[0] if strand == 1 else Seq.Seq(base[0]).reverse_complement()
sequence = sequence[:position] + base + sequence[position+1:]
return str(sequence)
def _split_output(self):
if self.method == "blast":
# in case no hits are returned
try:
df_blast = pd.read_csv(self.blast_output_file, sep="\t", header=None)
except pd.errors.EmptyDataError:
df_blast = pd.DataFrame(columns = ["", "", "", "", "", "", "", "", "", "", "", ""])
self.df_blast_split = self.splitDataFrameIntoChunks(df_blast)
if self.method == "bowtie":
self.df_bowtie_split = self.splitDataFrameIntoChunks(self.df_bowtie)
if self.method == "aho-corasick":
self.df_aho_split = self.splitDataFrameIntoChunks(self.df_aho)
def splitDataFrameIntoChunks(self, df):
chunkSize = math.ceil(len(df) / self.num_threads)
if chunkSize == 0:
chunkSize = 1
listOfDf = list()
numberChunks = len(df) // chunkSize + 1
for i in range(numberChunks):
listOfDf.append(df[i * chunkSize:(i + 1) * chunkSize])
return listOfDf
def _multiprocess_split_files(self):
if self.method == "blast":
input_files = self.df_blast_split
if self.method == "bowtie":
input_files = self.df_bowtie_split
if self.method == "aho-corasick":
input_files = self.df_aho_split
func = partial(self._parse_blastlike_results_df)
with Pool(self.num_threads) as p:
multiprocessing_results = list(tqdm.tqdm(p.imap(func, input_files), total=len(input_files)))
self.hits = pd.concat(multiprocessing_results, ignore_index=True)
hits_output = self.hits.copy()
if len(hits_output.index) > 0:
hits_output[['Sequence', 'Contig']] = hits_output['Sequence'].str.split("__contigname__", 1, expand=True)
hits_output = hits_output[
['Sequence', 'Contig', 'Type', 'Name', 'Package', 'StartPos', 'EndPos', 'MismatchesTotal', 'Strand',
'HitSequence', 'Tm', 'dG']]
tmp = hits_output['Name'].str.rsplit("_probe", 1, expand = True)
hits_output['Name'] = tmp[0]
hits_output.to_csv(self.output_folder + "all_hits.csv", index=False, sep=";")
def _process_probes_only(self):
probes_df = self.hits[(self.hits['Type'] == "Probe")]
if len(probes_df.index) > 0:
oligos_full_sequences = SeqIO.index(self.oligo_file, "fasta")
probes_df = probes_df.drop(columns = ['Type', 'Strand'])
probes_df = probes_df.rename(columns = {'Name': 'Probe', 'Package': 'ProbePackage', 'MismatchesTotal': 'NoMismatchesProbe'})
probes_df[['Sequence', 'Contig']] = probes_df['Sequence'].str.split("__contigname__", 1, expand = True)
probes_df['MismatchesProbe'] = probes_df.apply(lambda x: self._mismatch_visualization(oligos_full_sequences[x['ProbePackage'] + "^" + x['Probe']].seq, x['HitSequence']), axis=1)
probes_df = probes_df[['Sequence', 'Contig', 'Probe', 'ProbePackage', 'StartPos', 'EndPos', 'NoMismatchesProbe', 'MismatchesProbe', 'HitSequence', 'Tm' ,'dG']]
tmp = probes_df['Probe'].str.rsplit("_probe", 1, expand = True)
probes_df['Probe'] = tmp[0]
probes_df.to_csv(self.results_all, index=False, sep=";")
# parse wobbled primers
subset = probes_df[probes_df['Probe'].str.contains("_mut")]
subset_r = subset.replace(['_mut([0-9])+'], [''], regex=True)
# hits without mutations
unique = probes_df.merge(subset, indicator=True, how="outer")
unique = unique[unique['_merge'] == 'left_only']
unique = unique.drop("_merge", axis=1)
results2 = pd.DataFrame(columns=['Sequence', 'Contig', 'Probe', 'ProbePackage', 'StartPos', 'EndPos', 'NoMismatchesProbe', 'MismatchesProbe', 'HitSequence', 'Tm', 'dG'])
for s in subset_r.groupby(['Sequence', 'Contig', 'Probe', 'ProbePackage', 'StartPos', 'EndPos', 'HitSequence']).groups.items():
# Fields to be changed: NoMismatchesProbe, MismatchesProbe
sample = subset_r.loc[s[1]] # get one set
first_row = sample.iloc[0]
if len(sample) < 2:
results2.loc[len(results2)] = first_row
else:
mismatch_min, mismatch_max = min(sample['NoMismatchesProbe']), max(sample['NoMismatchesProbe'])
mismatches = mismatch_min if mismatch_min == mismatch_max else str(mismatch_min) + "-" + str(mismatch_max)
tm_min, tm_max = min(sample['Tm']), max(sample['Tm'])
tm = tm_min if tm_min == tm_max else str(tm_min) + "-" + str(tm_max)
dg_min, dg_max = min(sample['dG']), max(sample['dG'])
dg = dg_min if dg_min == dg_max else str(dg_min) + "-" + str(dg_max)
                    # Get first row and then replace values of first row (all other fields are identical)
results2.loc[len(results2)] = [first_row['Sequence'], first_row['Contig'], first_row['Probe'], first_row['ProbePackage'],
first_row['StartPos'], first_row['EndPos'], mismatches, '', first_row['HitSequence'], tm, dg]
wobbled = unique.append(results2)
wobbled.to_csv(self.results_wob, index=False, sep=";")
def _parse_blastlike_results_df(self, blast_df):
oligos_full_sequences = SeqIO.index(self.oligo_file, "fasta")
hits = pd.DataFrame(columns=self.pd_col_hits)
for index, line in blast_df.iterrows():
if self.method == "aho-corasick":
if line[0].endswith("_probe") and int(line[5]) > self.max_probe_mismatches:
continue
if not line[0].endswith("_probe") and int(line[5]) > self.max_primer_mismatches:
continue
new_package = line[0].split("^", 1)[0]
new_qresult = line[0].split("^", 1)[1]
hit_strand = 1 if line[10] == "plus" else -1
mismatches_total = int(line[5])
hit_seq = line[9]
type = "Probe" if line[0].endswith("_probe") == True else "Primer"
if hit_strand == -1:
temp_swap = int(line[8])
sstart = int(line[11])
send = temp_swap
else:
sstart = int(line[8])
send = int(line[11])
tm, dg = self._calc_thermal_parameters(str(oligos_full_sequences[line[0]].seq.reverse_complement()), hit_seq, type)
hits.loc[len(hits)] = [line[1], type, new_qresult, new_package, sstart, send,
mismatches_total, hit_strand, hit_seq, tm, dg]
else:
mismatches_left = int(line[6])
mismatches_right = int(line[3]) - int(line[7])
mismatches_middle = int(line[3]) - int(line[2]) - mismatches_left - mismatches_right
mismatches_total = mismatches_left + mismatches_right + mismatches_middle
if line[0].endswith("_probe") and mismatches_total > self.max_probe_mismatches:
continue
if not line[0].endswith("_probe") and mismatches_total > self.max_primer_mismatches:
continue
new_package = line[0].split("^", 1)[0]
new_qresult = line[0].split("^", 1)[1]
hit_strand = 1 if line[10] == "plus" else -1
type = "Probe" if line[0].endswith("_probe") == True else "Primer"
correct_start = mismatches_left - 1 if hit_strand == 1 else mismatches_right if hit_strand == -1 else 0
correct_end = mismatches_right if hit_strand == 1 else mismatches_left - 1 if hit_strand == -1 else 0
if hit_strand == -1:
temp_swap = int(line[8])
sstart = int(line[11]) - correct_start
send = temp_swap + correct_end
else:
sstart = int(line[8]) - correct_start
send = int(line[11]) + correct_end
if mismatches_left > 0 or mismatches_right > 0:
hit_seq = self._get_sequence(self.output_contigs, line[1], sstart, send, hit_strand)
else:
hit_seq = line[9]
tm, dg = self._calc_thermal_parameters(str(oligos_full_sequences[line[0]].seq.reverse_complement()), hit_seq, type)
hits.loc[len(hits)] = [line[1], type, new_qresult, new_package, sstart, send,
mismatches_total, hit_strand, hit_seq, tm, dg]
return hits
def _multiprocess_hits(self):
objects = []
groups = []
num_groups = math.ceil(len(self.hits[self.hits['Type'] == "Primer"].groupby("Sequence").groups.items()) / self.num_threads)
i = 1
for s in self.hits[self.hits['Type'] == "Primer"].groupby("Sequence").groups.items():
if i > num_groups:
groups.append(objects)
objects = []
i = 1
objects.append(self.hits.loc[s[1]])
i += 1
groups.append(objects)
multiprocessing_results = []
func = partial(self._parse_hits)
with Pool(self.num_threads) as p:
multiprocessing_results = list(tqdm.tqdm(p.imap(func, groups), total=len(groups)))
        self.results = pd.concat(multiprocessing_results, ignore_index=True)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 21 23:24:11 2021
@author: rayin
"""
import os, sys
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import math
import re
import random
from collections import Counter
from pprint import pprint
os.chdir("/Users/rayin/Google Drive/Harvard/5_data/UDN/work")
case_gene_update = pd.read_csv("data/processed/variant_clean.csv", index_col=0)
aa_variant = list(case_gene_update['\\12_Candidate variants\\09 Protein\\'])
#pd.DataFrame(aa_variant).to_csv('aa_variant.csv')
#aa_variant_update = pd.read_csv("data/processed/aa_variant_update.csv", index_col=0)
#aa_variant_update = list(aa_variant_update['\\12_Candidate variants\\09 Protein\\'])
amino_acid = {'CYS': 'C', 'ASP': 'D', 'SER': 'S', 'GLN': 'Q', 'LYS': 'K', 'ILE': 'I', 'PRO': 'P', 'THR': 'T', 'PHE': 'F', 'ASN': 'N',
'GLY': 'G', 'HIS': 'H', 'LEU': 'L', 'ARG': 'R', 'TRP': 'W', 'ALA': 'A', 'VAL':'V', 'GLU': 'E', 'TYR': 'Y', 'MET': 'M', 'TER': 'X'}
aa_3 = []
aa_1 = []
for i in amino_acid.keys():
aa_3.append(i)
aa_1.append(amino_acid[i])
for i in range(0, len(aa_variant)):
for j in range(len(aa_3)):
if isinstance(aa_variant[i], float):
break
aa_variant[i] = str(aa_variant[i].upper())
if aa_3[j] in aa_variant[i]:
aa_variant[i] = aa_variant[i].replace(aa_3[j], aa_1[j])
#extracting aa properties from aaindex
#https://www.genome.jp/aaindex/
aa = ['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V']
#RADA880108
polarity = [-0.06, -0.84, -0.48, -0.80, 1.36, -0.73, -0.77, -0.41, 0.49, 1.31, 1.21, -1.18, 1.27, 1.27, 0.0, -0.50, -0.27, 0.88, 0.33, 1.09]
aa_polarity = pd.concat([pd.Series(aa), pd.Series(polarity)], axis=1)
aa_polarity = aa_polarity.rename(columns={0:'amino_acid', 1: 'polarity_value'})
#KLEP840101
net_charge = [0, 1, 0, -1, 0, 0, -1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]
aa_net_charge = pd.concat([pd.Series(aa), pd.Series(net_charge)], axis=1)
aa_net_charge = aa_net_charge.rename(columns={0:'amino_acid', 1: 'net_charge_value'})
#CIDH920103
hydrophobicity = [0.36, -0.52, -0.90, -1.09, 0.70, -1.05, -0.83, -0.82, 0.16, 2.17, 1.18, -0.56, 1.21, 1.01, -0.06, -0.60, -1.20, 1.31, 1.05, 1.21]
aa_hydrophobicity = pd.concat([pd.Series(aa), pd.Series(hydrophobicity)], axis=1)
aa_hydrophobicity = aa_hydrophobicity.rename(columns={0:'amino_acid', 1: 'hydrophobicity_value'})
#FAUJ880103 -- Normalized van der Waals volume
normalized_vdw = [1.00, 6.13, 2.95, 2.78, 2.43, 3.95, 3.78, 0.00, 4.66, 4.00, 4.00, 4.77, 4.43, 5.89, 2.72, 1.60, 2.60, 8.08, 6.47, 3.00]
aa_normalized_vdw = pd.concat([pd.Series(aa), pd.Series(normalized_vdw)], axis=1)
aa_normalized_vdw = aa_normalized_vdw.rename(columns={0:'amino_acid', 1: 'normalized_vdw_value'})
#CHAM820101
polarizability = [0.046, 0.291, 0.134, 0.105, 0.128, 0.180, 0.151, 0.000, 0.230, 0.186, 0.186, 0.219, 0.221, 0.290, 0.131, 0.062, 0.108, 0.409, 0.298, 0.140]
aa_polarizability = pd.concat([pd.Series(aa), pd.Series(polarizability)], axis=1)
aa_polarizability = aa_polarizability.rename(columns={0:'amino_acid', 1: 'polarizability_value'})
#JOND750102
pK_COOH = [2.34, 1.18, 2.02, 2.01, 1.65, 2.17, 2.19, 2.34, 1.82, 2.36, 2.36, 2.18, 2.28, 1.83, 1.99, 2.21, 2.10, 2.38, 2.20, 2.32]
aa_pK_COOH = pd.concat([pd.Series(aa), pd.Series(pK_COOH)], axis=1)
aa_pK_COOH = aa_pK_COOH.rename(columns={0:'amino_acid', 1: 'pK_COOH_value'})
#FASG760104
pK_NH2 = [9.69, 8.99, 8.80, 9.60, 8.35, 9.13, 9.67, 9.78, 9.17, 9.68, 9.60, 9.18, 9.21, 9.18, 10.64, 9.21, 9.10, 9.44, 9.11, 9.62]
aa_pK_NH2 = pd.concat([pd.Series(aa), pd.Series(pK_NH2)], axis=1)
aa_pK_NH2 = aa_pK_NH2.rename(columns={0:'amino_acid', 1: 'pK_NH2_value'})
#ROBB790101 Hydration free energy
hydration = [-1.0, 0.3, -0.7, -1.2, 2.1, -0.1, -0.7, 0.3, 1.1, 4.0, 2.0, -0.9, 1.8, 2.8, 0.4, -1.2, -0.5, 3.0, 2.1, 1.4]
aa_hydration = pd.concat([pd.Series(aa), pd.Series(hydration)], axis=1)
aa_hydration = aa_hydration.rename(columns={0:'amino_acid', 1: 'hydration_value'})
#FASG760101
molecular_weight = [89.09, 174.20, 132.12, 133.10, 121.15, 146.15, 147.13, 75.07, 155.16, 131.17, 131.17, 146.19, 149.21, 165.19,
115.13, 105.09, 119.12, 204.24, 181.19, 117.15]
aa_molecular_weight = pd.concat([pd.Series(aa), pd.Series(molecular_weight)], axis=1)
aa_molecular_weight = aa_molecular_weight.rename(columns={0:'amino_acid', 1: 'molecular_weight_value'})
#FASG760103
optical_rotation = [1.80, 12.50, -5.60, 5.05, -16.50, 6.30, 12.00, 0.00, -38.50, 12.40, -11.00, 14.60, -10.00, -34.50, -86.20,
-7.50, -28.00, -33.70, -10.00, 5.63]
aa_optical_rotation = pd.concat([pd.Series(aa), pd.Series(optical_rotation)], axis=1)
aa_optical_rotation = aa_optical_rotation.rename(columns={0:'amino_acid', 1: 'optical_rotation_value'})
#secondary structure #LEVJ860101
#https://pybiomed.readthedocs.io/en/latest/_modules/CTD.html#CalculateCompositionSolventAccessibility
#SecondaryStr = {'1': 'EALMQKRH', '2': 'VIYCWFT', '3': 'GNPSD'}
# '1'stand for Helix; '2'stand for Strand, '3' stand for coil
secondary_structure = [1, 1, 3, 3, 2, 1, 1, 3, 1, 2, 1, 1, 1, 2, 3, 3, 2, 2, 2, 2]
aa_secondary_structure = pd.concat([pd.Series(aa), pd.Series(secondary_structure)], axis=1)
aa_secondary_structure = aa_secondary_structure.rename(columns={0:'amino_acid', 1: 'secondary_structure_value'})
#_SolventAccessibility = {'-1': 'ALFCGIVW', '1': 'RKQEND', '0': 'MPSTHY'}
# '-1'stand for Buried; '1'stand for Exposed, '0' stand for Intermediate
solvent_accessibility = [-1, 1, 1, 1, -1, 1, 1, -1, 0, -1, -1, 1, 0, -1, 0, 0, 0, -1, 0, -1]
aa_solvent_accessibility = pd.concat([pd.Series(aa), pd.Series(solvent_accessibility)], axis=1)
aa_solvent_accessibility = aa_solvent_accessibility.rename(columns={0:'amino_acid', 1: 'solvent_accessibility_value'})
############################################################################################################################################
#CHAM820102 Free energy of solution in water
free_energy_solution = [-0.368, -1.03, 0.0, 2.06, 4.53, 0.731, 1.77, -0.525, 0.0, 0.791, 1.07, 0.0, 0.656, 1.06, -2.24, -0.524, 0.0, 1.60, 4.91, 0.401]
aa_free_energy_solution = pd.concat([pd.Series(aa), pd.Series(free_energy_solution)], axis=1)
aa_free_energy_solution = aa_free_energy_solution.rename(columns={0:'amino_acid', 1: 'free_energy_solution_value'})
#FAUJ880109 Number of hydrogen bond donors
number_of_hydrogen_bond = [0, 4, 2, 1, 0, 2, 1, 0, 1, 0, 0, 2, 0, 0, 0, 1, 1, 1, 1, 0]
aa_number_of_hydrogen_bond = pd.concat([pd.Series(aa), pd.Series(number_of_hydrogen_bond)], axis=1)
aa_number_of_hydrogen_bond = aa_number_of_hydrogen_bond.rename(columns={0:'amino_acid', 1: 'number_of_hydrogen_bond_value'})
#PONJ960101 Average volumes of residues
volumes_of_residues = [91.5, 196.1, 138.3, 135.2, 114.4, 156.4, 154.6, 67.5, 163.2, 162.6, 163.4, 162.5, 165.9, 198.8, 123.4, 102.0, 126.0, 209.8, 237.2, 138.4]
aa_volumes_of_residues = pd.concat([pd.Series(aa), pd.Series(volumes_of_residues)], axis=1)
aa_volumes_of_residues = aa_volumes_of_residues.rename(columns={0:'amino_acid', 1: 'volumes_of_residues_value'})
#JANJ790102
transfer_free_energy = [0.3, -1.4, -0.5, -0.6, 0.9, -0.7, -0.7, 0.3, -0.1, 0.7, 0.5, -1.8, 0.4, 0.5, -0.3, -0.1, -0.2, 0.3, -0.4, 0.6]
aa_transfer_free_energy = pd.concat([pd.Series(aa), pd.Series(transfer_free_energy)], axis=1)
aa_transfer_free_energy = aa_transfer_free_energy.rename(columns={0:'amino_acid', 1: 'transfer_free_energy_value'})
#WARP780101 amino acid side-chain interactions in 21 proteins
side_chain_interaction = [10.04, 6.18, 5.63, 5.76, 8.89, 5.41, 5.37, 7.99, 7.49, 8.7, 8.79, 4.40, 9.15, 7.98, 7.79, 7.08, 7.00, 8.07, 6.90, 8.88]
aa_side_chain_interaction = pd.concat([pd.Series(aa), pd.Series(side_chain_interaction)], axis=1)
aa_side_chain_interaction = aa_side_chain_interaction.rename(columns={0:'amino_acid', 1: 'side_chain_interaction_value'})
#KARS160101
number_of_vertices = [2.00, 8.00, 5.00, 5.00, 3.00, 6.00, 6.00, 1.00, 7.00, 5.00, 5.00, 6.00, 5.00, 8.00, 4.00, 3.00, 4.00, 11.00, 9.00, 4.00]
aa_number_of_vertices = pd.concat([pd.Series(aa), pd.Series(number_of_vertices)], axis=1)
aa_number_of_vertices = aa_number_of_vertices.rename(columns={0:'amino_acid', 1: 'number_of_vertices_value'})
#KARS160102 Number of edges (size of the graph)
number_of_edges = [1.00, 7.00, 4.00, 4.00, 2.00, 5.00, 5.00, 0.00, 6.00, 4.00, 4.00, 5.00, 4.00, 8.00, 4.00, 2.00, 3.00, 12.00, 9.00, 3.00]
aa_number_of_edges = pd.concat([pd.Series(aa), pd.Series(number_of_edges)], axis=1)
aa_number_of_edges = aa_number_of_edges.rename(columns={0:'amino_acid', 1: 'number_of_edges_value'})
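# Example (minimal sketch, not used elsewhere in this script): each block above builds
# a two-column frame mapping the 20 amino acids to one physicochemical property. Since
# they all share the 'amino_acid' column, they can be merged into a single per-residue
# lookup table. This assumes `aa` is the residue list defined earlier in this script.
def _example_merge_aa_property_tables():
    from functools import reduce
    frames = [aa_molecular_weight, aa_optical_rotation, aa_secondary_structure,
              aa_solvent_accessibility, aa_free_energy_solution,
              aa_number_of_hydrogen_bond, aa_volumes_of_residues,
              aa_transfer_free_energy, aa_side_chain_interaction,
              aa_number_of_vertices, aa_number_of_edges]
    # successive inner joins on the shared 'amino_acid' key
    return reduce(lambda left, right: left.merge(right, on='amino_acid'), frames)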
from __future__ import print_function
from builtins import range
import numpy as np
import pandas as pd
import sys
import argparse
import os
from sklearn.datasets import load_boston
from sklearn.linear_model import LinearRegression, Ridge, Lasso, LogisticRegression
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor
from clr_regressors import KPlaneRegressor, CLRpRegressor, CLRcRegressor, RegressorEnsemble
from evaluate import evaluate_all
def preprocess_data(X, y):
X -= np.min(X, axis=0, keepdims=True)
X /= np.max(X, axis=0, keepdims=True) / 2.0
X -= 1.0
shuffle_idx = np.random.choice(X.shape[0], X.shape[0], replace=False)
X = X[shuffle_idx]
y = y[shuffle_idx]
return X, y
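# Example (minimal sketch, not called by the script): preprocess_data rescales every
# feature column to the range [-1, 1] and then shuffles the rows. The tiny arrays
# below are hypothetical and only demonstrate the scaling step.
def _example_preprocess_scaling():
    X = np.array([[0.0, 10.0], [5.0, 20.0], [10.0, 30.0]])
    y = np.array([1.0, 2.0, 3.0])
    Xs, _ = preprocess_data(X, y)
    # after scaling, the feature values span exactly [-1, 1] (row order is random)
    assert np.isclose(Xs.min(), -1.0) and np.isclose(Xs.max(), 1.0)
    return Xs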
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Search parameters')
parser.add_argument('--dataset', required=True)
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--n_jobs', default=1, type=int)
parser.add_argument('--global_parallel', dest='global_parallel', action='store_true')
args = parser.parse_args()
np.random.seed(args.seed)
if args.dataset == 'boston':
boston = load_boston()
X = boston.data
y = boston.target
constr_id = 8
elif args.dataset == 'abalone':
        abalone_data = pd.read_csv('data/abalone.data', header=None)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 17 13:06:36 2018
@author: shlomi
OK
1)fix and get time series of SAOD volcanic index - see how kuchar did it
he did it SAD = surface area density of aerosols at 54 hPa
2)use enso3.4 anom for enso
3)use singapore qbo(level=50) for qbo
4)use solar f10.7 for solar
5) for MLR use the MERRA grid (reduced-interpolated), and the swoosh different grids
6) prepare MLR framework and run on cluster
more analysis:
1) include BDC from paper (reconstruct 70 hPa - file from Chaim, weighted mean for tropics after zonal mean, i.e. time_series only)
2) include Temp (500 hpa) from merra time_series mean -add feature to choose the "tropical" definition where the weighted mean should be taken i.e. -20 to 20 lat to -5 to 5
1) and 2) can replace enso
3) regardless print a heatmap of regressors corr.
4) mask or remove Pinatubo in volcanic series or in all times
    1) do this manually this time...
5) adding a time series reconstruction to results
    1) already exists... apply on demand because it's a lot of data
2) use plotter subroutines to reconstruct the time-series fields.
    3) even better: work with the psyplot GUI to select and plot what I want
6) change the data retrieval scheme to include different regressors:
1) first save the regressors in file format - regressors#1 and have a file;
2) MLR_all will run on all regressors list and append _R# suffix before .nc;
need to change save and load routines to accomplish this.
new methodology:
1) write single function to produce or load single or datasets of
regressors using _produce or _load, remember to save with _index.nc suffix
2) use load_all_regressors to load all of the regressors in reg_path
3) select specific regressors and do anomaly(after time slice)
"""
from strat_paths import work_chaim
from strat_paths import cwd
reg_path = cwd / 'regressors'
def print_saved_file(name, path):
print('{} was saved to {}'.format(name, path))
return
def load_all_regressors(loadpath=reg_path):
"""load all regressors(end with _index.nc') from loadpath to dataset"""
import xarray as xr
from collections import OrderedDict
from aux_functions_strat import path_glob
da_list = []
da_list_from_ds = []
files = sorted(path_glob(reg_path, '*index.nc'))
for file in files:
name = file.as_posix().split(
'/')[-1].split('.')[0].replace('_index', '')
try:
da = xr.load_dataarray(file)
da = da.reset_coords(drop=True)
da.name = name
da_list.append(da)
except ValueError:
ds = xr.load_dataset(file)
for da in ds.data_vars.values():
da = da.reset_coords(drop=True)
try:
da.name = da.attrs['name']
except KeyError:
da.name = name + '_' + da.name
# avoid name repetition:
da.name = "_".join(OrderedDict.fromkeys(da.name.split('_')))
da_list_from_ds.append(da)
for das in da_list_from_ds:
da_list.append(das)
ds = xr.merge(da_list)
return ds
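# Usage sketch (illustration of the "new methodology" notes in the module docstring,
# not called anywhere in this module). The variable names inside the returned dataset
# depend on which '*_index.nc' files exist in `reg_path`; the selection below assumes
# the anom_nino3p4 index has already been produced.
def _example_load_and_select_regressors():
    ds = load_all_regressors()
    enso = ds['anom_nino3p4'].dropna('time').sel(time=slice('1984', '2018'))
    return enso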
def prepare_regressors(name='Regressors', plot=True, save=False,
rewrite_file=True, normalize=False, savepath=None,
rolling=None):
"""get all the regressors and prepare them save to file.
replaced prepare_regressors for MLR function"""
import aux_functions_strat as aux
import xarray as xr
import seaborn as sns
import matplotlib.pyplot as plt
import os
from pathlib import Path
# regressors names and filenames dict:
reg_file_dict = {'bdc': 'era5_bdc_index.nc',
't500': 'era5_t500_index.nc',
'enso': 'anom_nino3p4_index.nc',
'solar': 'solar_10p7cm_index.nc',
'vol': 'vol_index.nc',
'qbo': 'era5_qbo_index.nc',
'olr': 'olr_index.nc',
'ch4': 'ch4_index.nc',
'wind': 'era5_wind_shear_index.nc',
'cold': 'cpt_index.nc',
'aod': 'merra2_totexttau_index.nc'}
if savepath is None:
savepath = Path().cwd() / 'regressors/'
# aod:
    aod = load_regressor(reg_file_dict['aod'], plot=False, deseason=False)
aod.name = 'aod'
# bdc:
bdc = load_regressor(reg_file_dict['bdc'], plot=False, deseason=True)
if rolling is not None:
bdc = bdc.rolling(time=3).mean()
bdc.name = 'bdc'
# t500
t500 = load_regressor(reg_file_dict['t500'], plot=False, deseason=True)
if rolling is not None:
t500 = t500.rolling(time=3).mean()
t500.name = 't500'
# ENSO
enso = load_regressor(reg_file_dict['enso'], plot=False, deseason=False)
enso.name = 'enso'
# SOLAR
solar = load_regressor(reg_file_dict['solar'], plot=False, deseason=False)
solar.name = 'solar'
# Volcanic forcing
vol = load_regressor(reg_file_dict['vol'], plot=False, deseason=False)
vol.name = 'vol'
# get the qbo 2 pcs:
qbo = load_regressor(reg_file_dict['qbo'], plot=False, deseason=False,
is_dataset=True)
qbo_1 = qbo['qbo_1']
qbo_2 = qbo['qbo_2']
# get GHG:
# ghg = load_regressor(reg_file_dict['ghg'], plot=False, deseason=False)
# ghg.name = 'ghg'
# get cold point:
cold = load_regressor(reg_file_dict['cold'], plot=False, deseason=True)
if rolling is not None:
cold = cold.rolling(time=3).mean()
cold.name = 'cold'
# get olr:
olr = load_regressor(reg_file_dict['olr'], plot=False, deseason=True)
olr.name = 'olr'
# get ch4:
ch4 = load_regressor(reg_file_dict['ch4'], plot=False, deseason=False,
normalize=True)
ch4.name = 'ch4'
# get wind_shear:
wind = load_regressor(reg_file_dict['wind'], plot=False, deseason=False)
wind.name = 'wind'
da_list = [x for x in reg_file_dict.keys() if x != 'qbo']
da_list += ['qbo_1', 'qbo_2']
ds = xr.Dataset()
for da_name in da_list:
ds[da_name] = locals()[da_name]
# fix vol and ch4
ds['vol'] = ds['vol'].fillna(1.31)
ds = ds.reset_coords(drop=True)
# ds['ch4'] = ds['ch4'].fillna(0.019076 + 1.91089)
# if poly is not None:
# da = ds.to_array(dim='regressors').dropna(dim='time').T
# da = poly_features(da, feature_dim='regressors', degree=poly,
# interaction_only=False, include_bias=False,
# normalize_poly=False)
# ds = da.to_dataset(dim='regressors')
# name = 'Regressors_d' + str(poly)
# else:
# name = 'Regressors'
if normalize:
ds = ds.apply(aux.normalize_xr, norm=1,
keep_attrs=True, verbose=False)
if save:
if rewrite_file:
try:
                os.remove(str(savepath / (name + '.nc')))
except OSError as e: # if failed, report it back to the user
print("Error: %s - %s." % (e.filename, e.strerror))
print('Updating ' + name + '.nc' + ' in ' + str(savepath))
filename = name + '.nc'
ds.to_netcdf(savepath / filename)
print_saved_file(name, savepath)
if plot:
le = len(ds.data_vars)
df = ds.to_dataframe()
df.plot()
plt.figure()
if le <= 20:
sns.heatmap(df.corr(), annot=True, fmt='.2f', cmap='bwr',
center=0.0)
else:
sns.heatmap(df.corr(), cmap='bwr', center=0.0)
return ds
def load_regressor(regressor_file, plot=True, deseason=True, normalize=False,
path=None, is_dataset=False):
"""loads a regressor from regressors folder. you can deseason it,
plot it, normalize it, etc..."""
import xarray as xr
from pathlib import Path
if path is None:
path = Path().cwd() / 'regressors/'
if is_dataset:
reg = xr.open_dataset(path / regressor_file)
else:
reg = xr.open_dataarray(path / regressor_file)
if deseason:
from aux_functions_strat import deseason_xr
reg = deseason_xr(reg, how='mean')
if normalize:
from aux_functions_strat import normalize_xr
# normalize = remove mean and divide by std
reg = normalize_xr(reg, verbose=False)
if plot:
if is_dataset:
reg.to_pandas().plot()
else:
reg.plot()
return reg
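# Usage sketch (illustration only): load the ENSO anomaly index without plotting or
# deseasoning, exactly as prepare_regressors() does above.
# enso = load_regressor('anom_nino3p4_index.nc', plot=False, deseason=False)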
def split_anom_nino3p4_to_EN_LN_neutral(loadpath=reg_path, savepath=None):
ds = load_all_regressors(loadpath)
enso = ds['anom_nino3p4'].dropna('time')
EN = enso[enso >= 0.5].reindex(time=enso['time']).fillna(0)
EN.attrs['action'] = 'only EN (ENSO >=0.5) kept, other is 0.'
LN = enso[enso <= -0.5].reindex(time=enso['time']).fillna(0)
LN.attrs['action'] = 'only LN (ENSO <=-0.5) kept, other is 0.'
neutral = enso[(enso > -0.5) & (enso < 0.5)
].reindex(time=enso['time']).fillna(0)
neutral.attrs['action'] = 'only neutENSO (ENSO<0.5 & ENSO>-0.5) kept, other is 0.'
if savepath is not None:
EN.to_netcdf(savepath / 'EN_index.nc')
LN.to_netcdf(savepath / 'LN_index.nc')
neutral.to_netcdf(savepath / 'neutENSO_index.nc')
return EN, LN, neutral
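# Worked example (hypothetical numbers): with the +/-0.5 threshold used above, a
# monthly anomaly series [0.9, 0.1, -0.7] is split into EN = [0.9, 0, 0],
# LN = [0, 0, -0.7] and neutral = [0, 0.1, 0].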
def _produce_wind_shear(source='singapore', savepath=None):
import xarray as xr
from pathlib import Path
if source == 'singapore':
u = _download_singapore_qbo(path=savepath)
filename = 'singapore_wind_shear_index.nc'
elif source == 'era5':
u = xr.open_dataarray(savepath / 'ERA5_U_eq_mean.nc')
filename = 'era5_wind_shear_index.nc'
wind_shear = u.diff('level').sel(level=70)
wind_shear.name = 'wind_shear'
if savepath is not None:
wind_shear.to_netcdf(savepath / filename)
print_saved_file(filename, savepath)
return wind_shear
def _download_CH4(filename='ch4_mm.nc', loadpath=None,
trend=False, savepath=None, interpolate=False):
import xarray as xr
import pandas as pd
import wget
filepath = loadpath / filename
if filepath.is_file():
print('CH4 monthly means from NOAA ERSL already d/l and saved!')
# read it to data array (xarray)
ch4_xr = xr.open_dataset(loadpath / filename)
    # else d/l the file and first read it to df (pandas),
# then to xarray then save as nc:
else:
link = 'ftp://aftp.cmdl.noaa.gov/products/trends/ch4/ch4_mm_gl.txt'
wget.download(link, out=loadpath.as_posix() + '/ch4_mm_gl.txt')
ch4_df = pd.read_csv(loadpath / 'ch4_mm_gl.txt', delim_whitespace=True,
comment='#',
names=['year', 'month', 'decimal', 'average',
'average_unc', 'trend', 'trend_unc'])
print('Downloading CH4 monthly means from NOAA ERSL website...')
ch4_df = ch4_df.drop(0)
idx = pd.to_datetime(dict(year=ch4_df.year, month=ch4_df.month,
day='01'))
ch4_df = ch4_df.set_index(idx)
ch4_df = ch4_df.drop(ch4_df.iloc[:, 0:3], axis=1)
ch4_df = ch4_df.rename_axis('time')
ch4_xr = xr.Dataset(ch4_df)
ch4_xr.attrs['long_name'] = 'Monthly averages of CH4 concentrations'
ch4_xr.attrs['units'] = 'ppb'
# if savepath is not None:
# ch4_xr.to_netcdf(savepath / filename)
# print('Downloaded CH4 monthly means data and saved it to: ' + filename)
# return ch4_xr
# if trend:
# ch4 = ch4_xr.trend
# print_saved_file('trend ch4_index.nc', savepath)
# else:
ch4 = ch4_xr.trend
if interpolate:
dt = pd.date_range(start='1979-01-01', end='2019-12-01', freq='MS')
ch4 = ch4.interp(time=dt)
ch4 = ch4.interpolate_na(dim='time', method='spline')
if savepath is not None:
ch4.to_netcdf(savepath / 'ch4_index.nc', 'w')
print_saved_file('ch4_index.nc', savepath)
return ch4
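# Usage sketch (illustration only, assumes reg_path is writable): download/read the
# NOAA trend series and interpolate it onto a monthly grid before saving the index.
# ch4 = _download_CH4(loadpath=reg_path, savepath=reg_path, interpolate=True)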
def _produce_CH4_jaxa(load_path, savepath=None):
import pandas as pd
"""http://www.gosat.nies.go.jp/en/assets/whole-atmosphere-monthly-mean_ch4_dec2019.zip"""
df = pd.read_csv(
load_path /
'10x60.trend.method2.txt',
comment='#',
header=None, delim_whitespace=True)
df.columns = ['year', 'month', 'mm', 'trend']
idx = pd.to_datetime(dict(year=df.year, month=df.month,
day='01'))
df = df.set_index(idx)
df.index.name = 'time'
df = df.drop(['year', 'month'], axis=1)
ds = df.to_xarray() * 1000.0
for da in ds.data_vars:
ds[da].attrs['unit'] = 'ppb'
return ds
def _produce_cpt_swoosh(load_path=work_chaim, savepath=None):
import xarray as xr
import pandas as pd
sw = xr.open_dataset(load_path /
'swoosh-v02.6-198401-201812/swoosh-v02.6-198401-201812-latpress-2.5deg-L31.nc', decode_times=False)
time = pd.date_range('1984-01-01', freq='MS', periods=sw.time.size)
sw['time'] = time
# cold point tropopause:
cpt = sw['cptropt']
cpt = cpt.sel(lat=slice(-15, 15))
cpt = cpt.mean('lat')
if savepath is not None:
cpt.to_netcdf(savepath / 'cpt_index.nc')
print_saved_file('cpt_index.nc', savepath)
return cpt
def _produce_cpt_sean_ERA5(load_path=work_chaim/'Sean - tropopause', savepath=None):
import xarray as xr
import pandas as pd
from aux_functions_strat import lat_mean
from aux_functions_strat import anomalize_xr
cpt = xr.load_dataset(load_path/'era5.tp.monmean.zm.nc')['ctpt']
cpt = cpt.sel(lat=slice(15, -15))
# attrs = cpt.attrs
cpt = lat_mean(cpt)
cpt.attrs['data from'] = 'ERA5'
cpt['time'] = pd.to_datetime(cpt['time'].values).to_period('M').to_timestamp()
cpt = anomalize_xr(cpt, freq='MS')
if savepath is not None:
cpt.to_netcdf(savepath / 'cpt_ERA5_index.nc')
return cpt
#def _produce_cold_point(savepath=None, lonslice=None):
# import xarray as xr
# import sys
# import os
# # lonslice is a two-tuple : (minlon, maxlon)
# if savepath is None:
# savepath = os.getcwd() + '/regressors/'
# if sys.platform == 'linux':
# work_path = '/home/shlomi/Desktop/DATA/Work Files/Chaim_Stratosphere_Data/'
# elif sys.platform == 'darwin': # mac os
# work_path = '/Users/shlomi/Documents/Chaim_Stratosphere_Data/'
# era5 = xr.open_dataarray(work_path + 'ERA5_T_eq_all.nc')
# if lonslice is None:
# # cold_point = era5.sel(level=100).quantile(0.1, ['lat',
# # 'lon'])
# cold_point = era5.sel(level=100)
# cold_point = cold_point.mean('lon')
# cold_point = cold_point.mean('lat')
# # cold_point = cold_point.rolling(time=3).mean()
#
# # cold_point = era5.sel(level=slice(150, 50)).min(['level', 'lat',
# # 'lon'])
# else:
# # cold_point = era5.sel(level=100).sel(lon=slice(*lonslice)).quantile(
# # 0.1, ['lat', 'lon'])
# cold_point = era5.sel(level=slice(150, 50)).sel(
# lon=slice(*lonslice)).min(['level', 'lat', 'lon'])
# cold_point.attrs['lon'] = lonslice
# cold_point.name = 'cold'
# cold_point.to_netcdf(savepath + 'cold_point_index.nc')
# print('Saved cold_point_index.nc to ' + savepath)
# return cold_point
def _produce_CDAS_QBO(savepath=None):
import pandas as pd
url = 'https://www.cpc.ncep.noaa.gov/data/indices/qbo.u50.index'
df = pd.read_csv(url, header=2, delim_whitespace=True)
anom_index = df[df['YEAR'] == 'ANOMALY'].index.values.item()
orig = df.iloc[0:anom_index - 2, :]
stan_index = df[df['YEAR'] == 'STANDARDIZED'].index.values.item()
anom = df.iloc[anom_index + 2: stan_index - 2, :]
stan = df.iloc[stan_index + 2:-1, :]
dfs = []
for df in [orig, anom, stan]:
df = df.head(42) # keep all df 1979-2020
# df.drop(df.tail(1).index, inplace=True)
df = df.melt(id_vars='YEAR', var_name='MONTH')
datetime = pd.to_datetime((df.YEAR + '-' + df.MONTH).apply(str), format='%Y-%b')
df.index = datetime
df = df.sort_index()
df = df.drop(['YEAR', 'MONTH'], axis=1)
df['value'] = df['value'].astype(float)
dfs.append(df)
all_df = pd.concat(dfs, axis=1)
all_df.columns = ['original', 'anomaly', 'standardized']
all_df.index.name='time'
qbo = all_df.to_xarray()
qbo.attrs['name'] = 'qbo_cdas'
qbo.attrs['long_name'] = 'CDAS 50 mb zonal wind index'
qbo['standardized'].attrs = qbo.attrs
if savepath is not None:
qbo.to_netcdf(savepath / 'qbo_cdas_index.nc')
print_saved_file('qbo_cdas_index.nc', savepath)
return qbo
def _produce_CO2(loadpath, filename='co2.txt'):
import requests
import io
import xarray as xr
import pandas as pd
from aux_functions_strat import save_ncfile
# TODO: complete this:
filepath = loadpath / filename
if filepath.is_file():
print('co2 index already d/l and saved!')
co2 = xr.open_dataset(filepath)
else:
print('Downloading CO2 index data from cpc website...')
url = 'https://www.esrl.noaa.gov/gmd/webdata/ccgg/trends/co2/co2_mm_mlo.txt'
s = requests.get(url).content
co2_df = pd.read_csv(io.StringIO(s.decode('utf-8')),
delim_whitespace=True, comment='#')
co2_df.columns = ['year', 'month', 'decimal_date', 'monthly_average', 'deseasonalized', 'days', 'days_std', 'mm_uncertainty']
co2_df['dt'] = pd.to_datetime(co2_df['year'].astype(str) + '-' + co2_df['month'].astype(str))
co2_df = co2_df.set_index('dt')
co2_df.index.name = 'time'
co2 = co2_df[['monthly_average', 'mm_uncertainty']].to_xarray()
co2 = co2.rename(
{'monthly_average': 'co2', 'mm_uncertainty': 'co2_error'})
co2.attrs['name'] = 'CO2 index'
co2.attrs['source'] = url
co2['co2'].attrs['units'] = 'ppm'
save_ncfile(co2, loadpath, 'co2_index.nc')
return co2
def _produce_GHG(loadpath, savepath=None):
import xarray as xr
import numpy as np
import pandas as pd
from pathlib import Path
aggi = pd.read_csv(loadpath / 'AGGI_Table.csv', index_col='Year', header=2)
aggi = aggi[:-3]
ghg = aggi.loc[:, '1990 = 1']
ghg.name = 'GHG-RF'
ghg.index = pd.to_datetime(ghg.index, infer_datetime_format=True)
ghg_m = ghg.resample('MS').interpolate()
# extend the index :
ghg_m = pd.DataFrame(data=ghg_m,
index=pd.date_range(start=ghg_m.index[0],
end='2018-09-01',
freq=ghg_m.index.freq))
# fit data:
di = ghg_m.index
df = ghg_m.reset_index().drop('index', 1)
fit_df = df.dropna()
fit = np.polyfit(fit_df.index.values, fit_df.values, 3)
extp_func = np.poly1d(np.squeeze(fit))
# extrapolate:
nans_x = pd.isnull(df).any(1).nonzero()[0]
Y = np.expand_dims(extp_func(nans_x), 1)
df.loc[nans_x] = Y
df.index = di
ghg = xr.DataArray(np.squeeze(df), dims='time')
if savepath is not None:
ghg.to_netcdf(savepath / 'ghg_index.nc')
print_saved_file('ghg_index.nc', savepath)
return ghg
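# Example (minimal sketch, not called anywhere): the gap filling above fits a cubic
# polynomial to the observed monthly values and evaluates it at the missing time
# steps. The toy series below is hypothetical and only demonstrates that idea.
def _example_polyfit_extrapolation():
    import numpy as np
    import pandas as pd
    y = pd.Series([1.0, 1.2, 1.5, 1.9, np.nan, np.nan])
    x = np.arange(len(y), dtype=float)
    fit = np.poly1d(np.polyfit(x[y.notna().values], y.dropna().values, 3))
    # evaluate the fitted polynomial at every time step and fill only the gaps
    return y.fillna(pd.Series(fit(x), index=y.index))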
def _produce_OLR(loadpath, savepath=None):
import xarray as xr
import numpy as np
import pandas as pd
from pathlib import Path
olr = xr.open_dataset(loadpath / 'olr-monthly_v02r07_197901_201901.nc',
decode_times=False)
olr['time'] = pd.date_range('1979-01-01', '2019-01-01', freq='MS')
olr = olr.mean('lon', keep_attrs=True)
olr = olr.sel(lat=slice(-20, 20))
olr['cos_lat'] = np.cos(np.deg2rad(olr['lat']))
olr['olr_mean'] = (olr.cos_lat * olr.olr).sum('lat', keep_attrs=True) / \
olr.cos_lat.sum('lat', keep_attrs=True)
olr_da = olr.olr_mean
olr_da.attrs = olr.olr.attrs
if savepath is not None:
olr_da.to_netcdf(savepath / 'olr_index.nc')
print_saved_file('olr_index.nc', savepath)
return olr_da
def _produce_T500_from_era5(loadpath, savepath=None):
""" """
# import os
import xarray as xr
from aux_functions_strat import lat_mean
from aux_functions_strat import xr_rename_sort
t500 = xr.open_dataarray(loadpath / 'era5_t500_mm_1979-2019.nc')
t500 = xr_rename_sort(t500)
t500 = t500.mean('lon')
t500 = lat_mean(t500.sel(lat=slice(-20, 20)))
if savepath is not None:
t500.to_netcdf(savepath / 'era5_t500_index.nc')
print_saved_file('era5_t500_index.nc', savepath)
return t500
# def _produce_qbo_berlin()
def _produce_eof_pcs(loadpath, npcs=2, name='qbo', source='singapore',
levels=(100, 10), plot=True, savepath=None):
import xarray as xr
import aux_functions_strat as aux
from eofs.xarray import Eof
import matplotlib.pyplot as plt
import numpy as np
from pathlib import Path
# load and order data dims for eofs:
if source == 'singapore':
U = _download_singapore_qbo(path=loadpath)
U = aux.xr_order(U)
# get rid of nans:
U = U.sel(level=slice(90, 10))
U = U.dropna(dim='time')
filename = 'singapore_qbo_index.nc'
elif source == 'era5':
U = xr.open_dataarray(loadpath / 'ERA5_U_eq_mean.nc')
U = U.sel(level=slice(100, 10))
# U = U.sel(time=slice('1987', '2018'))
filename = 'era5_qbo_index.nc'
elif source == 'swoosh':
U = xr.open_dataset(loadpath / 'swoosh_latpress-2.5deg.nc')
U = U['combinedanomfillanomh2oq']
U = U.sel(lat=slice(-20, 20), level=slice(*levels))
U = aux.xr_weighted_mean(U)
filename = 'swoosh_h2o_index.nc'
solver = Eof(U)
eof = solver.eofsAsCorrelation(neofs=npcs)
pc = solver.pcs(npcs=npcs, pcscaling=1)
pc.attrs['long_name'] = source + ' ' + name + ' index'
pc['mode'] = pc.mode + 1
eof['mode'] = eof.mode + 1
vf = solver.varianceFraction(npcs)
errors = solver.northTest(npcs, vfscaled=True)
[name + '_' + str(i) for i in pc]
qbo_ds = xr.Dataset()
for ar in pc.groupby('mode'):
qbo_ds[name + '_' + str(ar[0])] = ar[1]
if source == 'era5':
qbo_ds = -qbo_ds
qbo_ds = qbo_ds.reset_coords(drop=True)
if savepath is not None:
qbo_ds.to_netcdf(savepath / filename, 'w')
print_saved_file(filename, savepath)
if plot:
plt.close('all')
plt.figure(figsize=(8, 6))
eof.plot(hue='mode')
plt.figure(figsize=(10, 4))
pc.plot(hue='mode')
plt.figure(figsize=(8, 6))
x = np.arange(1, len(vf.values) + 1)
y = vf.values
ax = plt.gca()
ax.errorbar(x, y, yerr=errors.values, color='b', linewidth=2, fmt='-o')
ax.set_xticks(np.arange(1, len(vf.values) + 1, 1))
ax.set_yticks(np.arange(0, 1, 0.1))
ax.grid()
ax.set_xlabel('Eigen Values')
plt.show()
return qbo_ds
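# Usage sketch (illustration only): derive the two leading QBO principal components
# from the ERA5 equatorial zonal-mean wind, without plotting. Assumes the file
# 'ERA5_U_eq_mean.nc' is present in the load path.
# qbo = _produce_eof_pcs(reg_path, npcs=2, name='qbo', source='era5', plot=False)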
#def download_enso_MEI(path='/Users/shlomi/Dropbox/My_backup/Python_projects/Stratosphere_Chaim/',
# filename='enso_MEI.nc'):
# import os.path
# import io
# import pandas as pd
# import xarray as xr
# import numpy as np
# if os.path.isfile(os.path.join(path, filename)):
# print('NOAA ENSO MEI already d/l and saved!')
# # read it to data array (xarray)
# nino_xr = xr.open_dataset(path + filename)
# # else d/l the file and first read it to df (pandas), then to xarray then save as nc:
# else:
# print('Downloading ENSO MEI data from noaa esrl website...')
# url = 'https://www.esrl.noaa.gov/psd/enso/mei/table.html'
# nino_df = pd.read_html(url)
# # idx = pd.to_datetime(dict(year=nino_df.YR, month=nino_df.MON, day='1'))
# # nino_df = nino_df.set_index(idx)
# # nino_df = nino_df.drop(nino_df.iloc[:, 0:2], axis=1)
# # nino_df.columns = ['NINO1+2', 'ANOM_NINO1+2', 'NINO3', 'ANOM_NINO3',
# # 'NINO4', 'ANOM_NINO4', 'NINO3.4', 'ANOM_NINO3.4']
# # nino_df = nino_df.rename_axis('time')
# # nino_xr = xr.Dataset(nino_df)
# # nino_xr.to_netcdf(path + filename)
# print('Downloaded NOAA ENSO MEI data and saved it to: ' + filename)
# return nino_df
def _download_solar_10p7cm_flux(loadpath, filename='solar_10p7cm.nc',
savepath=None, index=False):
"""download the solar flux from Dominion Radio astrophysical Observatory
Canada"""
import ftputil
import pandas as pd
import xarray as xr
from pathlib import Path
filepath = loadpath / filename
if filepath.is_file():
print('Solar flux 10.7cm from DRAO Canada already d/l and saved!')
# read it to data array (xarray)
        solar_xr = xr.open_dataset(loadpath / filename)
    # else d/l the file and first read it to df (pandas),
# then to xarray then save as nc:
else:
filename_todl = 'solflux_monthly_average.txt'
with ftputil.FTPHost('ftp.geolab.nrcan.gc.ca', 'anonymous', '') as ftp_host:
ftp_host.chdir('/data/solar_flux/monthly_averages/')
            ftp_host.download(filename_todl, str(loadpath / filename_todl))
        solar_df = pd.read_csv(loadpath / filename_todl, delim_whitespace=True,
skiprows=1)
print('Downloading solar flux 10.7cm from DRAO Canada website...')
idx = pd.to_datetime(dict(year=solar_df.Year, month=solar_df.Mon,
day='1'))
solar_df = solar_df.set_index(idx)
solar_df = solar_df.drop(solar_df.iloc[:, 0:2], axis=1)
solar_df = solar_df.rename_axis('time')
solar_xr = xr.Dataset(solar_df)
solar_xr.attrs['long_name'] = 'Monthly averages of Solar 10.7 cm flux'
if savepath is not None:
solar_xr.to_netcdf(savepath / filename)
print('Downloaded solar flux 10.7cm data and saved it to: ' + filename)
if index:
solar = solar_xr.Adjflux
solar.attrs['long_name'] = 'Solar Adjflux 10.7cm'
if savepath is not None:
solar.to_netcdf(savepath / 'solar_10p7cm_index.nc')
print_saved_file('solar_10p7cm_index.nc', savepath)
return solar
else:
return solar_xr
def _produce_strato_aerosol(loadpath, savepath=None, index=False,
filename='multiple_input4MIPs_aerosolProperties_CMIP_IACETH-SAGE3lambda-3-0-0_gn_185001_201412.nc'):
import os.path
import xarray as xr
import numpy as np
import pandas as pd
from pathlib import Path
from datetime import date, timedelta
filepath = loadpath / filename
if filepath.is_file():
aerosol_xr = xr.open_dataset(loadpath / filename, decode_times=False)
start_date = date(1850, 1, 1)
days_from = aerosol_xr.time.values.astype('O')
offset = np.empty(len(days_from), dtype='O')
for i in range(len(days_from)):
offset[i] = (start_date + timedelta(days_from[i])).strftime('%Y-%m')
aerosol_xr['time'] = pd.to_datetime(offset)
        print('Importing ' + str(loadpath / filename) + ' to Xarray')
else:
print('File not found...')
return
if index:
from aux_functions_strat import xr_weighted_mean
vol = aerosol_xr.sad.sel(altitude=20)
vol = vol.rename({'latitude': 'lat'})
vol = xr_weighted_mean(vol.sel(lat=slice(-20, 20)))
        vol.attrs['long_name'] = 'Stratospheric aerosol density'
if savepath is not None:
vol.to_netcdf(savepath / 'vol_index.nc')
print_saved_file('vol_index.nc', savepath)
return vol
else:
return aerosol_xr
#def download_nao(path='/Users/shlomi/Dropbox/My_backup/Python_projects/Stratosphere_Chaim/',
# filename='noaa_nao.nc'):
# import requests
# import os.path
# import io
# import pandas as pd
# import xarray as xr
# import numpy as np
# if os.path.isfile(os.path.join(path, filename)):
# print('Noaa NAO already d/l and saved!')
# # read it to data array (xarray)
# nao_xr = xr.open_dataarray(path + filename)
# # else d/l the file and first read it to df (pandas), then to xarray then save as nc:
# else:
# print('Downloading nao data from noaa ncep website...')
# url = 'http://www.cpc.ncep.noaa.gov/products/precip/CWlink/pna/norm.nao.monthly.b5001.current.ascii'
# s = requests.get(url).content
# nao_df = pd.read_csv(io.StringIO(s.decode('utf-8')), header=None, delim_whitespace=True)
# nao_df.columns = ['YR', 'MON', 'nao']
# idx = pd.to_datetime(dict(year=nao_df.YR, month=nao_df.MON, day='1'))
# nao_df = nao_df.set_index(idx)
# nao_df = nao_df.drop(nao_df.iloc[:, 0:2], axis=1)
# nao_df = nao_df.rename_axis('time')
# nao_df = nao_df.squeeze(axis=1)
# nao_xr = xr.DataArray(nao_df)
# nao_xr.attrs['long_name'] = 'North Atlantic Oscillation'
# nao_xr.name = 'NAO'
# nao_xr.to_netcdf(path + filename)
# print('Downloaded nao data and saved it to: ' + filename)
# return nao_xr
def _download_MJO_from_cpc(loadpath, filename='mjo.nc', savepath=None):
import requests
import io
import xarray as xr
import pandas as pd
from aux_functions_strat import save_ncfile
# TODO: complete this:
filepath = loadpath / filename
if filepath.is_file():
print('MJO index already d/l and saved!')
mjo = xr.open_dataset(filepath)
else:
print('Downloading MJO index data from cpc website...')
url = 'https://www.cpc.ncep.noaa.gov/products/precip/CWlink/daily_mjo_index/proj_norm_order.ascii'
s = requests.get(url).content
mjo_df = pd.read_csv(io.StringIO(s.decode('utf-8')),
delim_whitespace=True, na_values='*****',
header=1)
mjo_df['dt'] = pd.to_datetime(mjo_df['PENTAD'], format='%Y%m%d')
mjo_df = mjo_df.set_index('dt')
mjo_df = mjo_df.drop('PENTAD', axis=1)
mjo_df.index.name = 'time'
mjo = mjo_df.to_xarray()
mjo.attrs['name'] = 'MJO index'
mjo.attrs['source'] = url
save_ncfile(mjo, loadpath, 'mjo.nc')
return mjo
def _read_MISO_index(loadpath, filename='miso.nc', savepath=None):
from aux_functions_strat import path_glob
from aux_functions_strat import save_ncfile
import pandas as pd
files = path_glob(loadpath, '*_pc_MJJASO.dat')
dfs = []
for file in files:
df = pd.read_csv(file, delim_whitespace=True, header=None)
df.columns=['days_begining_at_MAY1', 'miso1', 'miso2', 'phase']
year = file.as_posix().split('/')[-1].split('_')[0]
dt = pd.date_range('{}-05-01'.format(year),
periods=df['days_begining_at_MAY1'].iloc[-1], freq='d')
df = df.set_index(dt)
dfs.append(df)
dff = pd.concat(dfs, axis=0)
dff = dff.sort_index()
dff = dff.drop('days_begining_at_MAY1', axis=1)
dff.index.name = 'time'
miso = dff.to_xarray()
miso.attrs['name'] = 'MISO index'
save_ncfile(miso, loadpath, 'miso.nc')
return miso
def _read_all_indian_rain_index(
loadpath, filename='all_indian_rain.nc', savepath=None):
import pandas as pd
from aux_functions_strat import save_ncfile
df = pd.read_csv(
loadpath /
'all_indian_rain_1871-2016.txt',
delim_whitespace=True)
df = df.drop(['JF', 'MAM', 'JJAS', 'OND', 'ANN'],axis=1)
# transform from table to time-series:
df = df.melt(id_vars='YEAR', var_name='month', value_name='rain')
df['date'] = pd.to_datetime(df['YEAR'].astype(str) + '-' + df['month'])
df.set_index('date', inplace=True)
df = df.drop(['YEAR', 'month'], axis=1)
df = df.sort_index()
df.index.name = 'time'
indian = df.to_xarray()
indian.attrs['name'] = 'All indian rain index'
save_ncfile(indian, loadpath, 'indian_rain.nc')
return indian
def _download_enso_ersst(loadpath, filename='noaa_ersst_nino.nc', index=False,
savepath=None):
import requests
import os.path
import io
import pandas as pd
import xarray as xr
from pathlib import Path
filepath = loadpath / filename
if filepath.is_file():
print('Noaa Ersst El-Nino SO already d/l and saved!')
# read it to data array (xarray)
nino_xr = xr.open_dataset(filepath)
# else d/l the file and first read it to df (pandas),
# then to xarray then save as nc:
else:
print('Downloading ersst nino data from noaa ncep website...')
url = 'http://www.cpc.ncep.noaa.gov/data/indices/ersst5.nino.mth.81-10.ascii'
url2 = 'https://www.cpc.ncep.noaa.gov/data/indices/ersst5.nino.mth.91-20.ascii'
s = requests.get(url2).content
nino_df = pd.read_csv(io.StringIO(s.decode('utf-8')),
delim_whitespace=True)
idx = pd.to_datetime(dict(year=nino_df.YR, month=nino_df.MON, day='1'))
nino_df = nino_df.set_index(idx)
nino_df = nino_df.drop(nino_df.iloc[:, 0:2], axis=1)
nino_df.columns = ['NINO1+2', 'ANOM_NINO1+2', 'NINO3', 'ANOM_NINO3',
'NINO4', 'ANOM_NINO4', 'NINO3.4', 'ANOM_NINO3.4']
nino_df = nino_df.rename_axis('time')
nino_xr = xr.Dataset(nino_df)
if savepath is not None:
nino_xr.to_netcdf(savepath / filename)
print('Downloaded ersst_nino data and saved it to: ' + filename)
if index:
enso = nino_xr['ANOM_NINO3.4']
enso.attrs['long_name'] = enso.name
if savepath is not None:
enso.to_netcdf(savepath / 'anom_nino3p4_index.nc', 'w')
print_saved_file('anom_nino3p4_index.nc', savepath)
return enso
else:
return nino_xr
#def download_enso_sstoi(
# path='/Users/shlomi/Dropbox/My_backup/Python_projects/Stratosphere_Chaim/',
# filename='noaa_sstoi_nino.nc'):
# import requests
# import os.path
# import io
# import pandas as pd
# import xarray as xr
# import numpy as np
# if os.path.isfile(os.path.join(path, filename)):
# print('Noaa Sstoi El-Nino SO already d/l and saved!')
# # read it to data array (xarray)
# nino_xr = xr.open_dataset(path + filename)
# # else d/l the file and first read it to df (pandas), then to xarray then save as nc:
# else:
# print('Downloading sstoi nino data from noaa ncep website...')
# url = 'http://www.cpc.ncep.noaa.gov/data/indices/sstoi.indices'
# s = requests.get(url).content
# nino_df = pd.read_csv(io.StringIO(s.decode('utf-8')),
# delim_whitespace=True)
# idx = pd.to_datetime(dict(year=nino_df.YR, month=nino_df.MON, day='1'))
# nino_df = nino_df.set_index(idx)
# nino_df = nino_df.drop(nino_df.iloc[:, 0:2], axis=1)
#        nino_df.columns = ['NINO1+2', 'ANOM_NINO1+2', 'NINO3', 'ANOM_NINO3',
# 'NINO4', 'ANOM_NINO4', 'NINO3.4', 'ANOM_NINO3.4']
# nino_df = nino_df.rename_axis('time')
# nino_xr = xr.Dataset(nino_df)
# nino_xr.to_netcdf(path + filename)
# print('Downloaded sstoi_nino data and saved it to: ' + filename)
# return nino_xr
#def download_solar_250nm(filename='nrl2_ssi.nc'):
# import requests
# import os.path
# import io
# import pandas as pd
# import xarray as xr
# import numpy as np
# import os
# from datetime import date, timedelta
# path = os.getcwd() + '/'
# if os.path.isfile(os.path.join(path, filename)):
# print('Solar irridiance 250nm already d/l and saved!')
#        # read it to data array (xarray)
# solar_xr = xr.open_dataarray(path + filename)
# # else d/l the file and first read it to df (pandas), then to xarray then save as nc:
# else:
# print('Downloading solar 250nm irridiance data from Lasp Interactive Solar IRridiance Datacenter (LISIRD)...')
# url = 'http://lasp.colorado.edu/lisird/latis/nrl2_ssi_P1M.csv?time,wavelength,irradiance&wavelength=249.5'
# s = requests.get(url).content
# solar_df = pd.read_csv(io.StringIO(s.decode('utf-8')))
# start_date = date(1610, 1, 1)
# days_from = solar_df.iloc[:, 0].values
# offset = np.empty(len(days_from), dtype='O')
# for i in range(len(days_from)):
# offset[i] = (start_date + timedelta(days_from[i])).strftime('%Y-%m')
# solar_df = solar_df.set_index(pd.to_datetime(offset))
# solar_df = solar_df.drop(solar_df.iloc[:, 0:2], axis=1)
# solar_df = solar_df.rename_axis('time').rename_axis('irradiance', axis='columns')
# solar_xr = xr.DataArray(solar_df)
# solar_xr.irradiance.attrs = {'long_name': 'Irradiance',
#                                  'units': 'W/m^2/nm'}
# solar_xr.attrs = {'long_name': 'Solar Spectral Irradiance (SSI) at 249.5 nm wavelength from LASP'}
# solar_xr.name = 'Solar UV'
# solar_xr.to_netcdf(path + filename)
# print('Downloaded ssi_250nm data and saved it to: ' + filename)
# return solar_xr
def _download_singapore_qbo(path=None, filename='singapore_qbo_index.nc'):
import requests
import os.path
import io
import pandas as pd
import xarray as xr
import functools
from pathlib import Path
"""checks the files for the singapore qbo index from Berlin Uni. and
reads them or downloads them if they are
missing. output is the xarray and csv backup locally"""
if path is None:
path = Path().cwd() / 'regressors/'
filepath = path / filename
if filepath.is_file():
print('singapore QBO already d/l and saved!')
# read it to data array (xarray)
sqbo_xr = xr.open_dataset(path / filename)
# else d/l the file and first read it to df (pandas),
# then to xarray then save as nc:
else:
print('Downloading singapore data from Berlin university...')
url = 'http://www.geo.fu-berlin.de/met/ag/strat/produkte/qbo/singapore.dat'
s = requests.get(url).content
        sing_qbo = pd.read_csv(io.StringIO(s.decode('utf-8')),
skiprows=3,
header=None, delim_whitespace=True,
names=list(range(0, 13)))
# take out year data
year = sing_qbo.iloc[0:176:16][0]
# from 1997 they added another layer (100 hPa) to data hence the
# irregular indexing:
year = pd.concat([year, sing_qbo.iloc[177::17][0]], axis=0)
df_list = []
# create a list of dataframes to start assembeling data:
for i in range(len(year)-1):
df_list.append(pd.DataFrame(
data=sing_qbo.iloc[year.index[i] + 1:year.index[i + 1]]))
# don't forget the last year:
df_list.append(pd.DataFrame(data=sing_qbo.iloc[year.index[-1]+1::]))
for i in range(len(df_list)):
# first reset the index:
df_list[i] = df_list[i].reset_index(drop=True)
# drop first row:
df_list[i] = df_list[i].drop(df_list[i].index[0], axis=0)
# change all data to float:
df_list[i] = df_list[i].astype('float')
# set index to first column (hPa levels):
df_list[i].set_index(0, inplace=True)
# multiply all values to X 0.1 (scale factor from website)
df_list[i] = df_list[i] * 0.1
# assemble datetime index and apply to df.columns:
first_month = year.iloc[i] + '-01-01'
time = pd.date_range(first_month, freq='MS', periods=12)
df_list[i].columns = time
# df_list[i].rename_axis('hPa').rename_axis('time', axis='columns')
# do an outer join on all df_list
df_combined = functools.reduce(
lambda df1, df2: df1.join(
df2, how='outer'), df_list)
df_combined = df_combined.rename_axis(
'level').rename_axis('time', axis='columns')
df_combined = df_combined.sort_index(ascending=False)
sqbo_xr = xr.DataArray(df_combined)
sqbo_xr.level.attrs = {'long_name': 'pressure',
'units': 'hPa',
'positive': 'down',
'axis': 'Z'}
sqbo_xr.attrs = {'long_name': 'Monthly mean zonal wind components' +
' at Singapore (48698), 1N/104E',
'units': 'm/s'}
sqbo_xr.name = 'Singapore QBO'
sqbo_xr.to_netcdf(path / filename)
print('Downloaded singapore qbo data and saved it to: ' + filename)
return sqbo_xr
def _produce_BDC(loadpath, plevel=70, savepath=None):
# from era5_tools import proccess_era5_fields
# bdc = proccess_era5_fields(path=loadpath, pre_names='MTTPM_54',
# post_name='bdc_index', mean=True,
# savepath=savepath)
from aux_functions_strat import path_glob
from aux_functions_strat import lat_mean
import xarray as xr
file = path_glob(loadpath, 'era5_mttpm_{}hPa.nc'.format(plevel))[0]
da = xr.load_dataarray(file)
bdc = lat_mean(da.sel(lat=slice(-5, 5)))
bdc = bdc.mean('lon', keep_attrs=True).squeeze(drop=True)
filename = 'era5_bdc{}_index.nc'.format(plevel)
if savepath is not None:
bdc.to_netcdf(savepath / filename, 'w')
print_saved_file(filename, savepath)
return bdc
def _produce_radio_cold(savepath=None, no_qbo=False, rolling=None):
from strato_soundings import calc_cold_point_from_sounding
from aux_functions_strat import overlap_time_xr
from sklearn.linear_model import LinearRegression
from aux_functions_strat import deseason_xr
import xarray as xr
radio = calc_cold_point_from_sounding(
times=None,
return_anom=True,
plot=False,
return_mean=True)
filename_prefix = 'radio_cold'
if no_qbo:
filename_prefix = 'radio_cold_no_qbo'
# qbos = _produce_eof_pcs(reg_path, source='era5', plot=False)
qbos = xr.open_dataset(savepath / 'qbo_cdas_index.nc')
new_time = overlap_time_xr(qbos, radio)
qbos = qbos.sel(time=new_time)
qbos = deseason_xr(qbos.to_array('regressors'), how='mean').to_dataset('regressors')
radio = radio.sel(time=new_time)
lr = LinearRegression()
X = qbos.to_array('regressors').T
lr.fit(X, radio)
radio = radio - lr.predict(X)
if rolling is not None:
filename_prefix += '_rolling{}'.format(rolling)
radio = radio.rolling(time=rolling, center=False).mean()
if savepath is not None:
filename = filename_prefix + '_index.nc'
radio.to_netcdf(savepath / filename, 'w')
print_saved_file(filename, savepath)
return radio
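# Example (minimal sketch, not called anywhere): the QBO removal above is an ordinary
# least-squares fit of the cold-point series on the QBO regressors, keeping only the
# residual. The arrays below are synthetic and only demonstrate the regress-out idea.
def _example_regress_out_signal():
    import numpy as np
    from sklearn.linear_model import LinearRegression
    rng = np.random.default_rng(0)
    qbo = rng.standard_normal((120, 2))              # two QBO predictors
    target = qbo @ np.array([0.7, -0.3]) + 0.1 * rng.standard_normal(120)
    lr = LinearRegression().fit(qbo, target)
    return target - lr.predict(qbo)                  # QBO-free residual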
def _produce_totexttau(loadpath=work_chaim/'MERRA2/aerosol_carbon',
savepath=None, indian=False):
import xarray as xr
from aux_functions_strat import lat_mean
ds = xr.load_dataset(loadpath / 'MERRA2_aerosol.nc')
ds = ds.sortby('time')
da = ds['TOTEXTTAU']
if indian:
da = da.sel(lat=slice(5, 30), lon=slice(65, 95))
filename = 'merra2_aod_indian_index.nc'
else:
filename = 'merra2_aod_index.nc'
da = lat_mean(da.mean('lon', keep_attrs=True))
da = da.resample(time='MS').mean()
if savepath is not None:
da.to_netcdf(savepath / filename, 'w')
print_saved_file(filename, savepath)
return da
def _produce_nao(loadpath=reg_path, savepath=reg_path):
import pandas as pd
df = pd.read_csv(loadpath / 'nao.txt',
delim_whitespace=True,
names=[
'year',
'month',
'nao'])
df['time'] = df['year'].astype(str) + '-' + df['month'].astype(str)
df['time'] = pd.to_datetime(df['time'])
df = df.set_index('time')
df = df.drop(['year', 'month'], axis=1)
da = df.to_xarray()
if savepath is not None:
filename = 'nao_index.nc'
da.to_netcdf(savepath / filename)
print_saved_file(filename, savepath)
return da
def _produce_ea_wr(loadpath=reg_path, savepath=reg_path):
import pandas as pd
df = pd.read_csv(loadpath / 'EA-WR.txt',
delim_whitespace=True,
names=[
'year',
'month',
'ea-wr'])
df['time'] = df['year'].astype(str) + '-' + df['month'].astype(str)
df['time'] = pd.to_datetime(df['time'])
df = df.set_index('time')
df = df.drop(['year', 'month'], axis=1)
da = df.to_xarray()
if savepath is not None:
filename = 'ea-wr_index.nc'
da.to_netcdf(savepath / filename)
print_saved_file(filename, savepath)
return da
def _produce_pdo(loadpath=reg_path, savepath=reg_path):
import pandas as pd
df = pd.read_csv(loadpath / 'pdo.txt',
names=['date', 'pdo'], skiprows=2)
df['time']=pd.to_datetime(df['date'],format='%Y%m')
df = df.set_index('time')
df = df.drop('date', axis=1)
da = df.to_xarray()
if savepath is not None:
filename = 'pdo_index.nc'
da.to_netcdf(savepath / filename)
print_saved_file(filename, savepath)
return da
def _produce_moi2(loadpath=reg_path, savepath=reg_path):
import pandas as pd
df = pd.read_fwf(
reg_path /
'moi2.txt',
names=['year', 'date', 'moi2'],
widths=[4, 8, 5])
df['date'] = df['date'].str.strip('.')
df['date'] = df['date'].str.strip(' ')
df['date'] = df['date'].str.replace(' ', '0')
df['date'] = df['date'].str.replace('.', '-')
df['time'] = df['year'].astype(str) + '-' + df['date'].astype(str)
    df['time'] = pd.to_datetime(df['time'], format='%Y-%m-%d')
import re
from unittest.mock import Mock, call, patch
import numpy as np
import pandas as pd
import pytest
from rdt.transformers.categorical import (
CategoricalFuzzyTransformer, CategoricalTransformer, LabelEncodingTransformer,
OneHotEncodingTransformer)
RE_SSN = re.compile(r'\d\d\d-\d\d-\d\d\d\d')
class TestCategoricalTransformer:
def test___setstate__(self):
"""Test the ``__set_state__`` method.
Validate that the ``__dict__`` attribute is correctly udpdated when
Setup:
- create an instance of a ``CategoricalTransformer``.
Side effect:
- it updates the ``__dict__`` attribute of the object.
"""
# Setup
transformer = CategoricalTransformer()
# Run
transformer.__setstate__({
'intervals': {
None: 'abc'
}
})
# Assert
assert transformer.__dict__['intervals'][np.nan] == 'abc'
def test___init__(self):
"""Passed arguments must be stored as attributes."""
# Run
transformer = CategoricalTransformer(
fuzzy='fuzzy_value',
clip='clip_value',
)
# Asserts
assert transformer.fuzzy == 'fuzzy_value'
assert transformer.clip == 'clip_value'
def test_is_transform_deterministic(self):
"""Test the ``is_transform_deterministic`` method.
        Validate that this method returns the opposite boolean value of the ``fuzzy`` parameter.
Setup:
- initialize a ``CategoricalTransformer`` with ``fuzzy = True``.
Output:
- the boolean value which is the opposite of ``fuzzy``.
"""
# Setup
transformer = CategoricalTransformer(fuzzy=True)
# Run
output = transformer.is_transform_deterministic()
# Assert
assert output is False
def test_is_composition_identity(self):
"""Test the ``is_composition_identity`` method.
Since ``COMPOSITION_IS_IDENTITY`` is True, just validates that the method
returns the opposite boolean value of the ``fuzzy`` parameter.
Setup:
- initialize a ``CategoricalTransformer`` with ``fuzzy = True``.
Output:
- the boolean value which is the opposite of ``fuzzy``.
"""
# Setup
transformer = CategoricalTransformer(fuzzy=True)
# Run
output = transformer.is_composition_identity()
# Assert
assert output is False
def test__get_intervals(self):
"""Test the ``_get_intervals`` method.
Validate that the intervals for each categorical value are correct.
Input:
- a pandas series containing categorical values.
Output:
- a tuple, where the first element describes the intervals for each
        categorical value (start, end, mean, std).
"""
# Run
data = pd.Series(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
result = CategoricalTransformer._get_intervals(data)
# Asserts
expected_intervals = {
'foo': (
0,
0.5,
0.25,
0.5 / 6
),
'bar': (
0.5,
0.8333333333333333,
0.6666666666666666,
0.05555555555555555
),
'tar': (
0.8333333333333333,
0.9999999999999999,
0.9166666666666666,
0.027777777777777776
)
}
expected_means = pd.Series({
'foo': 0.25,
'bar': 0.6666666666666666,
'tar': 0.9166666666666666
})
expected_starts = pd.DataFrame({
'category': ['foo', 'bar', 'tar'],
'start': [0, 0.5, 0.8333333333333333]
}).set_index('start')
assert result[0] == expected_intervals
pd.testing.assert_series_equal(result[1], expected_means)
pd.testing.assert_frame_equal(result[2], expected_starts)
def test__get_intervals_nans(self):
"""Test the ``_get_intervals`` method when data contains nan's.
Validate that the intervals for each categorical value are correct, when passed
data containing nan values.
Input:
        - a pandas series containing nan values and categorical values.
Output:
- a tuple, where the first element describes the intervals for each
        categorical value (start, end, mean, std).
"""
# Setup
data = pd.Series(['foo', np.nan, None, 'foo', 'foo', 'tar'])
# Run
result = CategoricalTransformer._get_intervals(data)
# Assert
expected_intervals = {
'foo': (
0,
0.5,
0.25,
0.5 / 6
),
np.nan: (
0.5,
0.8333333333333333,
0.6666666666666666,
0.05555555555555555
),
'tar': (
0.8333333333333333,
0.9999999999999999,
0.9166666666666666,
0.027777777777777776
)
}
expected_means = pd.Series({
'foo': 0.25,
np.nan: 0.6666666666666666,
'tar': 0.9166666666666666
})
expected_starts = pd.DataFrame({
'category': ['foo', np.nan, 'tar'],
'start': [0, 0.5, 0.8333333333333333]
}).set_index('start')
assert result[0] == expected_intervals
pd.testing.assert_series_equal(result[1], expected_means)
pd.testing.assert_frame_equal(result[2], expected_starts)
def test__fit_intervals(self):
# Setup
transformer = CategoricalTransformer()
# Run
data = pd.Series(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
transformer._fit(data)
# Asserts
expected_intervals = {
'foo': (
0,
0.5,
0.25,
0.5 / 6
),
'bar': (
0.5,
0.8333333333333333,
0.6666666666666666,
0.05555555555555555
),
'tar': (
0.8333333333333333,
0.9999999999999999,
0.9166666666666666,
0.027777777777777776
)
}
expected_means = pd.Series({
'foo': 0.25,
'bar': 0.6666666666666666,
'tar': 0.9166666666666666
})
expected_starts = pd.DataFrame({
'category': ['foo', 'bar', 'tar'],
'start': [0, 0.5, 0.8333333333333333]
}).set_index('start')
assert transformer.intervals == expected_intervals
pd.testing.assert_series_equal(transformer.means, expected_means)
pd.testing.assert_frame_equal(transformer.starts, expected_starts)
def test__get_value_no_fuzzy(self):
# Setup
transformer = CategoricalTransformer(fuzzy=False)
transformer.intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
np.nan: (0.5, 1.0, 0.75, 0.5 / 6),
}
# Run
result_foo = transformer._get_value('foo')
result_nan = transformer._get_value(np.nan)
# Asserts
assert result_foo == 0.25
assert result_nan == 0.75
@patch('rdt.transformers.categorical.norm')
def test__get_value_fuzzy(self, norm_mock):
# setup
norm_mock.rvs.return_value = 0.2745
transformer = CategoricalTransformer(fuzzy=True)
transformer.intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
}
# Run
result = transformer._get_value('foo')
# Asserts
assert result == 0.2745
def test__normalize_no_clip(self):
"""Test normalize data"""
# Setup
transformer = CategoricalTransformer(clip=False)
# Run
        data = pd.Series([-0.43, 0.1234, 1.5, -1.31])
# _*_ coding: utf-8 _*_
__author__ = '<NAME>'
__date__ = '1/13/2018 6:39 PM'
import pandas as pd
import numpy as np
# create first series, the element now have index number start from 0
s = pd.Series([1, 3, 6, np.nan, 44, 1])
from typing import Any, List, Tuple, Union
from pathlib import Path
import os
import unittest
from pandas import DataFrame
from schematics.types import ListType, IntType, StringType
import numpy as np
import skimage.io
from hidebound.core.specification_base import SpecificationBase
import hidebound.core.traits as tr
import hidebound.core.validators as vd
# ------------------------------------------------------------------------------
class DatabaseTestBase(unittest.TestCase):
columns = [
'specification',
'extension',
'filename',
'filepath',
'file_error',
'file_traits',
'asset_name',
'asset_path',
'asset_type',
'asset_traits',
'asset_error',
'asset_valid',
] # type: List[str]
def get_data(self, root, nans=False):
# type: (str, bool) -> DataFrame
data = [
[0, True, 'sequence', 'spec001', 'proj001/spec001/pizza/p-proj001_s-spec001_d-pizza_v001', 'p-proj001_s-spec001_d-pizza_v001_c0000-0001_f0001.png', None ], # noqa E501 E241
[0, True, 'sequence', 'spec001', 'proj001/spec001/pizza/p-proj001_s-spec001_d-pizza_v001', 'p-proj001_s-spec001_d-pizza_v001_c0000-0001_f0002.png', None ], # noqa E501 E241
[0, True, 'sequence', 'spec001', 'proj001/spec001/pizza/p-proj001_s-spec001_d-pizza_v001', 'p-proj001_s-spec001_d-pizza_v001_c0000-0001_f0003.png', None ], # noqa E501 E241
[1, True, 'sequence', 'spec001', 'proj001/spec001/pizza/p-proj001_s-spec001_d-pizza_v002', 'p-proj001_s-spec001_d-pizza_v002_c0000-0000_f0001.png', None ], # noqa E501 E241
[1, True, 'sequence', 'spec001', 'proj001/spec001/pizza/p-proj001_s-spec001_d-pizza_v002', 'p-proj001_s-spec001_d-pizza_v002_c0000-0000_f0002.png', None ], # noqa E501 E241
[1, True, 'sequence', 'spec001', 'proj001/spec001/pizza/p-proj001_s-spec001_d-pizza_v002', 'p-proj001_s-spec001_d-pizza_v002_c0000-0000_f0003.png', None ], # noqa E501 E241
[1, True, 'sequence', 'spec001', 'proj001/spec001/pizza/p-proj001_s-spec001_d-pizza_v002', 'p-proj001_s-spec001_d-pizza_v002_c0000-0000_f0004.png', None ], # noqa E501 E241
[2, True, 'sequence', 'spec001', 'proj001/spec001/pizza/p-proj001_s-spec001_d-pizza_v002', 'p-proj001_s-spec001_d-pizza_v002_c0000-0001_f0001.png', None ], # noqa E501 E241
[2, True, 'sequence', 'spec001', 'proj001/spec001/pizza/p-proj001_s-spec001_d-pizza_v002', 'p-proj001_s-spec001_d-pizza_v002_c0000-0001_f0002.png', None ], # noqa E501 E241
[2, True, 'sequence', 'spec001', 'proj001/spec001/pizza/p-proj001_s-spec001_d-pizza_v002', 'p-proj001_s-spec001_d-pizza_v002_c0000-0001_f0003.png', None ], # noqa E501 E241
[2, True, 'sequence', 'spec001', 'proj001/spec001/pizza/p-proj001_s-spec001_d-pizza_v002', 'p-proj001_s-spec001_d-pizza_v002_c0000-0001_f0004.png', None ], # noqa E501 E241
[3, False, 'sequence', 'spec001', 'proj001/spec001/pizza/p-proj001_s-spec001_d-pizza_v003', 'p-proj001_s-spec001_d-kiwi_v003_c0000-0001_f0001.png', ' Inconsistent descriptor field token'], # noqa E501 E241
[3, False, 'sequence', 'spec001', 'proj001/spec001/pizza/p-proj001_s-spec001_d-pizza_v003', 'p-proj001_s-spec001_d-pizza_v003_c0000-0001_f0002.png', None ], # noqa E501 E241
[3, False, 'sequence', 'spec001', 'proj001/spec001/pizza/p-proj001_s-spec001_d-pizza_v003', 'p-proj001_s-spec001_d-PIZZA_v003_c0000-0001_f0003.png', 'Illegal descriptor field token' ], # noqa E501 E241
[3, False, 'sequence', 'spec001', 'proj001/spec001/pizza/p-proj001_s-spec001_d-pizza_v003', 'p-proj001_s-spec001_d-pizza_v003_c0000-0001_f0004.png', None ], # noqa E501 E241
[3, False, np.nan, None, 'proj001/spec001/pizza/p-proj001_s-spec001_d-pizza_v003', 'p-proj001_s-spec0001_d-pizza_v003_c0000-0001_f0005.png', 'Illegal specification field token' ], # noqa E501 E241
[3, False, np.nan, None, 'proj001/spec001/pizza/p-proj001_s-spec001_d-pizza_v003', 'misc.txt', 'SpecificationBase not found' ], # noqa E501 E241
[4, True, 'sequence', 'spec002', 'proj001/spec002/taco/p-proj001_s-spec002_d-taco_v001', 'p-proj001_s-spec002_d-taco_v001_f0000.jpg', None ], # noqa E501 E241
[4, True, 'sequence', 'spec002', 'proj001/spec002/taco/p-proj001_s-spec002_d-taco_v001', 'p-proj001_s-spec002_d-taco_v001_f0001.jpg', None ], # noqa E501 E241
[4, True, 'sequence', 'spec002', 'proj001/spec002/taco/p-proj001_s-spec002_d-taco_v001', 'p-proj001_s-spec002_d-taco_v001_f0002.jpg', None ], # noqa E501 E241
[5, False, 'sequence', 'spec002', 'proj001/spec002/taco/p-proj001_s-spec002_d-taco_v002', 'p-proj001_s-spec002_d-taco_v001_f0000.jpg', 'Invalid asset directory name' ], # noqa E501 E241
[5, False, 'sequence', 'spec002', 'proj001/spec002/taco/p-proj001_s-spec002_d-taco_v002', 'p-proj001_s-spec002_d-taco_v002_f0001.jpg', None ], # noqa E501 E241
[5, False, 'sequence', 'spec002', 'proj001/spec002/taco/p-proj001_s-spec002_d-taco_v002', 'p-proj001_s-spec002_d-taco_v002', 'Expected "_"' ], # noqa E501 E241
[5, False, 'sequence', 'spec002', 'proj001/spec002/taco/p-proj001_s-spec002_d-taco_v002', 'p-proj001_s-spec002_d-taco_v02_f0003.jpg', 'Illegal version field token' ], # noqa E501 E241
[6, False, 'sequence', 'vdb001', 'proj002/vdb001', 'p-proj002_s-vdb001_d-bagel_v001.vdb', 'Specification not found' ], # noqa E501 E241
[6, False, 'sequence', 'vdb001', 'proj002/vdb001', 'p-proj002_s-vdb001_d-bagel_v002.vdb', 'Specification not found' ], # noqa E501 E241
[6, False, 'sequence', 'vdb001', 'proj002/vdb001', 'p-proj002_s-vdb001_d-bagel_v003.vdb', 'Specification not found' ], # noqa E501 E241
] # type: Any
        data = DataFrame(data)
'''
This program converts the output from MESA to a format that is able to be read by FLASH.
'''
import pandas as pd
import numpy as np
import os
#copy the raw mesa output for editing
os.system("cp controls/part3.mod .")
#delete last few lines
os.system("head -n -8 part3.mod > output.csv")
#replace the "D" used by mesa for scientific notation with "E" in a new output file:
f = open("output.csv", 'r')
filedata = f.read()
f.close()
newdata = filedata.replace("D", "E")
f = open("output.csv", 'w')
f.write(newdata)
f.close()
#read the required columns in the new output file
data = pd.read_csv("output.csv", skiprows = 16, usecols = [0, 1, 2, 3, 11, 16], sep = ' ', engine = 'python')
df = pd.DataFrame(data)
# coding=UTF-8
# encoding = utf-8
from _pickle import dumps
from _pickle import loads
import Pythonbepi
import pandas as pd
# from pandas import DataFrame
from sklearn.cluster import KMeans
def testlist(handler,l):
print(l)
return l
def kmeans(handler, input_table, columns, clus_num):
stmt = 'select %s from %s ;' % (columns, input_table)
planIndex = Pythonbepi.BEPI_prepare(handler, stmt)
rv = Pythonbepi.BEPI_execute_plan_with_return(handler, planIndex)
# print(rv)
with open('a.txt','w+',encoding='utf-8') as f:
for i in rv:
f.write(str(i) + '\n')
frame = []
for i in rv:
frame.append(i)
    df = pd.DataFrame(frame)
import dash
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
import sklearn.datasets
import plotly.graph_objs as go
wine_data = sklearn.datasets.load_wine()
Y = wine_data["target"]
wine_data = pd.DataFrame(wine_data["data"], columns=wine_data["feature_names"])
"""Utils for loading and adding annotations to the data"""
import ast
import logging
import msgpack
import os
import pandas as pd
from functools import lru_cache
from gensim.corpora import Dictionary
from gensim.matutils import corpus2csc
from spacy.tokens import Doc
from tqdm import tqdm
from src import HOME_DIR
from src.utils.spacy import nlp, apply_extensions
from src.utils.wiki2vec import lookup_entity
logger = logging.getLogger(__name__)
cache = lru_cache(maxsize=None)
def _load_spacy():
"""Loads serialized spacy vocab and docs
Returns
-------
dict
Maps doc id to bytes for spacy doc.
"""
spacy_path = os.path.join(HOME_DIR, 'data/processed/spacy')
if os.path.exists(spacy_path):
with open(spacy_path, 'rb') as f:
m = msgpack.load(f)
nlp.vocab.from_bytes(m[b'vocab'])
return m[b'docs']
else:
logger.warn('No serialized Spacy found')
return None
class Paragraph:
"""A paragraph from the corpus
Parameters
----------
row : pd.Series
The row of data referring to this speech.
parent : src.corpus.Speech
A reference to the Speech object that contains this paragraph.
"""
def __init__(self, row, parent):
self.index = row.paragraph_index
self.id_ = row.paragraph_id
self.row = row
self.speech = parent
def spacy_doc(self):
return self.speech.spacy_doc()._.paragraphs[self.index]
def session(self):
return self.row.session
def year(self):
return self.row.year
def country_code(self):
return self.row.country
def country(self):
return self.row.country_name
class Speech:
"""A speech from the corpus
Serialized Spacy docs are lazy loaded.
Parameters
----------
group : pd.DataFrame
The subset of rows/paragraphs that belong to this speech.
spacy_bytes : bytes
Serialized spacy doc.
"""
def __init__(self, group, spacy_bytes=None):
self.id_ = group.document_id.unique()[0]
self._spacy_bytes = spacy_bytes
self.group = group
self.paragraphs = [
Paragraph(row, self)
for _, row in group.iterrows()
]
@cache
def spacy_doc(self):
if self._spacy_bytes is not None:
doc = apply_extensions(Doc(nlp.vocab).from_bytes(self._spacy_bytes))
assert len(doc._.paragraphs) == len(self.paragraphs)
return doc
else:
raise FileNotFoundError('No serialized Spacy found')
def session(self):
return self.group.session.iloc[0]
def year(self):
return self.group.year.iloc[0]
def country_code(self):
return self.group.country.iloc[0]
def country(self):
return self.group.country_name.iloc[0]
class Corpus:
"""UN General Debate Corpus"""
def __init__(self, filename='data/processed/debates_paragraphs.csv'):
self.filename = filename
self._load(filename)
def _load(self, filename):
debates = pd.read_csv(os.path.join(HOME_DIR, filename))
debates.bag_of_words = debates.bag_of_words.apply(ast.literal_eval)
self.debates = debates
spacy = _load_spacy()
# Ensure the following two lists are sorted in the same order as the
# debates df.
self.speeches = [
Speech(
group,
spacy.pop(id_) if spacy else None)
for id_, group in debates.groupby('document_id')
]
self.paragraphs = [par for sp in self.speeches for par in sp.paragraphs]
for par_id_from_df, par in zip(debates.paragraph_id, self.paragraphs):
assert par_id_from_df == par.id_
self.speech_id_to_speech = {
sp.id_: sp for sp in self.speeches}
self.paragraph_id_to_paragraph = {
par.id_: par for par in self.paragraphs}
def paragraph(self, id_):
"""Get a paragraph by id"""
return self.paragraph_id_to_paragraph[id_]
def speech(self, id_):
"""Get a speech by id"""
return self.speech_id_to_speech[id_]
def add_dataframe_column(self, column):
"""Add column to the dataframe
Add a column to the corpus dataframe and save it so that it loads next
time. Useful for adding paragraph level annotations that don't
necessarily make sense as a Spacy extension.
Parameters
----------
column : pd.Series
New column to append to the corpus dataframe. Should be named.
"""
self.debates = | pd.concat([self.debates, column], axis=1) | pandas.concat |
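# --- Added usage sketch (not part of the original module) ---
# How the Corpus class above is typically driven; the indices below are purely
# illustrative and assume the processed CSV and serialized Spacy docs exist.
def _example_corpus_usage():
    corpus = Corpus()                      # loads data/processed/debates_paragraphs.csv
    speech = corpus.speeches[0]
    print(speech.country(), speech.year())
    paragraph = speech.paragraphs[0]
    print(paragraph.spacy_doc())           # raises FileNotFoundError if Spacy docs are missing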
import numpy as np
import pandas as pd
import itertools
import matplotlib.pyplot as plt
from typing import Union
import os
cur_dir = os.getcwd()
figure_dir = os.path.join(cur_dir,'figures')
beta = 2
MIN, MAX = 0, 1
PARTS = 100
LABELS = ['$x_i$', '$x_j$', '$J$']
PROJECTION, CMAP, LW = '3d', 'terrain', 0.2
class Functions:
def sigmoid(self, x: float) -> float:
raise NotImplementedError
def trust(self, x: float) -> float:
raise NotImplementedError
def softmax(self, x: float) -> float:
raise NotImplementedError
def tanh(self, x: float) -> float:
raise NotImplementedError
def arctan(self, x: float) -> float:
raise NotImplementedError
def sig(x: float, beta: float = -1) -> float:
return 1 / (1 + np.exp(beta * x))
class Gradients:
def __init__(self):
self.beta = 2
def softmax(self, x: float) -> float:
return np.exp(self.beta * x) / (np.exp(self.beta * x) + 1)
def trust(self, x: float, beta: Union[None, float]=None) -> float:
beta = self.beta if not beta else beta
return (-beta * (x / (1 - x)) ** beta) / ((x - 1) * x * (1 + (x / (1 - x)) ** beta) ** 2)
def sigmoid(self, x: float) -> float:
return (sig(x, self.beta)) * (1 - (sig(x, self.beta)))
def tanh(self, x: float) -> float:
return (4 * np.exp(-2 * x)) / (np.exp(-2 * x) + 1) ** 2
def arctan(self, x: float) -> float:
return 1 / (1 + x ** 2)
class Surface:
def trust(self, x: float, beta: float = 2.0) -> float:
return Gradients().trust(x, beta)
def evaluate(self, x: float, j: float, beta: float = 2.0) -> float:
if x != j:
y = abs(x - j)
return self.trust(y, beta) * self.trust(x, beta)
def generate(self, beta: float = 2) -> None:
data = {}
for x, j in list(itertools.permutations(np.linspace(MIN, MAX, PARTS), 2)):
try:
data[x, j] = self.evaluate(x, j, beta).astype(float)
except AttributeError:
pass
df = | pd.DataFrame(data) | pandas.DataFrame |
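# --- Added illustration (not part of the original module; relies on the numpy
# and matplotlib imports plus the constants/classes defined above) ---
# PROJECTION ('3d'), CMAP and LABELS suggest the generated trust surface
# J(x_i, x_j) is rendered as a 3D plot. A hedged, standalone sketch of that
# rendering (the grid construction is an assumption; the endpoints 0 and 1 are
# skipped to avoid division by zero inside trust()):
def plot_surface_sketch(beta: float = 2.0) -> None:
    surf = Surface()
    xs = np.linspace(MIN, MAX, PARTS + 2)[1:-1]   # interior points only
    grid = np.array([[surf.evaluate(x, j, beta) if x != j else 0.0 for j in xs] for x in xs])
    xx, jj = np.meshgrid(xs, xs, indexing='ij')
    fig = plt.figure()
    ax = fig.add_subplot(projection=PROJECTION)
    ax.plot_surface(xx, jj, grid, cmap=CMAP, linewidth=LW)
    ax.set_xlabel(LABELS[0])
    ax.set_ylabel(LABELS[1])
    ax.set_zlabel(LABELS[2])
    plt.show()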
import hashlib
import heapq
import json
import math
import os
import pprint
import random
import re
import sys
import traceback
from dataclasses import dataclass, field
from numbers import Number
from typing import Any
import matplotlib
import matplotlib.lines as mlines
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import bson
from matplotlib.axes import Axes
from matplotlib.figure import Figure
from matplotlib.patches import Polygon
from matplotlib.table import Table
from pandas.plotting import scatter_matrix
SEED = 0
def reset_seeds():
np.random.seed(123)
random.seed(123)
hashseed = os.getenv('PYTHONHASHSEED')
if not hashseed:
os.environ['PYTHONHASHSEED'] = str(SEED)
os.execv(sys.executable, [sys.executable] + sys.argv)
reset_seeds()
def load_raw_data(filename: str, service_name: str, workload: str, skip_reset=False, skip_pruned=False,
skip_tuned=True, show_workload_gap=True, to_gib=False) -> pd.DataFrame:
raw_data = []
def name(data):
return hashlib.md5(bytes(str(data.items()), 'ascii')).hexdigest()
def nbest(trials: list[dict], nursery: list[dict], tenured: list[dict], n: int) -> dict:
states = {trial['uid']: trial['state'] for trial in trials}
@dataclass
class Configuration:
name: str
uid: int
value: float
def __hash__(self):
return hash(self.name)
def __lt__(self, other):
value = self.value or 0
other = other.value or 0
return value < other
# def __eq__(self, other):
# return self.name == other.name
def __repr__(self):
return f'Configuration(uid={self.uid}, name={self.name}, value={self.value})'
nursery = [Configuration(item['name'], item['uid'], item['value']) for item in nursery]
tenured = [Configuration(item['name'], item['uid'], item['value']) for item in tenured]
trials = {trial['uid']: trial['state'] for trial in trials}
tmp = [cfg for cfg in set(nursery + tenured) if trials[cfg.uid] == 'COMPLETE']
heapq.heapify(tmp)
if len(tmp) == 0:
return {}
if n > 1:
return {i: item.name[:3] for i, item in enumerate(heapq.nsmallest(n, tmp))}
with open(filename) as jsonfile:
for i, row in enumerate(jsonfile):
# workaround to avoid problems with mongodb id
row: dict
row = re.sub(r'{"_id":{"\$oid":"[a-z0-9]+"},', '{', row)
row = re.sub(r'\{\"\$numberLong\":\"(?P<N>[0-9]+)\"}', '\g<N>', row)
record = json.loads(row)
# if workload != '' and record['ctx_workload']['name'] != workload:
if workload != '' and record['curr_workload']['name'] != workload:
if show_workload_gap:
raw_data.append(Iteration().__dict__)
continue
if skip_pruned and (record['mostly_workload']['name'] != record['ctx_workload']['name'] or
record['curr_workload']['name'] != record['ctx_workload']['name'] or record['pruned']):
continue
# if skip_pruned and (
# record['mostly_workload'] and record['mostly_workload']['name'] != record['ctx_workload']['name'] or
# record['curr_workload']['name'] != record['ctx_workload']['name'] or record['pruned']):
# # if len(raw_data) > 0:
# # raw_data = raw_data[:-1]
# continue
if skip_reset and record['reset']:
continue
if skip_tuned and 'TunedIteration' == record['status']:
def delete_training(record):
for key, value in record.items():
new_value = None
if isinstance(value, dict):
new_value = delete_training(value)
else:
if isinstance(value, Number):
new_value = 0
elif isinstance(value, str):
new_value = ''
elif isinstance(value, list):
new_value = []
elif isinstance(value, bool):
new_value = False
record[key] = new_value
return record
#
# # delete_training(record['training'])
continue
mem_scale = 1
if to_gib:
mem_scale = 2 ** 20
try:
raw_data.append(Iteration(
# pruned=record['mostly_workload']['name'] != record['ctx_workload']['name'] or
# record['curr_workload']['name'] != record['ctx_workload']['name'],
pruned=record['pruned'],
workload=record['ctx_workload']['name'],
iteration=record['global_iteration'],
pname=record['production']['curr_config']['name'],
# pname=hashlib.md5(bytes(str(record['production']['curr_config']['data']), 'ascii')).hexdigest(),
pscore=math.fabs(record['production']['metric']['objective'] or 0),
pmean=math.fabs(record['production']['curr_config']['stats']['mean'] or 0),
pmedian=math.fabs(record['production']['curr_config']['stats']['median'] or 0),
pmad=math.fabs(record['production']['curr_config']['stats'].get('mad', np.nan) or 0),
pmin=math.fabs(record['production']['curr_config']['stats']['min'] or 0),
pmax=math.fabs(record['production']['curr_config']['stats']['max'] or 0),
pstddev=record['production']['curr_config']['stats']['stddev'] or 0,
ptruput=record['production']['metric']['throughput'],
pproctime=record['production']['metric']['process_time'],
pmem=(record['production']['metric']['memory'] or 0) / mem_scale,
pmem_lim=(record['production']['metric'].get('memory_limit',
(record['production']['curr_config']['data'][
service_name][
'memory'] or 0) * mem_scale) or 0) / mem_scale,
pcpu=(record['production']['metric']['cpu'] or 1),
pcpu_lim=(record['production']['metric']['cpu_limit'] or 1),
preplicas=math.ceil(record['production']['metric'].get('curr_replicas',
record['production']['metric'][
'cpu_limit'])),
pparams=Param(record['production']['curr_config']['data'] or {},
math.fabs(record['production']['curr_config']['score'] or 0)),
# tname=hashlib.md5(bytes(str(record['training']['curr_config']['data']), 'ascii')).hexdigest(),
tname=record['training']['curr_config']['name'],
tscore=math.fabs(record['training']['metric']['objective'] or 0),
tmean=math.fabs(record['training']['curr_config']['stats']['mean'] or 0),
tmedian=math.fabs(record['training']['curr_config']['stats']['median'] or 0),
tmad=math.fabs(record['training']['curr_config']['stats'].get('mad', np.nan) or 0),
tmin=math.fabs(record['training']['curr_config']['stats']['min'] or 0),
tmax=math.fabs(record['training']['curr_config']['stats']['max'] or 0),
tstddev=record['training']['curr_config']['stats']['stddev'],
ttruput=record['training']['metric']['throughput'],
tproctime=record['training']['metric']['process_time'],
tmem=(record['training']['metric']['memory'] or 0) / mem_scale,
tmem_lim=(record['training']['metric'].get('memory_limit',
(record['training']['curr_config']['data'][service_name][
'memory'] or 0) * mem_scale) or 0) / mem_scale,
tcpu=(record['training']['metric']['cpu'] or 1),
tcpu_lim=(record['training']['metric']['cpu_limit'] or 1),
treplicas=math.ceil(record['training']['metric'].get('curr_replicas',
record['training']['metric']['cpu_limit'])),
tparams=Param(record['training']['curr_config']['data'],
math.fabs(record['training']['curr_config']['score'] or 0)),
# create an auxiliary table to hold the 3 best config at every iteration
nbest=nbest(record['trials'] or [], record['nursery'] or [], record['tenured'] or [], 3)
# nbest={chr(i + 97): best['name'][:3] for i, best in enumerate(record['best'])} if 'best' in record else {}
).__dict__)
except Exception as e:
print(i)
raise e
pprint.pprint(record)
return pd.DataFrame(raw_data).reset_index()
class Param:
""" Config params holder"""
def __init__(self, params, score):
self.params: dict[str:dict[str:Any]] = params
self.params['score'] = score
@dataclass
class Iteration:
pruned: bool = False
workload: str = ''
iteration: int = 0
pname: str = ''
pscore: float = 0
pmean: float = 0
pmedian: float = 0
pmad: float = np.nan
pmin: float = 0
pmax: float = 0
pstddev: float = 0
ptruput: float = 0
pproctime: float = 0
pmem: int = 0
pmem_lim: int = 0
pcpu: int = 0
pcpu_lim: int = 0
preplicas: int = 0
pparams: Param = 0
tname: str = ''
tscore: float = 0
tmean: float = 0
tmedian: float = 0
tmad: float = np.nan
tmin: float = 0
tmax: float = 0
tstddev: float = 0
ttruput: float = 0
tproctime: float = 0
tmem: int = 0
tmem_lim: int = 0
tcpu: int = 0
tcpu_lim: int = 0
treplicas: int = 0
tparams: Param = 0
nbest: dict = field(default_factory=dict)
def calculate_mad(df: pd.DataFrame):
df['pmad'] = df.groupby('pname')['pscore'].transform(lambda x: abs(x - x.rolling(len(df), 1).median()))
df['pmad'] = df.groupby('pname')['pmad'].transform(lambda x: x.rolling(len(df), 1).median())
    df['tmad'] = df.groupby('tname')['tscore'].transform(lambda x: abs(x - x.rolling(len(df), 1).median()))
    df['tmad'] = df.groupby('tname')['tmad'].transform(lambda x: x.rolling(len(df), 1).median())
return df
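# --- Added usage sketch (not part of the original script) ---
# How the pieces above are typically chained; the file name, service name and
# workload below are placeholders rather than values from the source.
def _example_tuning_plot():
    df = load_raw_data('mongo_export.json', service_name='daytrader-service',
                       workload='workload_50', to_gib=True)
    df = calculate_mad(df)
    plot(df, title='tuning run', objective_label='objective', save=False)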
def plot(df: pd.DataFrame, title: str, objective_label: str = '', save: bool = False, show_table: bool = False,
simple_visualization=False) -> list[
Axes]:
if df['pmad'].dropna().empty and df['tmad'].dropna().empty:
df = calculate_mad(df)
# pip install SecretColors
# create a color map
from SecretColors.cmaps import TableauMap
cm = TableauMap(matplotlib)
colormap = cm.colorblind()
reduced_table = df
reduced_table['ptruput_lim'] = reduced_table['ptruput']
reduced_table['ttruput_lim'] = reduced_table['ttruput']
# reduced_table['pproctime'] = reduced_table['pproctime'].apply(lambda x: 1 if x >= 100000 else x)
# reduced_table['tproctime'] = reduced_table['tproctime'].apply(lambda x: 1 if x >= 100000 else x)
reduced_table['ptruput'] = reduced_table['ptruput'] * reduced_table['pproctime']
reduced_table['ttruput'] = reduced_table['ttruput'] * reduced_table['tproctime']
# reduced_table['psvcutil'] = (reduced_table['ptruput_lim'] / (1/reduced_table['pproctime'])).fillna(0)
# reduced_table['tsvcutil'] = (reduced_table['ttruput_lim'] / (1/reduced_table['tproctime'])).fillna(0)
memoization = {}
new_colors = []
def unique_color(memoization, name):
# hold full and short config name
memoization[name] = abs(hash(name)) / sys.maxsize
memoization[name[:3]] = memoization[name]
for index, row in reduced_table.iterrows():
# create a color table for all configs
punique = row['pname']
tunique = row['tname']
wunique = row['workload']
if punique not in memoization:
# create a unique color table
unique_color(memoization, punique)
if punique in memoization:
new_colors.append(memoization[punique])
if tunique not in memoization:
# update color table with training configs
unique_color(memoization, tunique)
if tunique in memoization:
new_colors.append(memoization[tunique])
if wunique not in memoization:
unique_color(memoization, wunique)
if wunique in memoization:
new_colors.append(memoization[wunique])
# plotting elements
fig: Figure
ax: Axes # iterations
ax_r: Axes # response time row
ax_m: Axes # memory row
ax_t: Axes # truput row
fig = plt.figure(figsize=(32, 8))
gs = fig.add_gridspec(nrows=5, hspace=.001, height_ratios=[1.4, 1.4, 1.4, 1.4, 3])
axs = gs.subplots(sharex='col')
# bottom-up rows
ax = axs[4] # iterations
ax_r = axs[3] # ...
ax_m = axs[2]
ax_c = axs[1]
ax_t = axs[0]
# split chart by configs and paint each region with a unique color
cmap = matplotlib.cm.get_cmap(colormap)
# find highest point in iteration row
# complete plot
top = max([
reduced_table['pscore'].max(),
reduced_table['pmedian'].max() + reduced_table['pmad'].max(),
reduced_table['tscore'].max(),
reduced_table['tmedian'].max() + reduced_table['tmad'].max()
])
# print(top, reduced_table['pscore'].max(), reduced_table['pstddev'].max(), reduced_table['tscore'].max(), reduced_table['tstddev'].max())
magic_number = 0
ax.set_ylim(ymax=top + magic_number)
for index, row in reduced_table.iterrows():
        # max/min score boundaries at every iteration
_tmax = max(row['pmedian'], row['tscore'])
_tmin = min(row['pmedian'], row['tscore'])
_pmax = max(row['pmedian'], row['pscore'])
_pmin = min(row['pmedian'], row['pscore'])
# configuration label
ax.text(index, 0, f"{row['pname'][:3]}", {'ha': 'center', 'va': 'bottom'}, rotation=45, fontsize='x-small',
color='red') # production
ax.text(index, top + magic_number, f"{row['tname'][:3]}", {'ha': 'center', 'va': 'top'}, rotation=45,
fontsize='x-small',
color='blue') # training
# draw delta(score, (max,min)) -- residual
plot_training([index, _tmin], [index, _tmax], [index, row['tscore']], ax, color='blue', marker='x',
linestyle='--', linewidth=0.4)
plot_training([index, _pmin], [index, _pmax], [index, row['pscore']], ax, color='red', marker='None',
linestyle='--', linewidth=0.4)
# paint full column
interval = ax.xaxis.get_data_interval()
interval = np.linspace(interval[0], interval[1], len(reduced_table))
xf = 0
i = 0
for i, pos in enumerate(interval[:-1]):
delta = interval[i + 1] - pos
if i == 0:
x0 = pos - delta
else:
x0 = xf
xf = pos + delta / 2
rectangle: Polygon = ax.axvspan(x0, xf, facecolor=cmap(memoization[reduced_table.iloc[i]['pname']]), alpha=0.5)
if reduced_table.iloc[i]['pruned']:
rectangle.set_hatch('///')
# add divisions between iterations
newline_yspan([xf, 0], [xf, top], ax)
x0 = xf
xf += xf
rectangle: Polygon = ax.axvspan(x0, xf, facecolor=cmap(memoization[reduced_table.iloc[i + 1]['pname']]), alpha=0.5)
if reduced_table.iloc[i + 1]['pruned']:
rectangle.set_hatch('///')
# print(reduced_table[['pscore','tscore']])
# truput row
# ax_t = reduced_table.plot.bar(ax=ax_t, x='index', y=['ptruput', 'ttruput'], rot=0,
# color={'ptruput': 'red', 'ttruput': 'blue'}, width=0.8, alpha=1)
ax_t = reduced_table.plot.bar(ax=ax_t, x='index', y=['ptruput_lim', 'ttruput_lim'], rot=0,
color={'ptruput_lim': 'red', 'ttruput_lim': 'blue'}, width=0.8, alpha=1)
# psvcutil
# ax_u = reduced_table.plot.bar(ax=ax_u, x='index', y=['psvcutil', 'tsvcutil'], rot=0,
# color={'psvcutil': 'red', 'tsvcutil': 'blue'}, width=0.8, alpha=1)
# trending line
# trend_values = reduced_table['psvcutil'].values.tolist()
# # simulates more iterations after end of tuning
# trend_values = np.array(trend_values + trend_values[-4:-1]*20)
#
# # cap extremely high values for better visualization
# trend_values = np.where(trend_values > 2, 2, trend_values)
# z = np.polyfit(range(len(trend_values)), trend_values, 1)
# # normalize 0-1 yields very small values because the huge outliers
# # z = np.polyfit(range(len(trend_values)), (trend_values-min(trend_values))/(max(trend_values)-min(trend_values)), 1)
# p = np.poly1d(z)
# ax_u.plot(reduced_table.index, p(reduced_table.index), "c--", linewidth=1)
# response time row
# print(reduced_table[[ 'tproctime', 'pproctime']])
ax_r = reduced_table.plot.bar(ax=ax_r, x='index', y=['pproctime', 'tproctime'], rot=0,
color={'pproctime': 'red', 'tproctime': 'blue'}, width=0.8, alpha=1)
# # memory row -- dark shaded to better visualize the runtime consumption
ax_m = reduced_table.plot.bar(ax=ax_m, x='index', y=['pmem', 'tmem'], rot=0, color={'pmem': 'red', 'tmem': 'blue'},
width=0.8, alpha=1)
# memory limit row -- light shaded to better visualize the memory limits
ax_m = reduced_table.plot.bar(ax=ax_m, x='index', y=['pmem_lim', 'tmem_lim'], rot=0,
color={'pmem_lim': 'red', 'tmem_lim': 'blue'}, width=0.8, alpha=0.3)
# reduced_table['pmem_util'] = reduced_table['pmem'] / reduced_table['pmem_lim']
# reduced_table['tmem_util'] = reduced_table['tmem'] / reduced_table['tmem_lim']
# ax_m = reduced_table.plot.bar(ax=ax_m, x='index', y=['pmem_util', 'tmem_util'], rot=0,
# color={'pmem_util': 'red', 'tmem_util': 'blue'},
# width=0.8, alpha=1)
# cpu row -- dark shaded to better visualize the runtime consumption
# reduced_table['pcpu_util'] = reduced_table['pcpu'] / reduced_table['pcpu_lim']
# reduced_table['tcpu_util'] = reduced_table['tcpu'] / reduced_table['tcpu_lim']
# ax_c = reduced_table.plot.bar(ax=ax_c, x='index', y=['pcpu_util', 'tcpu_util'], rot=0, color={'pcpu_util': 'red', 'tcpu_util': 'blue'},
# width=0.8, alpha=1)
ax_c = reduced_table.plot.bar(ax=ax_c, x='index', y=['pcpu', 'tcpu'], rot=0, color={'pcpu': 'red', 'tcpu': 'blue'},
width=0.8, alpha=1)
ax_c = reduced_table.plot.bar(ax=ax_c, x='index', y=['pcpu_lim', 'tcpu_lim'], rot=0,
color={'pcpu_lim': 'red', 'tcpu_lim': 'blue'}, width=0.8, alpha=0.3)
# statistics marks p* stands for production t* stands for training
# ax = reduced_table.plot(ax=ax, x='index', y='pmedian', color='yellow', marker='^', markersize=3, linewidth=0)
if simple_visualization:
ax = reduced_table.plot(ax=ax, x='index', y='pscore', color='black', linewidth=0.3)
else:
ax = reduced_table.plot(ax=ax, x='index', y='pmedian', color='black', marker='o', markersize=3, yerr='pmad',
linewidth=0, elinewidth=0.7, capsize=3)
#
#
ax = reduced_table.plot(ax=ax, x='index', y='pmedian', color='black', marker='o', markersize=3, yerr='pmad',
linewidth=0, elinewidth=0.7, capsize=3)
ax = reduced_table.plot(ax=ax, x='index', y='tmedian', color='lime', marker='^', markersize=3, linewidth=0)
ax = reduced_table.plot(ax=ax, x='index', y='pscore', marker='*', markersize=4, color='red', linewidth=0)
ax.set_ylim(ymin=0) # force the graph to start at y-axis=0
# show 3 best configs table
if not show_table:
ax.xaxis.set_ticks(range(len(reduced_table)))
ax.set_xlabel('index')
ax.margins(x=0)
ax.tick_params(axis='x', which='major', labelsize='x-small')
ax.tick_params(axis='x', which='minor', labelsize='x-small')
else:
# draw table "manually"
ax.xaxis.set_ticks([])
ax.set_xlabel('')
ax.margins(x=0)
# table = pd.DataFrame(reduced_table['nbest'].to_dict())
# table = table.T
# table['replicas'] = reduced_table['preplicas']
table = reduced_table['preplicas']
table = table.T
table = table.fillna(value='')
plt_table: Table
# change replicas line position
tmp = table.iloc[-1]
for i in reversed(range(len(table))):
table.iloc[i] = table.iloc[i - 1]
table.iloc[0] = tmp
try:
reshaped_table = table.to_numpy().reshape(1, -1)
plt_table = ax.table(cellText=reshaped_table, rowLoc='center',
rowLabels=['replicas'], colLabels=reduced_table['index'],
cellLoc='center',
colLoc='center', loc='bottom')
plt_table.set_fontsize('x-small')
# reshaped_table = table.to_numpy().reshape(4, -1)
# plt_table = ax.table(cellText=reshaped_table, rowLoc='center',
# rowLabels=['replicas', '1st', '2nd', '3rd'], colLabels=reduced_table['index'],
# cellLoc='center',
# colLoc='center', loc='bottom')
# plt_table.set_fontsize('x-small')
#
# for pos, cell in plt_table.get_celld().items():
# cell.fill = True
# text: str = cell.get_text().get_text()
# if pos[0] != 0 and len(text) == 3 and text not in '1st2nd3rd':
# try:
# plt_table[pos].set_facecolor(cmap(memoization[text]))
# cell.set_alpha(0.5)
# except KeyError:
# print(f'KeyError: {text}')
# if pos[0] == 0:
# plt_table[pos].set_facecolor(cmap(memoization[reduced_table['pname'].iloc[pos[1]]]))
# # plt_table[pos].set_facecolor(cmap(memoization[reduced_table['workload'].iloc[pos[1]]]))
# cell.set_alpha(0.5)
# cell.set_linewidth(0.3)
except ValueError:
print('cannot plot top 3 table')
# customize legend
handles, labels = ax.get_legend_handles_labels()
handles.pop()
handles.pop()
handles.pop()
handles.pop()
handles.append(
mlines.Line2D([], [], color='black', marker='o', markersize=4, linestyle='-', linewidth=0.7))
# handles.append(
# mlines.Line2D([], [], color='yellow', marker='^', linestyle='None'))
handles.append(
mlines.Line2D([], [], color='lime', marker='^', linestyle='None'))
handles.append(
mlines.Line2D([], [], color='red', marker='*', linestyle='None'))
handles.append(
mlines.Line2D([], [], color='blue', marker='x', linestyle='None'))
handles.append(
mlines.Line2D([], [], color='black', marker='', linestyle='--', linewidth=0.7))
handles.append(matplotlib.patches.Patch(facecolor=cmap(list(memoization.values())[0]), edgecolor='k', alpha=0.7,
label='config. color'))
handles.append(
matplotlib.patches.Patch(facecolor=cmap(list(memoization.values())[0]), edgecolor='k', alpha=0.7, hatch='///',
label='config. pruned'))
handles.append(matplotlib.patches.Patch(facecolor='red', edgecolor='k', alpha=0.7,
label='config. color'))
handles.append(matplotlib.patches.Patch(facecolor='blue', edgecolor='k', alpha=0.7,
label='config. color'))
# customize y-axis labels
ax.get_legend().remove()
ax_m.get_legend().remove()
# ax_u.get_legend().remove()
ax_c.get_legend().remove()
ax_r.get_legend().remove()
if simple_visualization:
ax.minorticks_on()
ax.yaxis.grid(which='both')
ax2 = ax.twinx()
ax2.set_ylim(ax.get_ylim())
# ax_u.set_ylabel('service\nutilization (%)')
# ax_u.set_ylim(0, 1.0)
# ax_u.set_yticks(np.linspace(0, 1.0, 5))
# rlabels = [f'{item:.0f}' for item in np.linspace(0, 1.0, 5)*100]
# rlabels[-1] += '>'
# ax_u.set_yticklabels(rlabels)
ax_t.set_ylabel('arrivals/s\nnormalized')
# ax_t.set_ylim(0, 1)
# ax_t.set_yticks([0, .25, .50, .75, 1])
# ax_t.set_yticklabels([0, .25, .50, .75, 1])
# ax_t.set_ylim(0, ax_t.get_yaxis().get_data_interval()[1])
# ax_t.set_yticks(np.linspace(0, ax_t.get_yaxis().get_data_interval()[1], 4))
ax_r.set_ylabel('resp. time\nnormalized')
# ax_r.set_ylim(0, 1)
# ax_r.set_yticks([0, .25, .50, .75, 1])
# ax_r.set_yticklabels([0, .25, .50, .75, 1])
# ax_r.set_yticks(np.linspace(0, .2, 6))
# rlabels = [f'{item:.4f}' for item in np.linspace(0, .2, 6)]
# rlabels[-1] += '>'
# ax_r.set_yticklabels(rlabels)
ax_m.set_ylabel('Mem\nnormalized')
# ax_m.set_ylim(0, 1)
# ax_m.set_yticks([0, .25, .50, .75, 1])
# ax_m.set_yticklabels([0, .25, .50, .75, 1])
# ax_m.set_ylabel('memory (MB)\n'+r'${\log_2}$ scale')
# ax_m.set_yscale('log', base=2)
# ax_m.set_ylim(256, 8192)
# ax_m.set_yticks([256, 512, 1024, 2048, 4096, 8192])
# ax_m.set_yticklabels([256, 512, 1024, 2048, 4096, 8192])
# ax_m.get_yaxis().get_major_formatter().labelOnlyBase = False
# ax_m.set_ylabel('Mem (%)')
# ax_m.set_ylim(0, 1)
# ax_m.set_yticks(np.linspace(0,1,5))
# ax_m.set_yticklabels([f'{item:.0f}' for item in np.linspace(0, 100, 5)])
# ax_m.get_yaxis().get_major_formatter().labelOnlyBase = False
ax_c.set_ylabel('CPU\nnormalized')
# ax_c.set_ylim(0, 1)
# ax_c.set_yticks([0, .25, .50, .75, 1])
# ax_c.set_yticklabels([0, .25, .50, .75, 1])
# ax_c.set_yticks(np.linspace(0,1,5))
# ax_c.set_yticklabels([f'{item:.0f}' for item in np.linspace(0, 100, 5)])
# ax_c.get_yaxis().get_major_formatter().labelOnlyBase = False
# ax_c.set_ylim(0, ax_c.get_yaxis().get_data_interval()[1])
# # ax_c.set_ylim(1, 8192)
# ax_c.set_yticks(list(range(1, int(ax_c.get_yaxis().get_data_interval()[1]), 2)))
# ax_c.set_yticklabels(list(range(1, int(ax_c.get_yaxis().get_data_interval()[1]), 2)))
# # ax_m.set_yticks([0, 2**9, 2**10, 2**11, 2**12, 2*13])
# ax_c.get_yaxis().get_major_formatter().labelOnlyBase = False
# ax_c.set_ylabel('CPU')
ax_t.set_title(title, loc='left')
ax_t.axes.get_xaxis().set_visible(False)
# ax_u.axes.get_xaxis().set_visible(False)
ax_m.axes.get_xaxis().set_visible(False)
ax_c.axes.get_xaxis().set_visible(False)
ax_r.axes.get_xaxis().set_visible(False)
ax_t.grid(True, linewidth=0.3, alpha=0.7, color='k', linestyle='-')
# ax_u.grid(True, linewidth=0.3, alpha=0.7, color='k', linestyle='-')
ax_m.grid(True, linewidth=0.3, alpha=0.7, color='k', linestyle='-')
ax_c.grid(True, linewidth=0.3, alpha=0.7, color='k', linestyle='-')
ax_r.grid(True, linewidth=0.3, alpha=0.7, color='k', linestyle='-')
# guarantee that legend is above the first row -- see line 180
ax_t.legend(handles, [
# 'avg. of config. \'abc\' in prod',
'median of config \'abc\' in prod',
'median of config \'abc\' in train',
'prod. value at n-th iteration',
'train. value at n-th iteration',
        r'residual (*:prod, X:train), $y_i - \overline{Y_i}$',
'config. \'abc\' color',
'config. \'abc\' pruned',
'production',
'training',
], frameon=False, ncol=5, bbox_to_anchor=(0.6, 1.72), loc='upper center', fontsize='small')
ax.set_ylabel(objective_label)
# customize label position, -2 and 30 are magic numbers
ax.text(-2, 0, 'prod.\ncfg.', fontsize='smaller')
ax.text(-2, top + magic_number, 'train.\ncfg.', fontsize='smaller')
# hack to change hatch linewidth
mpl.rc('hatch', color='k', linewidth=0.5)
# tight layout to avoid waste of white space
# gs.tight_layout(fig, pad=3.0)
gs.tight_layout(fig)
if save:
fig = plt.gcf()
fig.set_size_inches((18, 8), forward=False)
fig.savefig(title + '.pdf', dpi=150) # Change is over here
else:
plt.show()
# plt.show()
return axs
# def newline(p1, p2, ax, arrow=False, **kwargs):
# xmin, xmax = ax.get_xbound()
#
# if arrow:
# if p1[1] > p2[1]:
# ax.scatter(p2[0], p2[1], marker='^', color=kwargs['color'])
# elif p2[1] > p1[1]:
# ax.scatter(p1[0], p1[1], marker='v', color=kwargs['color'])
#
# l = mlines.Line2D([p1[0], p2[0]], [p1[1], p2[1]], **kwargs)
# ax.add_line(l)
# return l
def newline_yspan(p1, p2, ax):
xmin, xmax = ax.get_xbound()
if (p2[0] == p1[0]):
xmin = xmax = p1[0]
ymin, ymax = ax.get_ybound()
else:
ymax = p1[1] + (p2[1] - p1[1]) / (p2[0] - p1[0]) * (xmax - p1[0])
ymin = p1[1] + (p2[1] - p1[1]) / (p2[0] - p1[0]) * (xmin - p1[0])
l = mlines.Line2D([xmin, xmax], [ymin, ymax], color='black', linestyle='-', linewidth=0.3)
ax.add_line(l)
return l
def plot_training(p1, p2, train, ax, marker=None, **kwargs):
""" dray residual """
ax.scatter(train[0], train[1], marker=marker, color=kwargs['color'])
top_y = max([train[1], p1[1], p2[1]])
bot_y = min([train[1], p1[1], p2[1]])
ax.scatter(train[0], train[1], marker=marker, color=kwargs['color'])
l = mlines.Line2D([p1[0], p2[0]], [bot_y, top_y], **kwargs)
ax.add_line(l)
return l
def plot_app_curves(df: pd.DataFrame, service_name, config_name, title: str = ''):
# data = {}
# for index, row in df.iterrows():
# # extract config only
# tmp = {}
# tmp = row['tparams'].params['daytrader-config-jvm']
# tmp.update(row['tparams'].params['daytrader-config-app'])
# tmp.update(row['tparams'].params['daytrader-service'])
# tmp['score'] = row['tparams'].params['score']
# # workaround to avoid str values like NaN and Inf
# if len(data) == 0:
# for k, v in tmp.items():
# try:
# data[k] = [float(v)]
# except:
# continue
# else:
# for k, v in tmp.items():
# try:
# data[k].append(float(v))
# except:
# continue
#
# # recreate data frame
# df = pd.DataFrame(data)
    df = features_table(df, service_name, config_name, 'tparams').T
    # create scatter matrix of parameters against the score
mat_ax = scatter_matrix(df, alpha=0.5, figsize=(6, 6), diagonal='kde')
# resize all labels to smallest size
for row in mat_ax:
for cel in row:
label = cel.get_xlabel()
cel.set_xlabel(label, fontsize='xx-small')
label = cel.get_ylabel()
cel.set_ylabel(label, fontsize='xx-small')
plt.show()
def features_table(df: pd.DataFrame, service_name, config_name, params) -> pd.DataFrame:
data = {}
for index, row in df.iterrows():
# extract config only
tmp = {}
# print(row)
tmp = row[params].params[f'{service_name}-{config_name}-jvm']
tmp.update(row[params].params[f'{service_name}-{config_name}-app'])
tmp.update(row[params].params[f'{service_name}-service'])
tmp['score'] = row[params].params['score']
# workaround to avoid str values like NaN and Inf
if len(data) == 0:
for k, v in tmp.items():
try:
data[k] = [float(v)]
except:
continue
else:
for k, v in tmp.items():
try:
data[k].append(float(v))
except:
continue
# recreate data frame
df = pd.DataFrame.from_dict(data, orient='index')
return df
def importance(df: pd.DataFrame, service_name='', config_name='', params='tparams', name='') -> list:
# df is table of all parameters, one parameter per column, one iteration per row
df = features_table(df, service_name=service_name, config_name=config_name, params=params).T
X = df.iloc[:, :-1]
y = df.iloc[:, -1]
# print(X)
# print(y)
# print(df.info())
# print(df.head())
# print(df.describe())
import xgboost as xgb
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.metrics import mean_squared_error
# https://www.datacamp.com/community/tutorials/xgboost-in-python
# data_dmatrix = xgb.DMatrix(data=X, label=y)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=123)
xg_reg = xgb.XGBRegressor(objective='reg:squarederror', colsample_bytree=0.3, learning_rate=0.1,
max_depth=5, alpha=10, n_estimators=10)
xg_reg.fit(X_train, y_train)
preds = xg_reg.predict(X_test)
rmse = np.sqrt(mean_squared_error(y_test, preds))
print("RMSE: %f" % (rmse))
return {col: score for col, score in zip(X_train.columns, xg_reg.feature_importances_)}
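# --- Added usage sketch (not part of the original script; requires xgboost) ---
# importance() returns {parameter: score}; one or more runs can be collected
# into the dict that plot_importance() expects. Names below are placeholders.
def _example_importance():
    df = load_raw_data('mongo_export.json', service_name='daytrader-service', workload='workload_50')
    scores = importance(df, service_name='daytrader', config_name='config', params='tparams')
    plot_importance({'daytrader': scores})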
def plot_importance(raw_data: dict):
# fig = plt.figure()
# gs = fig.add_gridspec(nrows=len(raw_data), hspace=0.000, height_ratios=[1 for _ in range(len(raw_data))])
# axs = gs.subplots(sharex='row')
# from SecretColors.cmaps import TableauMap
# cm = TableauMap(matplotlib)
# colormap = cm.colorblind()
#
# plt.set_cmap(colormap)
df = pd.DataFrame.from_dict(raw_data, orient='index')
# print(df.to_csv())
# ax = df.plot.bar()
#
# handles, labels = ax.get_legend_handles_labels()
# ax.legend(handles, labels, frameon=False, ncol=5, bbox_to_anchor=(0.5, 1.15), loc='upper center', fontsize='small')
#
#
#
# plt.show()
def general():
pd.set_option('display.max_columns', None)
| pd.set_option('display.max_rows', None) | pandas.set_option |
# -*- coding: utf-8 -*-
import osmnx as ox
import pandas as pd
from geopandas import GeoDataFrame
import os
import json
import urllib.request
import urllib.parse
import hashlib
import csv
import numpy as np
def osm_net_retrieve(bbox, network_type, osm_folder="OSM/"):
"""
Download .shp format road network within the specified bounding box
:param bbox: tuple of (north, south, east, west)
:param network_type: string e.g., 'drive'
:param osm_folder: string, path to save the downloaded shapefile
:return: None
"""
north, south, east, west = bbox
G = ox.graph_from_bbox(north, south, east, west, network_type=network_type)
ox.save_graphml(G, filepath=os.path.join(osm_folder, network_type + '_network.graphml'))
gdf = ox.graph_to_gdfs(G)
edge = gdf[1]
edge = edge.loc[:, ['geometry', 'highway', 'junction', 'length', 'maxspeed', 'name', 'oneway',
'osmid', 'u', 'v', 'width']]
fields = ['highway', 'junction', 'length', 'maxspeed', 'name', 'oneway',
'osmid', 'u', 'v', 'width']
df_inter = pd.DataFrame()
for f in fields:
df_inter[f] = edge[f].astype(str)
gdf_edge = GeoDataFrame(df_inter, geometry=edge["geometry"])
gdf_edge.to_file(osm_folder + network_type + "_net.shp")
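# --- Added usage example (not part of the original module) ---
# The bounding box below (roughly central Singapore) is only an illustration;
# network_type follows osmnx conventions such as 'drive' or 'walk'.
def _example_bbox_download():
    osm_net_retrieve(bbox=(1.38, 1.28, 103.95, 103.75), network_type='drive', osm_folder='OSM/')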
def osm_net_retrieve_polygon(polygon, network_type, osm_folder="OSM/"):
"""
Download .shp format road network within the specified polygon
:param polygon: polygon geodataframe
:param network_type: string e.g., 'drive'
:param osm_folder: string, path to save the downloaded shapefile
:return: None
"""
G = ox.graph_from_polygon(polygon, network_type=network_type)
ox.save_graphml(G, filepath=os.path.join(osm_folder, network_type + '_network.graphml'))
gdf = ox.graph_to_gdfs(G)
edge = gdf[1]
edge = edge.loc[:, ['geometry', 'highway', 'junction', 'length', 'maxspeed', 'name', 'oneway',
'osmid', 'u', 'v', 'width']]
fields = ['highway', 'junction', 'length', 'maxspeed', 'name', 'oneway',
'osmid', 'u', 'v', 'width']
df_inter = | pd.DataFrame() | pandas.DataFrame |
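# --- Added sketch (assumption): the polygon variant is cut off above; by
# analogy with osm_net_retrieve() it presumably finishes the same way, i.e.
# copying the attribute fields into df_inter and writing the edges out:
#     for f in fields:
#         df_inter[f] = edge[f].astype(str)
#     gdf_edge = GeoDataFrame(df_inter, geometry=edge["geometry"])
#     gdf_edge.to_file(osm_folder + network_type + "_net.shp")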
from typing import Generator
import pandas
import pytest
from neo4j import DEFAULT_DATABASE
from graphdatascience.graph_data_science import GraphDataScience
from graphdatascience.query_runner.neo4j_query_runner import Neo4jQueryRunner
from graphdatascience.query_runner.query_runner import QueryRunner
from graphdatascience.server_version.server_version import ServerVersion
GRAPH_NAME = "g"
@pytest.fixture(autouse=True)
def run_around_tests(runner: Neo4jQueryRunner) -> Generator[None, None, None]:
# Runs before each test
runner.run_query(
"""
CREATE
(a: Node {x: 1, y: 2}),
(b: Node {x: 2, y: 3}),
(c: Node {x: 3, y: 4}),
(a)-[:REL {relX: 4, relY: 5}]->(b),
(a)-[:REL {relX: 5, relY: 6}]->(c),
(b)-[:REL {relX: 6, relY: 7}]->(c),
(b)-[:REL2]->(c)
"""
)
yield # Test runs here
# Runs after each test
runner.run_query("MATCH (n) DETACH DELETE n")
runner.run_query(f"CALL gds.graph.drop('{GRAPH_NAME}', false)")
def test_project_graph_native(gds: GraphDataScience) -> None:
G, result = gds.graph.project(GRAPH_NAME, "*", "*")
assert G.name() == GRAPH_NAME
assert result["graphName"] == GRAPH_NAME
result = gds.graph.exists(G.name())
assert result["exists"]
def test_project_graph_native_estimate(gds: GraphDataScience) -> None:
result = gds.graph.project.estimate("*", "*")
assert result["requiredMemory"]
def test_project_graph_cypher(gds: GraphDataScience) -> None:
node_query = "MATCH (n:Node) RETURN id(n) as id"
relationship_query = "MATCH (n:Node)-->(m:Node) RETURN id(n) as source, id(m) as target, 'T' as type"
G, result = gds.graph.project.cypher(GRAPH_NAME, node_query, relationship_query)
assert G.name() == GRAPH_NAME
assert result["graphName"] == GRAPH_NAME
result = gds.graph.exists(G.name())
assert result["exists"]
def test_project_graph_cypher_estimate(gds: GraphDataScience) -> None:
node_query = "MATCH (n:Node) RETURN id(n) as id"
relationship_query = "MATCH (n:Node)-->(m:Node) RETURN id(n) as source, id(m) as target, 'T' as type"
result = gds.graph.project.cypher.estimate(node_query, relationship_query)
assert result["requiredMemory"]
def test_project_subgraph(runner: QueryRunner, gds: GraphDataScience) -> None:
from_G, _ = gds.graph.project(GRAPH_NAME, {"Node": {"properties": "x"}}, "*")
subG, result = gds.beta.graph.project.subgraph("s", from_G, "n.x > 1", "*", concurrency=2)
assert subG.name() == "s"
assert result["graphName"] == "s"
result2 = gds.graph.list(subG)
assert result2["nodeCount"][0] == 2
runner.run_query(f"CALL gds.graph.drop('{subG.name()}')")
def test_graph_list(gds: GraphDataScience) -> None:
result = gds.graph.list()
assert len(result) == 0
G, _ = gds.graph.project(GRAPH_NAME, "*", "*")
result = gds.graph.list()
assert len(result) == 1
result = gds.graph.list(G)
assert result["graphName"][0] == GRAPH_NAME
def test_graph_exists(gds: GraphDataScience) -> None:
G, _ = gds.graph.project(GRAPH_NAME, "*", "*")
result = gds.graph.exists(G.name())
assert result["exists"]
result = gds.graph.exists("bogusName")
assert not result["exists"]
def test_graph_drop(gds: GraphDataScience) -> None:
G, _ = gds.graph.project(GRAPH_NAME, "*", "*")
result = gds.graph.drop(G, True)
assert result is not None
assert result["graphName"] == GRAPH_NAME
with pytest.raises(Exception):
gds.graph.drop(G, True)
def test_graph_export(runner: QueryRunner, gds: GraphDataScience) -> None:
G, _ = gds.graph.project(GRAPH_NAME, "*", "*")
MY_DB_NAME = "testdatabase"
result = gds.graph.export(G, dbName=MY_DB_NAME, batchSize=10000)
assert result["graphName"] == GRAPH_NAME
assert result["dbName"] == MY_DB_NAME
runner.run_query("CREATE DATABASE $dbName", {"dbName": MY_DB_NAME})
runner.set_database(MY_DB_NAME)
node_count = runner.run_query("MATCH (n) RETURN COUNT(n) AS c").squeeze()
assert node_count == 3
runner.run_query("DROP DATABASE $dbName", {"dbName": MY_DB_NAME})
runner.set_database(DEFAULT_DATABASE)
def test_graph_get(gds: GraphDataScience) -> None:
gds.graph.project(GRAPH_NAME, "*", "*")
G = gds.graph.get(GRAPH_NAME)
assert G.name() == GRAPH_NAME
with pytest.raises(
ValueError,
match=f"No projected graph named 'bogusName' exists in current database '{gds.database()}'",
):
gds.graph.get("bogusName")
def test_graph_streamNodeProperty(gds: GraphDataScience) -> None:
G, _ = gds.graph.project(GRAPH_NAME, {"Node": {"properties": "x"}}, "*")
result = gds.graph.streamNodeProperty(G, "x", concurrency=2)
assert {e for e in result["propertyValue"]} == {1, 2, 3}
def test_graph_streamNodeProperty_without_arrow(gds_without_arrow: GraphDataScience) -> None:
G, _ = gds_without_arrow.graph.project(GRAPH_NAME, {"Node": {"properties": "x"}}, "*")
result = gds_without_arrow.graph.streamNodeProperty(G, "x", concurrency=2)
assert {e for e in result["propertyValue"]} == {1, 2, 3}
def test_graph_streamNodeProperties(gds: GraphDataScience) -> None:
G, _ = gds.graph.project(GRAPH_NAME, {"Node": {"properties": ["x", "y"]}}, "*")
result = gds.graph.streamNodeProperties(G, ["x", "y"], concurrency=2)
assert list(result.keys()) == ["nodeId", "nodeProperty", "propertyValue"]
x_values = result[result.nodeProperty == "x"]
assert {e for e in x_values["propertyValue"]} == {1, 2, 3}
y_values = result[result.nodeProperty == "y"]
assert {e for e in y_values["propertyValue"]} == {2, 3, 4}
def test_graph_streamNodeProperties_separate_property_columns(gds: GraphDataScience) -> None:
G, _ = gds.graph.project(GRAPH_NAME, {"Node": {"properties": ["x", "y"]}}, "*")
result = gds.graph.streamNodeProperties(G, ["x", "y"], separate_property_columns=True, concurrency=2)
assert list(result.keys()) == ["nodeId", "x", "y"]
assert {e for e in result["x"]} == {1, 2, 3}
assert {e for e in result["y"]} == {2, 3, 4}
def test_graph_streamNodeProperties_without_arrow(gds_without_arrow: GraphDataScience) -> None:
G, _ = gds_without_arrow.graph.project(GRAPH_NAME, {"Node": {"properties": ["x", "y"]}}, "*")
result = gds_without_arrow.graph.streamNodeProperties(G, ["x", "y"], concurrency=2)
assert list(result.keys()) == ["nodeId", "nodeProperty", "propertyValue"]
x_values = result[result.nodeProperty == "x"]
assert {e for e in x_values["propertyValue"]} == {1, 2, 3}
y_values = result[result.nodeProperty == "y"]
assert {e for e in y_values["propertyValue"]} == {2, 3, 4}
def test_graph_streamNodeProperties_without_arrow_separate_property_columns(
gds_without_arrow: GraphDataScience,
) -> None:
G, _ = gds_without_arrow.graph.project(GRAPH_NAME, {"Node": {"properties": ["x", "y"]}}, "*")
result = gds_without_arrow.graph.streamNodeProperties(G, ["x", "y"], separate_property_columns=True, concurrency=2)
assert list(result.keys()) == ["nodeId", "x", "y"]
assert {e for e in result["x"]} == {1, 2, 3}
assert {e for e in result["y"]} == {2, 3, 4}
def test_graph_streamRelationshipProperty(gds: GraphDataScience) -> None:
G, _ = gds.graph.project(GRAPH_NAME, "*", {"REL": {"properties": "relX"}})
result = gds.graph.streamRelationshipProperty(G, "relX", concurrency=2)
assert {e for e in result["propertyValue"]} == {4, 5, 6}
def test_graph_streamRelationshipProperty_without_arrow(gds_without_arrow: GraphDataScience) -> None:
G, _ = gds_without_arrow.graph.project(GRAPH_NAME, "*", {"REL": {"properties": "relX"}})
result = gds_without_arrow.graph.streamRelationshipProperty(G, "relX", concurrency=2)
assert {e for e in result["propertyValue"]} == {4, 5, 6}
def test_graph_streamRelationshipProperties(gds: GraphDataScience) -> None:
G, _ = gds.graph.project(GRAPH_NAME, "*", {"REL": {"properties": ["relX", "relY"]}})
result = gds.graph.streamRelationshipProperties(G, ["relX", "relY"], concurrency=2)
assert list(result.keys()) == [
"sourceNodeId",
"targetNodeId",
"relationshipType",
"relationshipProperty",
"propertyValue",
]
x_values = result[result.relationshipProperty == "relX"]
assert {e for e in x_values["propertyValue"]} == {4, 5, 6}
y_values = result[result.relationshipProperty == "relY"]
assert {e for e in y_values["propertyValue"]} == {5, 6, 7}
def test_graph_streamRelationshipProperties_separate_property_columns(gds: GraphDataScience) -> None:
G, _ = gds.graph.project(GRAPH_NAME, "*", {"REL": {"properties": ["relX", "relY"]}})
result = gds.graph.streamRelationshipProperties(G, ["relX", "relY"], separate_property_columns=True, concurrency=2)
assert list(result.keys()) == ["sourceNodeId", "targetNodeId", "relationshipType", "relX", "relY"]
assert {e for e in result["relX"]} == {4, 5, 6}
assert {e for e in result["relY"]} == {5, 6, 7}
def test_graph_streamRelationshipProperties_without_arrow(gds_without_arrow: GraphDataScience) -> None:
G, _ = gds_without_arrow.graph.project(GRAPH_NAME, "*", {"REL": {"properties": ["relX", "relY"]}})
result = gds_without_arrow.graph.streamRelationshipProperties(G, ["relX", "relY"], concurrency=2)
assert list(result.keys()) == [
"sourceNodeId",
"targetNodeId",
"relationshipType",
"relationshipProperty",
"propertyValue",
]
x_values = result[result.relationshipProperty == "relX"]
assert {e for e in x_values["propertyValue"]} == {4, 5, 6}
y_values = result[result.relationshipProperty == "relY"]
assert {e for e in y_values["propertyValue"]} == {5, 6, 7}
def test_graph_streamRelationshipProperties_without_arrow_separate_property_columns(
gds_without_arrow: GraphDataScience,
) -> None:
G, _ = gds_without_arrow.graph.project(GRAPH_NAME, "*", {"REL": {"properties": ["relX", "relY"]}})
result = gds_without_arrow.graph.streamRelationshipProperties(
G, ["relX", "relY"], separate_property_columns=True, concurrency=2
)
assert list(result.keys()) == ["sourceNodeId", "targetNodeId", "relationshipType", "relX", "relY"]
assert {e for e in result["relX"]} == {4, 5, 6}
assert {e for e in result["relY"]} == {5, 6, 7}
def test_graph_writeNodeProperties(gds: GraphDataScience) -> None:
G, _ = gds.graph.project(GRAPH_NAME, "*", "*")
gds.pageRank.mutate(G, mutateProperty="rank", dampingFactor=0.2, tolerance=0.3)
result = gds.graph.writeNodeProperties(G, ["rank"], concurrency=2)
assert result["propertiesWritten"] == 3
def test_graph_writeRelationship(gds: GraphDataScience) -> None:
G, _ = gds.graph.project(GRAPH_NAME, "*", "*")
gds.nodeSimilarity.mutate(G, mutateRelationshipType="SIMILAR", mutateProperty="score", similarityCutoff=0)
result = gds.graph.writeRelationship(G, "SIMILAR", "score", concurrency=2)
assert result["relationshipsWritten"] == 2
assert result["propertiesWritten"] == 2
@pytest.mark.compatible_with(min_inclusive=ServerVersion(2, 1, 0))
def test_graph_removeNodeProperties_21(gds: GraphDataScience) -> None:
G, _ = gds.graph.project(GRAPH_NAME, {"Node": {"properties": "x"}}, "*")
result = gds.graph.removeNodeProperties(G, ["x"], concurrency=2)
assert result["propertiesRemoved"] == 3
@pytest.mark.compatible_with(max_exclusive=ServerVersion(2, 1, 0))
def test_graph_removeNodeProperties_20(gds: GraphDataScience) -> None:
G, _ = gds.graph.project(GRAPH_NAME, {"Node": {"properties": "x"}}, "*")
result = gds.graph.removeNodeProperties(G, ["x"], ["*"], concurrency=2)
assert result["propertiesRemoved"] == 3
def test_graph_deleteRelationships(gds: GraphDataScience) -> None:
G, _ = gds.graph.project(GRAPH_NAME, "*", ["REL", "REL2"])
result = gds.graph.deleteRelationships(G, "REL")
assert result["deletedRelationships"] == 3
def test_graph_generate(gds: GraphDataScience) -> None:
G, result = gds.beta.graph.generate(GRAPH_NAME, 12, 2)
assert G.node_count() == 12
assert result["generateMillis"] >= 0
@pytest.mark.enterprise
@pytest.mark.compatible_with(min_inclusive=ServerVersion(2, 1, 0))
def test_graph_construct(gds: GraphDataScience) -> None:
nodes = pandas.DataFrame({"nodeId": [0, 1, 2, 3]})
relationships = pandas.DataFrame({"sourceNodeId": [0, 1, 2, 3], "targetNodeId": [1, 2, 3, 0]})
G = gds.alpha.graph.construct("hello", nodes, relationships)
assert G.name() == "hello"
assert G.node_count() == 4
assert G.relationship_count() == 4
G.drop()
@pytest.mark.enterprise
@pytest.mark.compatible_with(min_inclusive=ServerVersion(2, 1, 0))
def test_graph_construct_multiple_dfs(gds: GraphDataScience) -> None:
nodes = [ | pandas.DataFrame({"nodeId": [0, 1]}) | pandas.DataFrame |
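# --- Added sketch (assumption): the truncated test above presumably mirrors
# test_graph_construct but passes lists of DataFrames, e.g.:
#     nodes = [pandas.DataFrame({"nodeId": [0, 1]}), pandas.DataFrame({"nodeId": [2, 3]})]
#     relationships = [
#         pandas.DataFrame({"sourceNodeId": [0, 1], "targetNodeId": [1, 2]}),
#         pandas.DataFrame({"sourceNodeId": [2, 3], "targetNodeId": [3, 0]}),
#     ]
#     G = gds.alpha.graph.construct("hello", nodes, relationships)
#     assert G.node_count() == 4 and G.relationship_count() == 4
#     G.drop()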
##############################################
## Author: <NAME> ##
## Date of update: 2018/05/15 ##
## Description: Data Mining Final Project ##
## - Data Preprocessing ##
## - Remove NaN ##
## - Add opponent label ##
## - Binary encode W/L and Home/Away ##
## - Pair teams and opponents ##
## - Check games' validity ##
## - Rename and concatenate df ##
##############################################
import numpy as np
import pandas as pd
import time
#-----------------------#
# Main Function #
#-----------------------#
# @param: None
# @return: None
def main():
startTime = time.time()
# Load .csv
season = pd.read_csv('./team_season_all.csv')
playoff = pd.read_csv('./team_playoff_all.csv')
# Merge seasona and playoff
df_all = pd.concat([season, playoff], ignore_index=True)
# Remove NaN
df_all = cleanDataFrame(df_all)
df_all = dropNanScore(df_all)
# Add opponent label
df_all = addOpponentCol(df_all)
# Binary encode W/L and Home/Away
df_all['W/L'] = df_all['W/L'].map({'W':1, 'L':0})
df_all['Home/Away'] = df_all['Home/Away'].map({'Home':1, 'Away':0})
# Pair teams and opponents
df_team, df_oppo, invalid_idx = pairGamePlayers(df_all)
# Check games' validity
df_team, df_oppo, invalid_idx = checkGameValidity(df_team, df_oppo)
# Rename column: Attributes_A and Attributes_B for team and opponent, respectively
df_team = df_team.rename(columns=lambda x: x + '_A')
df_oppo = df_oppo.rename(columns=lambda x: x + '_B')
# Concatenate by column
df_output = pd.concat([df_team, df_oppo], axis=1)
# Save .csv
df_output.to_csv('./nba_preprocessed.csv', encoding='utf-8', index=False)
print("Execution time =", time.time() - startTime)
#-----------------------#
# Sub-Functions #
#-----------------------#
# @param df: pandas.DataFrame
# @return pandas.DataFrame
# NaN cleaner (Numerical)
def cleanDataFrame(df):
assert isinstance(df, pd.DataFrame), "df needs to be a pd.DataFrame"
df.dropna(inplace=True)
    indices_to_keep = ~df.isin([np.nan, np.inf, -np.inf]).any(axis=1)
return df[indices_to_keep].reset_index(drop=True)
# @param df: pandas.DataFrame
# @return pandas.DataFrame
# Drop objects which are NaN in Score's label (String)
def dropNanScore(df):
index = []
for idx, score in enumerate(df['Score']):
if score[:3] == 'NAN' or score[:3] == 'NaN':
index.append(idx)
print("Number of objects dropped =", len(index))
return df.drop(df.index[index]).reset_index(drop=True)
# @param df: pandas.DataFrame
# @return df: pandas.DataFrame
# Add opponent label to a game
def addOpponentCol(df):
opponent = [None] * len(df['Score'])
for idx, score in enumerate(df['Score']):
opponent[idx] = score[:3]
df['Opponent'] = opponent
return df
# @param df: pandas.DataFrame
# @return df_team, df_oppo: pandas.DataFrame
# Pair two teams in a single game by searching 'Date' and 'Opponent' labels.
def pairGamePlayers(df):
startTime = time.time()
invalid_idx = []
duplicate = 0
not_found = 0
# Declare empty dataframe w/ columns from existing dataframe
df_team = pd.DataFrame(columns = list(df)) # Team attributes
df_oppo = pd.DataFrame(columns = list(df)) # Opponent attributes
df_dupl = pd.DataFrame(columns = list(df)) # Duplicated dataframe
for idx, date, team in zip(df.index.tolist(), df['Date'], df['Team']):
df_oppo_searched = df.loc[lambda df: df.Date == date, :].loc[lambda df: df.Opponent == team, :]
if len(df_oppo_searched.index.tolist()) > 1:
duplicate += 1
df_dupl = | pd.concat([df_dupl, df_oppo_searched], ignore_index=True) | pandas.concat |
import numpy as np
import pandas as pd
import json
from datetime import datetime
from util import *
# prepare data for scenario projections
# the data is too large to include in the git repo but is already processed
# CMIP5 routed streamflow from USBR. To download full archive (~20 GB):
# wget -r ftp://gdo-dcp.ucllnl.org/pub/dcp/archive/cmip5/hydro/routed_streamflow/*
cmip5_scenarios = pd.read_csv('data/cmip5/scenario_names.csv').name.to_list()
path = '/Users/jon/scratch/cmip5-usbr/cmip5_ncar_day/'
nodes = json.load(open('data/nodes.json'))
rk = [k for k in nodes.keys() if nodes[k]['type'] == 'reservoir']
date_range = pd.date_range(start='1951-10-01', end='2099-09-30', freq='D')
for s in cmip5_scenarios:
print(s)
df = pd.DataFrame(index=date_range)
df['dowy'] = np.array([water_day(d) for d in df.index.dayofyear])
for r in rk:
f = path + 'streamflow_cmip5_ncar_day_%s.csv' % nodes[r]['CMIP5_ID']
dfk = pd.read_csv(f, index_col=0, parse_dates={'datetime': [0,1,2]},
date_parser=lambda x: datetime.strptime(x, '%Y %m %d'))[s]
df[r + '_inflow_cfs'] = dfk[date_range]
df.to_csv('data/cmip5/%s.csv.zip' % s, compression='zip')
# land use change projections (annual, interpolated to daily)
# from LUCAS, GCAM, and FORE-SCE models
# water demand multipliers developed from <NAME> (2019), Robinson et al. (2020)
lulc_scenarios = | pd.read_csv('data/lulc/scenario_names.csv') | pandas.read_csv |
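# --- Added sketch (assumption, not from the original script) ---
# The comment above says the annual land-use projections are interpolated to
# daily values; with pandas that step typically looks like this (the input is
# assumed to be indexed by a yearly DatetimeIndex):
def annual_to_daily(annual: pd.DataFrame) -> pd.DataFrame:
    # Upsample the annual series to daily frequency and interpolate linearly
    return annual.resample('D').interpolate(method='linear')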
## TODO: Consider adding in 'not suitable for test'
"""
Score population according to:
0: Number of hospitals
1: Mean time to thrombolysis
2: Max time to thrombolysis
3: Mean time to thrombectomy
4: Maximum time to thrombectomy
5: Minimum thrombolysis admissions to any one hospital
6: Maximum thrombolysis admissions to any one hospital
7: Minimum thrombectomy admissions to any one hospital
8: Maximum thrombectomy admissions to any one hospital
9: Proportion patients within target thrombolysis time
10: Proportion patients attending unit with target first admissions
11: Proportion patients meeting both thrombolysis targets
12: Proportion patients within target thrombectomy time
13: Proportion patients attending unit with target thrombectomy
14: Proportion patients meeting targets both thrombectomy targets
15: Proportion patients meeting all thrombolysis + thrombectomy targets
16: 95th percentile time for thrombolysis
17: 95th percentile time for thrombectomy
18: Total transfers
19: Total transfer time
20: Clinical outcome (good outcomes) with no treatment
21: Clinical outcome (good outcomes) with treatment
22: Additional good outcomes per 1000 admissions
23: Median time to thrombolysis
24: Median time to thrombectomy
25: Minimum clinical outcome
26: 5th percentile clinical outcome
27: 95th percentile clinical outcome
28: Maximum clinical outcome
"""
import numpy as np
import pandas as pd
from classes.clinical_outcome import Clinical_outcome
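# --- Added convenience constant (not in the original module): column labels
# mirroring the 29 result measures listed in the module docstring, handy when
# wrapping the scorer's `results` array in a labelled DataFrame, e.g.
# pd.DataFrame(scorer.results, columns=RESULT_COLUMNS).
RESULT_COLUMNS = [
    'hospitals', 'mean_time_to_thrombolysis', 'max_time_to_thrombolysis',
    'mean_time_to_thrombectomy', 'max_time_to_thrombectomy',
    'min_thrombolysis_admissions', 'max_thrombolysis_admissions',
    'min_thrombectomy_admissions', 'max_thrombectomy_admissions',
    'prop_within_thrombolysis_time_target', 'prop_attending_thrombolysis_unit_target',
    'prop_meeting_both_thrombolysis_targets', 'prop_within_thrombectomy_time_target',
    'prop_attending_thrombectomy_unit_target', 'prop_meeting_both_thrombectomy_targets',
    'prop_meeting_all_targets', 'thrombolysis_time_95th_percentile',
    'thrombectomy_time_95th_percentile', 'total_transfers', 'total_transfer_time',
    'good_outcomes_no_treatment', 'good_outcomes_with_treatment',
    'additional_good_outcomes_per_1000', 'median_time_to_thrombolysis',
    'median_time_to_thrombectomy', 'min_clinical_outcome',
    'clinical_outcome_5th_percentile', 'clinical_outcome_95th_percentile',
    'max_clinical_outcome',
]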
class Score_population_with_diagnostic():
def __init__(self, data, population):
number_of_scenarios = population.shape[0]
number_of_hospitals = population.shape[1]
number_of_areas = len(data.np_admissions)
total_admissions = sum(data.admissions)
# Set up results tables
self.results = np.zeros((number_of_scenarios, 29))
self.hospital_first_admissions = np.zeros((population.shape))
self.hospital_thrombectomy_admissions = np.zeros((population.shape))
node_results = np.zeros((number_of_areas, 47))
# Set up clinical outcome object
self.outcome = Clinical_outcome()
"""
Node results are results for each area (e.g. LSAO)
# General measures
0: Time to closest hospital
        1: Original (full hospital list) index # of closest hospital
        2: Time to closest CSC (direct)
        3: Original (full hospital list) index # of closest CSC (direct)
        4: Transfer time to closest CSC (drip and ship)
        5: Original (full hospital list) index # of closest CSC (drip and ship)
        6: Total drip and ship time: original travel + net delay + transfer
# Negative diagnostic test (assume go to closest)
7: Negative test admissions
8: Chosen thrombolysis centre
9: Time to chosen thrombolysis centre
10: Chosen thrombectomy centre
11: Time to chosen thrombectomy centre
12: Number of transfers to CSC
13: Distance of transfers to CSC
        14: Clinical benefit - no treatment
        15: Additional clinical benefit
        # Positive diagnostic test
        16: Positive test admissions
        17: Clinical benefit - no treatment
        18: Additional clinical benefit direct to CSC
        19: Additional clinical benefit drip and ship
        20: Choose CSC
        21: Chosen thrombolysis centre
        22: Time to chosen thrombolysis centre
23: Chosen thrombectomy centre
24: Time to chosen thrombectomy centre
25: Number of transfers to CSC
26: Distance of transfers to CSC
27: Clinical benefit from chosen location
        # Adjusted admissions (takes into account people where no action would
# be taken even with positive LVO diagnostic test)
28: Adjusted IVT admissions
29: Adjusted ET admissions
# Admission numbers
30: -ve test thrombolysis unit admissions
31: -ve test thrombectomy unit procedures
32: +ve test thrombolysis unit admissions
33: +ve test thrombectomy unit procedures
# Targets met
34: -ve test thrombolysis unit target admissions
35: -ve test thrombolysis target time
36: -ve test thrombolysis both targets
37: -ve test thrombectomy unit target admissions
38: -ve test thrombectomy target time
39: -ve test thrombectomy both targets
40: +ve test thrombolysis unit target admissions
41: +ve test thrombolysis target time
42: +ve test thrombolysis both targets
43: +ve test thrombectomy unit target admissions
44: +ve test thrombectomy target time
45: +ve test thrombectomy both targets
# Net clinical benefit
46: Net clinical benefit
"""
for i in range(number_of_scenarios):
# Create and apply mask to remove unused hospitals in scenario
if data.vary_et_centres:
# Have all hospitals open for IVT except forced closed ones
mask = data.hospitals['Fixed'] != -1
# Recalculate travel times to ET units
data.identify_closest_neighbouring_thrombectomy_unit(
population[i,:])
data.identify_closest_thrombectomy_unit_to_each_patient_area(
population[i,:])
data.convert_pandas_to_numpy()
else:
mask = population[i, :] == 1
_ = data.hospitals['hospital'].values
used_hospital_postcodes = _[mask]
_ = data.hospitals['index_#'].values
used_hospital_index_nos = _[mask]
used_travel_matrix = data.np_travel_matrix[:, mask]
# Node result 0: Identify time closest hospital
node_results[:, 0] = np.min(used_travel_matrix, axis=1)
            # Node result 1: Identify original (full hospital list) index # of
# closest hospital
local_id = np.argmin(used_travel_matrix, axis=1)
node_results[:, 1] = used_hospital_index_nos[local_id]
# Node result 2: Time to closest CSC (direct)
node_results[:, 2] = \
data.np_closest_thrombectomy_to_each_area_time
            # Node result 3: original (full hospital list) index # of closest
# CSC (direct)
node_results[:, 3] = \
data.np_closest_thrombectomy_to_each_area_index
# Node result 4 & 5: Transfer time and index (original) to
# closest CSC (drip'n'ship)
fancy_index = np.int_(node_results[:, 1])
node_results[:, 4] = \
data.np_closest_neighbouring_thrombectomy_unit_time[
fancy_index]
node_results[:, 5] = \
data.np_closest_neighbouring_thrombectomy_unit_index[
fancy_index]
# Node 6 Total drip and ship time (original travel + net delay +
# transfer)
node_results[:, 6] = (
node_results[:, 0] +
node_results[:, 4])
# Transfer delay if thrombectomy and thrombolysis centres are different
mask = node_results[:, 1] != node_results[:, 3]
node_results[mask, 6] += data.transfer_net_delay
## NEGATIVE DIAGNOSTIC TEST RESULTS
# Admissions with negative diagnostic test
node_results[:, 7] = data.admissions * \
(data.diagnostic_prop_negative)
# Create mask for direct to CSC
mask = node_results[:, 2] <= node_results[:, 0] + \
data.allowable_delay
# Chosen IVT unit
node_results[:, 8] = node_results[:, 1]
node_results[mask, 8] = node_results[mask, 3]
# IVT time
node_results[:, 9] = node_results[:, 0]
node_results[mask, 9] = node_results[mask, 2]
# Chosen ET unit
node_results[:, 10] = node_results[:, 5]
node_results[mask, 10] = node_results[mask, 3]
# ET time
node_results[:, 11] = node_results[:, 6]
node_results[mask, 11] = node_results[mask, 2]
# Number of transfers for drip and ship
node_results[:, 12] = \
(node_results[:, 7] *
data.diagnostic_neg_lvo *
data.prop_lvo_eligible_ivt *
data.prop_thrombolysed_lvo_receiving_thrombectomy)
node_results[mask, 12] = 0
# Distance of transfers for drip and ship
node_results[:, 13] = node_results[:, 12] * node_results[:, 4]
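# Rough worked example with assumed proportions: 100 test-negative
# admissions * 0.3 LVO * 0.4 eligible for IVT * 0.85 of thrombolysed LVO
# receiving thrombectomy gives an expected 10.2 transfers (column 12) for an
# area using drip and ship, and zero where patients go direct to the CSC.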
# Clinical benefit for negative diagnostic test
admissions = data.admissions.values * data.diagnostic_prop_negative
areas = len(admissions)
mimic = np.ones(areas) * data.diagnostic_neg_mimic
ich = np.ones(areas) * data.diagnostic_neg_ich
nlvo = np.ones(areas) * data.diagnostic_neg_nlvo
lvo = np.ones(areas) * data.diagnostic_neg_lvo
prop_nlvo_eligible_treatment = np.zeros(len(admissions))
prop_nlvo_eligible_treatment.fill(data.prop_nlvo_eligible_treatment)
prop_lvo_eligible_treatment = np.zeros(len(admissions))
prop_lvo_eligible_treatment.fill(data.prop_lvo_eligible_ivt)
door_to_needle = data.door_to_needle
onset_to_needle = (data.onset_to_travel +
door_to_needle +
node_results[:, 0])
onset_to_puncture = (data.onset_to_travel +
data.door_to_puncture +
node_results[:, 11])
# Get outcome with no treatment
no_treatment_outcome = (
data.diagnostic_neg_lvo * 0.1328 +
data.diagnostic_neg_nlvo * 0.4622 +
data.diagnostic_neg_ich * 0.24 +
data.diagnostic_neg_mimic * 1)
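# The constants above (0.1328, 0.4622, 0.24, 1) are presumably the baseline
# probabilities of a good outcome without treatment for LVO, nLVO, ICH and
# stroke mimics respectively; they weight the untreated-outcome estimate by
# the case mix of the test-negative group.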
node_results[:, 14] = np.ones(areas) * no_treatment_outcome
# Get outcome with treatment
outcome = self.outcome.calculate_outcome_for_all(
mimic,
ich,
nlvo,
lvo,
onset_to_needle,
onset_to_puncture,
prop_nlvo_eligible_treatment,
prop_lvo_eligible_treatment,
data.prop_thrombolysed_lvo_receiving_thrombectomy)
# Calculate additional clinical benefit from treatment
node_results[:, 15] = outcome - node_results[:, 14]
## POSITIVE DIAGNOSTIC TEST RESULTS
# To choose between direct to CSC or drip and ship for each area,
# compare clinical outcomes, and choose the best clinical outcome
# Record admissions for positive test
admissions = data.admissions.values * data.diagnostic_prop_positive
node_results[:, 16] = admissions
# Clinical benefit direct to CSC
door_to_needle = data.door_to_needle
onset_to_needle = (data.onset_to_travel +
door_to_needle +
node_results[:, 2])
onset_to_puncture = \
data.onset_to_travel + data.door_to_puncture + \
node_results[:, 2]
# Get outcome with no treatment
no_treatment_outcome = (
data.diagnostic_pos_lvo * 0.1328 +
data.diagnostic_pos_nlvo * 0.4622 +
data.diagnostic_pos_ich * 0.24 +
data.diagnostic_pos_mimic * 1)
node_results[:, 17] = no_treatment_outcome
# Get clinical benefit with treatment
mimic = np.ones(areas) * data.diagnostic_pos_mimic
ich = np.ones(areas) * data.diagnostic_pos_ich
nlvo = np.ones(areas) * data.diagnostic_pos_nlvo
lvo = np.ones(areas) * data.diagnostic_pos_lvo
outcome = self.outcome.calculate_outcome_for_all(
mimic,
ich,
nlvo,
lvo,
onset_to_needle,
onset_to_puncture,
prop_nlvo_eligible_treatment,
prop_lvo_eligible_treatment,
data.prop_thrombolysed_lvo_receiving_thrombectomy)
# Calculate added benefit with direct to thrombectomy centre
node_results[:, 18] = outcome - node_results[:, 17]
# Clinical benefit drip and ship
door_to_needle = data.door_to_needle
onset_to_needle = (data.onset_to_travel +
door_to_needle +
node_results[:, 9])
onset_to_puncture = (data.onset_to_travel +
data.door_to_puncture +
node_results[:, 6])
outcome = self.outcome.calculate_outcome_for_all(
mimic,
ich,
nlvo,
lvo,
onset_to_needle,
onset_to_puncture,
prop_nlvo_eligible_treatment,
prop_lvo_eligible_treatment,
data.prop_thrombolysed_lvo_receiving_thrombectomy)
node_results[:, 19] = outcome - node_results[:, 17]
# Create mask for direct to CSC: True where going direct gives a
# significantly better predicted outcome than drip and ship
mask = node_results[:, 18] >= node_results[:, 19] + \
(data.diagnostic_outcome_signifciant/1000)
# Record direct to CSC (convert Boolean to 0/1)
node_results[:, 20] = mask * 1
# Chosen IVT unit
node_results[:, 21] = node_results[:, 1]
node_results[mask, 21] = node_results[mask, 3]
# IVT time
node_results[:, 22] = node_results[:, 0]
node_results[mask, 22] = node_results[mask, 2]
# Chosen ET unit
node_results[:, 23] = node_results[:, 5]
node_results[mask, 23] = node_results[mask, 3]
# ET time
node_results[:, 24] = node_results[:, 6]
node_results[mask, 24] = node_results[mask, 2]
# Number of transfers for drip and ship
node_results[:, 25] = \
(node_results[:, 16] *
data.diagnostic_pos_lvo *
data.prop_lvo_eligible_ivt *
data.prop_thrombolysed_lvo_receiving_thrombectomy)
node_results[mask, 25] = 0
# Distance of transfers for drip and ship
node_results[:, 26] = node_results[:, 25] * node_results[:, 4]
# Clinical benefit of chosen hospital
node_results[:, 27] = node_results[:, 19]
node_results[mask, 27] = node_results[mask, 18]
# Adjusted admissions
# IVT admitting unit includes the positive diagnostic test fraction
# where no diversion would take place (e.g. patients certain to be
# outside the treatment window); ET admitting unit numbers are reduced
# by the same amount
# ADMISSION NUMBERS
# Adjusted IVT admissions (includes 'no action on test' patients)
node_results[:, 28] = \
node_results[:, 7] + \
node_results[:, 16] * (1 - data.proportion_tested)
# Adjust ET admissions (reduced by 'no action on test' patients)
node_results[:, 29] = node_results[:, 16] * data.proportion_tested
# Non-adjusted admissions are used to calculate total thrombectomies
non_adjusted_admissions_concatenated = np.concatenate(
(node_results[:, 7], node_results[:, 16]))
# Adjusted admissions are used to calculate first admitting hospital
adjusted_admissions_concatenated = np.concatenate(
(node_results[:, 28], node_results[:, 29]))
admitting_ivt_hospital = np.concatenate((node_results[:, 8],
node_results[:, 21]))
admitting_et_hospital = np.concatenate((node_results[:, 10],
node_results[:, 23]))
thrombolysis_admissions_by_hospital = np.bincount(
np.int_(admitting_ivt_hospital),
weights=adjusted_admissions_concatenated)
thrombectomy_admissions_by_hospital = np.bincount(
np.int_(admitting_et_hospital),
weights=non_adjusted_admissions_concatenated)
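# np.bincount with weights sums the (fractional) admissions routed to each
# hospital index; hospitals with the highest index numbers that receive no
# admissions are simply absent from the result, which is why both arrays are
# zero-padded to `number_of_hospitals` further down.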
overall_proportion_of_lvo_eligible_for_treatment = (
(data.diagnostic_prop_positive *
data.diagnostic_pos_lvo *
data.prop_lvo_eligible_ivt) +
((data.diagnostic_prop_negative) *
data.diagnostic_neg_lvo *
data.prop_lvo_eligible_ivt))
thrombectomy_admissions_by_hospital *= \
(overall_proportion_of_lvo_eligible_for_treatment *
data.prop_thrombolysed_lvo_receiving_thrombectomy)
# Fill in missing hospital counts at end of array
if len(thrombolysis_admissions_by_hospital) < number_of_hospitals:
zeros_to_add = number_of_hospitals - \
len(thrombolysis_admissions_by_hospital)
thrombolysis_admissions_by_hospital = \
np.hstack((thrombolysis_admissions_by_hospital,
np.zeros(zeros_to_add)))
if len(thrombectomy_admissions_by_hospital) < number_of_hospitals:
zeros_to_add = number_of_hospitals - \
len(thrombectomy_admissions_by_hospital)
thrombectomy_admissions_by_hospital = \
np.hstack((thrombectomy_admissions_by_hospital,
np.zeros(zeros_to_add)))
# Record admission results
self.hospital_first_admissions[i, :] = \
thrombolysis_admissions_by_hospital
self.hospital_thrombectomy_admissions[i, :] = \
thrombectomy_admissions_by_hospital
# Add in unit admission numbers to node results
# -ve test thrombolysis unit admissions
node_results[:, 30] = \
thrombolysis_admissions_by_hospital[np.int_ \
(node_results[:, 8])]
# -ve test thrombectomy unit procedures
node_results[:, 31] = \
(thrombectomy_admissions_by_hospital \
[np.int_(node_results[:, 10])])
# +ve test thrombolysis unit admissions
node_results[:, 32] = \
thrombolysis_admissions_by_hospital[np.int_ \
(node_results[:, 21])]
# +ve test thrombectomy unit procedures
node_results[:, 33] = \
(thrombectomy_admissions_by_hospital \
[np.int_(node_results[:, 23])])
# RECORD TARGETS MET
# -ve test thrombolysis unit target admissions
node_results[:, 34] = \
node_results[:, 30] >= data.target_thrombolysis_admissions
# -ve test thrombolysis target time
node_results[:, 35] = \
node_results[:, 9] <= data.target_travel_thrombolysis
# -ve test thrombolysis both targets
node_results[:, 36] = \
np.logical_and(node_results[:, 34], node_results[:, 35])
# -ve test thrombectomy unit target admissions
node_results[:, 37] = \
node_results[:, 31] >= data.target_thrombectomy_admissions
# -ve test thrombectomy target time
node_results[:, 38] = \
node_results[:, 11] <= data.target_travel_thrombectomy
# -ve test thrombectomy both targets
node_results[:, 39] = \
np.logical_and(node_results[:, 37], node_results[:, 38])
# +ve test thrombolysis unit target admissions
node_results[:, 40] = \
node_results[:, 32] >= data.target_thrombolysis_admissions
# +ve test thrombolysis target time
node_results[:, 41] = \
node_results[:, 22] <= data.target_travel_thrombolysis
# +ve test thrombolysis both targets
node_results[:, 42] = \
np.logical_and(node_results[:, 40], node_results[:, 41])
# +ve test thrombectomy unit target admissions
node_results[:, 43] = \
node_results[:, 33] >= data.target_thrombectomy_admissions
# +ve test thrombectomy target time
node_results[:, 44] = \
node_results[:, 24] <= data.target_travel_thrombectomy
# +ve test thrombectomy both targets
node_results[:, 45] = \
np.logical_and(node_results[:, 43], node_results[:, 44])
# Net clinical benefit (weighted benefit by diagnostic test
# proportion)
node_results[:, 46] = (
node_results[:, 15] * data.diagnostic_prop_negative +
node_results[:, 27] * data.diagnostic_prop_positive)
# Save full node results (not usually used)
if data.save_node_results:
filename = './' + data.output_location_node_results + \
str(i) + '.csv'
node_df = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import difflib as dfl
import click
import random
import datetime
import moment
common_companies = ['Self Employed', 'Amazon Web Services']
common_positions = {
'Chief Executive Officer': 'CEO',
'CEO': 'CEO',
'Co-Founder & CEO': 'CEO',
'CEO & Founder': 'CEO',
'Vice President': 'VP'
}
def getPositionMatch(position):
#for p in common_positions.keys:
matches = dfl.get_close_matches(position, common_positions.keys(), 1)
if len(matches) > 0:
return common_positions[matches[0]]
return position
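# Example of the intended behaviour (illustrative input): a scraped title of
# 'Chief Executive Officer' fuzzy-matches a key of common_positions via
# difflib.get_close_matches and is normalised to 'CEO'; titles with no close
# match are returned unchanged.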
def getCompanyMatch(company):
#for c in common_companies:
matches = dfl.get_close_matches(company, common_companies, 1)
if len(matches) > 0:
return matches[0]
return company
def closeMatches(df, row, fieldName, matchFunction):
#print("\n\n====")
#print(row)
# if not df.iloc[row]: return "No Company"
c1 = str(df.iloc[row][fieldName])
if not c1: return "None"
#print(c1)
return matchFunction(c1)
def summarizeByField(df, fieldName, matchFunction):
print(matchFunction("self-employed"))
g = df.groupby(lambda row: closeMatches(df, row, fieldName, matchFunction))
gSorted = g.size().sort_values(ascending=False)
print("\n==== SUMMARY ===")
print(gSorted.head(50))
#return
for i in range(0,50):
fieldValue = gSorted.index[i]
size = gSorted[i]
peopleList = g.indices[fieldValue]
print (fieldValue, " :: ", size)
#print (peopleList)
randomPeople = random.sample(list(peopleList), min(5, size))
for j in randomPeople:
randomPerson = df.iloc[j]
print(" ", randomPerson['First Name'], randomPerson['Last Name'], " (", \
randomPerson['Position'], ", ", randomPerson['Company'], ")")
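# summarizeByField groups the connections by the normalised field value,
# prints the 50 largest groups, and for each group prints up to five
# randomly sampled people as concrete examples.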
def messagesOld(mdf):
#print(mdf)
mdf['OTHER'] = mdf.apply(lambda x: x['TO'] if x['FROM'] == '<NAME>' else x['FROM'], axis=1)
filteredStart = mdf[mdf['DATETIME'] < pd.to_datetime(moment.date("1 year ago").date)]
filteredByDate = filteredStart[filteredStart['DATETIME'] > pd.to_datetime(moment.date("2 years ago").date)]
fileredByFolder = filteredByDate[filteredByDate['FOLDER'] == 'INBOX']
groupedConversations = filteredByDate.groupby('OTHER')
#multipleConversations = groupedConversations.filter(lambda x: len(x) > 1)
#print(multipleConversations)
#sampleConversations = multipleConversations.sample(frac=0.1)
for key, conversations in groupedConversations.groups.items():
if len(conversations) <2:
continue
sent = 0
for c in conversations:
if mdf.iloc[c]['FROM'] == '<NAME>':
sent = sent + 1
if sent == 0:
continue
if random.random() > 0.1:
continue
print("\n===\n{}\n===".format(key))
for c in conversations:
print(" [{}] {}".format(mdf.iloc[c]['DATETIME'], mdf.iloc[c]['CONTENT']))
return
@click.command()
@click.option('--linkedindir', default="exported", help='Folder where the LinkedIn Data is unzipped')
@click.option('--company/--no-company', default=True, help="Print Company Analysis")
@click.option('--position/--no-position', default=True, help="Print Position Analysis")
def linkedinAnalysis(linkedindir, company, position):
"""Analyzes your LinkedIn Data Export to find people you can get in touch with"""
# execute only if run as a script
connectionscsv = linkedindir + "/Connections.csv"
messagescsv = linkedindir + "/Messages.csv"
print("Reading file... ", connectionscsv)
df = | pd.read_csv(connectionscsv) | pandas.read_csv |
# Importing libraries
import numpy as np
import pandas as pd
import json
from datetime import datetime
def drop_columns(df:pd.DataFrame):
"""Drops unused columns."""
df = df.drop(columns=['friends','is_backing','is_starred','permissions','currency_symbol','photo','profile','source_url','urls','currency_trailing_code', 'current_currency','disable_communication', 'is_starrable','spotlight','staff_pick', 'static_usd_rate','usd_type','converted_pledged_amount', 'pledged','backers_count','usd_pledged','creator','location'])
return df
def drop_rows(df:pd.DataFrame):
"""Drops the rows of canceled, suspended and live states as well as rows of duplicates in the column id."""
df = df.drop(df[(df.state == 'canceled')|(df.state == 'suspended')|(df.state == 'live')].index)
df = df.drop_duplicates(subset=['id'], keep='last')
df = df.drop(columns='id')
return df
def get_category(df:pd.DataFrame):
"""Extracts the category parent id and makes a new column for the feature and drops the column category."""
a = df.category.apply(json.loads).values.tolist()
df = df.join(pd.DataFrame.from_records(a)["parent_id"])
df = df.rename(columns={"parent_id": "category_parent_id"})
df = df.drop(columns='category')
return df
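# Assumed shape of the raw column: each entry in `category` is a JSON string
# such as '{"id": 52, "parent_id": 12, "name": "Hardware", ...}', so
# json.loads followed by DataFrame.from_records exposes parent_id as its own
# column before the original JSON column is dropped.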
def get_creator(df:pd.DataFrame):
"""Extracts the creator name and makes a new column for the feature and drops the column creator_name."""
a = df.creator.apply(json.loads).values.tolist()
df = df.join( | pd.DataFrame.from_records(a) | pandas.DataFrame.from_records |
# -*- coding: utf-8 -*-
import csv
import os
import platform
import codecs
import re
import sys
from datetime import datetime
import pytest
import numpy as np
from pandas._libs.lib import Timestamp
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex
from pandas import compat
from pandas.compat import (StringIO, BytesIO, PY3,
range, lrange, u)
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas.io.common import URLError
from pandas.io.parsers import TextFileReader, TextParser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = 'Only length-1 decimal markers supported'
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(data), decimal='')
def test_bad_stream_exception(self):
# Issue 13652:
# This test validates that both python engine
# and C engine will raise UnicodeDecodeError instead of
# c engine raising ParserError and swallowing exception
# that caused read to fail.
handle = open(self.csv_shiftjs, "rb")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup('utf-8')
# stream must be binary UTF8
stream = codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader,
codec.streamwriter)
if compat.PY3:
msg = "'utf-8' codec can't decode byte"
else:
msg = "'utf8' codec can't decode byte"
with tm.assert_raises_regex(UnicodeDecodeError, msg):
self.read_csv(stream)
stream.close()
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
self.read_csv(fname, index_col=0, parse_dates=True)
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
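# With thousands=',' the field '2,334' above is read as the integer 2334,
# so column B comes back as a numeric column rather than strings.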
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
assert isinstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# see gh-8217
# Series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
assert not result._is_view
def test_malformed(self):
# see gh-6607
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#')
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
it.read(5)
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read(3)
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read()
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# skipfooter
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#',
skipfooter=1)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>""" # noqa
pytest.raises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
assert len(df) == 3
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]], dtype=np.int64)
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
tm.assert_index_equal(df.columns,
Index(['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4']))
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
expected = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]})
out = self.read_csv(StringIO(data))
tm.assert_frame_equal(out, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns, pd.Index(['A', 'B', 'C', 'D']))
assert df.index.name == 'index'
assert isinstance(
df.index[0], (datetime, np.datetime64, Timestamp))
assert df.values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns,
pd.Index(['A', 'B', 'C', 'D', 'E']))
assert isinstance(df.index[0], (datetime, np.datetime64, Timestamp))
assert df.loc[:, ['A', 'B', 'C', 'D']].values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = self.read_table(fin, sep=";", encoding="utf-8", header=None)
assert isinstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
pytest.raises(ValueError, self.read_csv, StringIO(data))
def test_read_duplicate_index_explicit(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
result = self.read_table(StringIO(data), sep=',', index_col=0)
expected = self.read_table(StringIO(data), sep=',', ).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# make sure an error isn't thrown
self.read_csv(StringIO(data))
self.read_table(StringIO(data), sep=',')
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
assert data['A'].dtype == np.bool_
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.float64
assert data['B'].dtype == np.int64
def test_read_nrows(self):
expected = self.read_csv(StringIO(self.data1))[:3]
df = self.read_csv(StringIO(self.data1), nrows=3)
tm.assert_frame_equal(df, expected)
# see gh-10476
df = self.read_csv(StringIO(self.data1), nrows=3.0)
tm.assert_frame_equal(df, expected)
msg = r"'nrows' must be an integer >=0"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=1.2)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=-1)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# with invalid chunksize value:
msg = r"'chunksize' must be an integer >=1"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=1.3)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=0)
def test_read_chunksize_and_nrows(self):
# gh-15755
# With nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=2, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# chunksize > nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# with changing "size":
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(reader.get_chunk(size=2), df.iloc[:2])
tm.assert_frame_equal(reader.get_chunk(size=4), df.iloc[2:5])
with pytest.raises(StopIteration):
reader.get_chunk(size=3)
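# With nrows=5, the first get_chunk(size=2) returns rows 0-1, the request for
# 4 more rows is truncated to the remaining rows 2-4, and a third call has
# nothing left to read, so it raises StopIteration.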
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
assert len(piece) == 2
def test_read_chunksize_generated_index(self):
# GH 12185
reader = self.read_csv(StringIO(self.data1), chunksize=2)
df = self.read_csv(StringIO(self.data1))
tm.assert_frame_equal(pd.concat(reader), df)
reader = self.read_csv(StringIO(self.data1), chunksize=2, index_col=0)
df = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(pd.concat(reader), df)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# See gh-6607
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
assert isinstance(treader, TextFileReader)
# gh-3967: stopping iteration when chunksize is specified
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
assert len(result) == 3
tm.assert_frame_equal(pd.concat(result), expected)
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# test bad parameter (skipfooter)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skipfooter=1)
pytest.raises(ValueError, reader.read, 3)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_blank_df(self):
# GH 14545
data = """a,b
"""
df = self.read_csv(StringIO(data), header=[0])
expected = DataFrame(columns=['a', 'b'])
tm.assert_frame_equal(df, expected)
round_trip = self.read_csv(StringIO(
expected.to_csv(index=False)), header=[0])
tm.assert_frame_equal(round_trip, expected)
data_multiline = """a,b
c,d
"""
df2 = self.read_csv(StringIO(data_multiline), header=[0, 1])
cols = MultiIndex.from_tuples([('a', 'c'), ('b', 'd')])
expected2 = DataFrame(columns=cols)
tm.assert_frame_equal(df2, expected2)
round_trip = self.read_csv(StringIO(
expected2.to_csv(index=False)), header=[0, 1])
tm.assert_frame_equal(round_trip, expected2)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
assert df.index.name is None
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = self.read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pandas-dev/pandas/master/'
'pandas/tests/io/parser/data/salaries.csv')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salaries.csv')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@pytest.mark.slow
def test_file(self):
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salaries.csv')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
pytest.skip("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_path_pathlib(self):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(df.to_csv,
lambda p: self.read_csv(p, index_col=0))
tm.assert_frame_equal(df, result)
def test_path_localpath(self):
df = tm.makeDataFrame()
result = tm.round_trip_localpath(
df.to_csv,
lambda p: self.read_csv(p, index_col=0))
tm.assert_frame_equal(df, result)
def test_nonexistent_path(self):
# gh-2428: pls no segfault
# gh-14086: raise more helpful FileNotFoundError
path = '%s.csv' % tm.rands(10)
pytest.raises(compat.FileNotFoundError, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
assert result['D'].isna()[1:].all()
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
assert pd.isna(result.iloc[0, 29])
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
s.close()
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
assert len(result) == 50
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
assert len(result) == 50
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
assert got == expected
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"''' # noqa
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
assert result['SEARCH_TERM'][2] == ('SLAGBORD, "Bergslagen", '
'IKEA:s 1700-tals serie')
tm.assert_index_equal(result.columns,
Index(['SEARCH_TERM', 'ACTUAL_URL']))
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
tm.assert_series_equal(result['Numbers'], expected['Numbers'])
def test_chunks_have_consistent_numerical_type(self):
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
assert type(df.a[0]) is np.float64
assert df.a.dtype == np.float
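# The column mixes roughly a million integers with two floats; however the
# engine chunks the input internally, it must silently coerce the whole
# column to float64 rather than emitting a DtypeWarning.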
def test_warn_if_chunks_have_mismatched_type(self):
warning_type = False
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
# see gh-3866: if chunks are different types and can't
# be coerced using numerical types, then issue warning.
if self.engine == 'c' and self.low_memory:
warning_type = DtypeWarning
with tm.assert_produces_warning(warning_type):
df = self.read_csv(StringIO(data))
assert df.a.dtype == np.object
def test_integer_overflow_bug(self):
# see gh-2601
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
assert result[0].dtype == np.float64
result = self.read_csv(StringIO(data), header=None, sep=r'\s+')
assert result[0].dtype == np.float64
def test_catch_too_many_names(self):
# see gh-5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
pytest.raises(ValueError, self.read_csv, StringIO(data),
header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
# see gh-3374, gh-6607
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep=r'\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(self):
# see gh-10022
data = '\n hello\nworld\n'
result = self.read_csv(StringIO(data), header=None)
assert len(result) == 2
# see gh-9735: this issue is C parser-specific (bug when
# parsing whitespace and characters at chunk boundary)
if self.engine == 'c':
chunk1 = 'a' * (1024 * 256 - 2) + '\na'
chunk2 = '\n a'
result = self.read_csv(StringIO(chunk1 + chunk2), header=None)
expected = DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
# see gh-10184
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=0)
expected = DataFrame([], columns=['y'], index=Index([], name='x'))
tm.assert_frame_equal(result, expected)
def test_empty_with_multiindex(self):
# see gh-10467
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=['x', 'y'])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_reversed_multiindex(self):
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_float_parser(self):
# see gh-9565
data = '45e-1,4.5,45.,inf,-inf'
result = self.read_csv(StringIO(data), header=None)
expected = DataFrame([[float(s) for s in data.split(',')]])
tm.assert_frame_equal(result, expected)
def test_scientific_no_exponent(self):
# see gh-12215
df = DataFrame.from_items([('w', ['2e']), ('x', ['3E']),
('y', ['42e']), ('z', ['632E'])])
data = df.to_csv(index=False)
for prec in self.float_precision_choices:
df_roundtrip = self.read_csv(
StringIO(data), float_precision=prec)
tm.assert_frame_equal(df_roundtrip, df)
def test_int64_overflow(self):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
# 13007854817840016671868 > UINT64_MAX, so this
# will overflow and return object as the dtype.
result = self.read_csv(StringIO(data))
assert result['ID'].dtype == object
# 13007854817840016671868 > UINT64_MAX, so attempts
# to cast to either int64 or uint64 will result in
# an OverflowError being raised.
for conv in (np.int64, np.uint64):
pytest.raises(OverflowError, self.read_csv,
StringIO(data), converters={'ID': conv})
# These numbers fall right inside the int64-uint64 range,
# so they should be parsed with the matching integer dtype.
ui_max = np.iinfo(np.uint64).max
i_max = np.iinfo(np.int64).max
i_min = np.iinfo(np.int64).min
for x in [i_max, i_min, ui_max]:
result = self.read_csv(StringIO(str(x)), header=None)
expected = DataFrame([x])
tm.assert_frame_equal(result, expected)
# These numbers fall just outside the int64-uint64 range,
# so they should be parsed as string.
too_big = ui_max + 1
too_small = i_min - 1
for x in [too_big, too_small]:
result = self.read_csv(StringIO(str(x)), header=None)
expected = DataFrame([str(x)])
tm.assert_frame_equal(result, expected)
# No numerical dtype can hold both negative and uint64 values,
# so they should be cast as string.
data = '-1\n' + str(2**63)
expected = DataFrame([str(-1), str(2**63)])
result = self.read_csv(StringIO(data), header=None)
tm.assert_frame_equal(result, expected)
data = str(2**63) + '\n-1'
expected = DataFrame([str(2**63), str(-1)])
result = self.read_csv(StringIO(data), header=None)
tm.assert_frame_equal(result, expected)
def test_empty_with_nrows_chunksize(self):
# see gh-9535
expected = DataFrame([], columns=['foo', 'bar'])
result = self.read_csv(StringIO('foo,bar\n'), nrows=10)
tm.assert_frame_equal(result, expected)
result = next(iter(self.read_csv(
StringIO('foo,bar\n'), chunksize=10)))
tm.assert_frame_equal(result, expected)
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
result = self.read_csv(StringIO('foo,bar\n'),
nrows=10, as_recarray=True)
result = DataFrame(result[2], columns=result[1],
index=result[0])
tm.assert_frame_equal(DataFrame.from_records(
result), expected, check_index_type=False)
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
result = next(iter(self.read_csv(StringIO('foo,bar\n'),
chunksize=10, as_recarray=True)))
result = DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(DataFrame.from_records(result), expected,
check_index_type=False)
def test_eof_states(self):
# see gh-10728, gh-10548
# With skip_blank_lines = True
expected = DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
# gh-10728: WHITESPACE_LINE
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# gh-10548: EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# EAT_CRNL_NOP
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# EAT_COMMENT
data = 'a,b,c\n4,5,6#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# SKIP_LINE
data = 'a,b,c\n4,5,6\nskipme'
result = self.read_csv(StringIO(data), skiprows=[2])
tm.assert_frame_equal(result, expected)
# With skip_blank_lines = False
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(
StringIO(data), comment='#', skip_blank_lines=False)
expected = DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# IN_FIELD
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = DataFrame(
[['4', 5, 6], [' ', None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# EAT_CRNL
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = DataFrame(
[[4, 5, 6], [None, None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# Should produce exceptions
# ESCAPED_CHAR
data = "a,b,c\n4,5,6\n\\"
pytest.raises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# ESCAPE_IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"\\'
pytest.raises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"'
pytest.raises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
def test_uneven_lines_with_usecols(self):
# See gh-12203
csv = r"""a,b,c
0,1,2
3,4,5,6,7
8,9,10
"""
# make sure that an error is still thrown
# when the 'usecols' parameter is not provided
msg = r"Expected \d+ fields in line \d+, saw \d+"
with tm.assert_raises_regex(ValueError, msg):
df = self.read_csv(StringIO(csv))
expected = DataFrame({
'a': [0, 3, 8],
'b': [1, 4, 9]
})
usecols = [0, 1]
df = self.read_csv(StringIO(csv), usecols=usecols)
tm.assert_frame_equal(df, expected)
usecols = ['a', 'b']
df = self.read_csv(StringIO(csv), usecols=usecols)
tm.assert_frame_equal(df, expected)
def test_read_empty_with_usecols(self):
# See gh-12493
names = ['Dummy', 'X', 'Dummy_2']
usecols = names[1:2] # ['X']
# first, check to see that the response of
# parser when faced with no provided columns
# throws the correct error, with or without usecols
errmsg = "No columns to parse from file"
with tm.assert_raises_regex(EmptyDataError, errmsg):
self.read_csv(StringIO(''))
with tm.assert_raises_regex(EmptyDataError, errmsg):
self.read_csv(StringIO(''), usecols=usecols)
expected = DataFrame(columns=usecols, index=[0], dtype=np.float64)
df = self.read_csv(StringIO(',,'), names=names, usecols=usecols)
tm.assert_frame_equal(df, expected)
expected = DataFrame(columns=usecols)
df = self.read_csv( | StringIO('') | pandas.compat.StringIO |
from sklearn.model_selection import train_test_split
import pandas as pd
import tensorflow as tf
import tensorflow_hub as hub
from datetime import datetime
import bert
from bert import run_classifier
from bert import optimization
from bert import tokenization
from tensorflow import keras
import os
import re
# Load all files from a directory in a DataFrame.
def load_directory_data(directory):
data = {}
data["sentence"] = []
data["sentiment"] = []
for file_path in os.listdir(directory):
with tf.gfile.GFile(os.path.join(directory, file_path), "r") as f:
data["sentence"].append(f.read())
data["sentiment"].append(re.match("\d+_(\d+)\.txt", file_path).group(1))
return | pd.DataFrame.from_dict(data) | pandas.DataFrame.from_dict |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# !kaggle datasets download -d ejlok1/toronto-emotional-speech-set-tess
# !kaggle datasets download -d ejlok1/cremad
# In[ ]:
# !kaggle datasets download -d ejlok1/surrey-audiovisual-expressed-emotion-savee
# !kaggle datasets download -d uwrfkaggler/ravdess-emotional-speech-audio
# In[1]:
import pandas as pd
import torch
import numpy as np
import torchaudio
import requests
import matplotlib.pyplot as plt
import librosa
import librosa.display
import IPython.display as ipd # To play sound in the notebook
import os
SAVEE = '/Users/devpatelio/Downloads/Coding/Python/pyTorch/audio_mood/ALL'
RAV = '/Users/devpatelio/Downloads/Coding/Python/pyTorch/audio_mood/ravdess-emotional-speech-audio/audio_speech_actors_01-24'
TESS = '/Users/devpatelio/Downloads/Coding/Python/pyTorch/audio_mood/TESS Toronto emotional speech set data'
CREMA = '/Users/devpatelio/Downloads/Coding/Python/pyTorch/audio_mood/AudioWAV'
# In[2]:
dirlist_SAVEE = os.listdir(SAVEE)
emotion_SAVEE = []
path_SAVEE = []
def PrintThree(filename):
return filename[-8:-6]
for i in dirlist_SAVEE:
if PrintThree(i)=='_a':
emotion_SAVEE.append('male_angry')
elif PrintThree(i)=='_d':
emotion_SAVEE.append('male_disgust')
elif PrintThree(i)=='_f':
emotion_SAVEE.append('male_fear')
elif PrintThree(i)=='_h':
emotion_SAVEE.append('male_happy')
elif PrintThree(i)=='sa':
emotion_SAVEE.append('male_sad')
elif PrintThree(i)=='su':
emotion_SAVEE.append('male_surprise')
elif PrintThree(i)=='_n':
emotion_SAVEE.append('male_neutral')
path_SAVEE.append(SAVEE + '/' + i)
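# SAVEE filenames encode the emotion just before the two-digit take number
# (e.g. 'DC_a01.wav' for angry, 'DC_sa01.wav' for sad, as assumed examples),
# so PrintThree's filename[-8:-6] slice is enough to separate the
# single-letter codes from 'sa'/'su'; all SAVEE speakers are male, hence the
# 'male_' prefix.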
SAVEE_df = | pd.DataFrame(emotion_SAVEE, columns=['labels']) | pandas.DataFrame |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2022, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from sklearn.manifold import TSNE
import skbio.stats.ordination
import pandas as pd
import numpy as np
import umap as up
def pcoa(distance_matrix: skbio.DistanceMatrix,
number_of_dimensions: int = None) -> skbio.OrdinationResults:
if number_of_dimensions is None:
# calculate full decomposition using eigh
return skbio.stats.ordination.pcoa(distance_matrix, method='eigh',
inplace=False)
else:
# calculate the decomposition only for the `number_of_dimensions`
# using fast heuristic eigendecomposition (fsvd)
return skbio.stats.ordination.pcoa(
distance_matrix, method='fsvd',
number_of_dimensions=number_of_dimensions,
inplace=False)
def pcoa_biplot(pcoa: skbio.OrdinationResults,
features: pd.DataFrame) -> skbio.OrdinationResults:
return skbio.stats.ordination.pcoa_biplot(pcoa, features)
def tsne(distance_matrix: skbio.DistanceMatrix,
number_of_dimensions: int = 2,
perplexity: float = 25.0,
n_iter: int = 1000,
learning_rate: float = 200.0,
early_exaggeration: float = 12.0,
random_state: int = None) -> skbio.OrdinationResults:
data = distance_matrix.data
ids = distance_matrix.ids
tsne = TSNE(number_of_dimensions, perplexity=perplexity,
learning_rate=learning_rate,
n_iter=n_iter,
early_exaggeration=early_exaggeration,
random_state=random_state).fit_transform(data)
if number_of_dimensions == 2:
number_of_dimensions = 3
add_zeros = np.zeros((tsne.shape[0], 1), dtype=np.int64)
tsne = np.append(tsne, add_zeros, axis=1)
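# When only two t-SNE dimensions are requested, a third all-zero axis is
# appended above, presumably so the result can still be handled by 3-D
# ordination viewers; the extra axis carries no variance.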
axis_labels = ["TSNE%d" % i for i in range(1, number_of_dimensions + 1)]
eigenvalues = [0 for i in axis_labels]
return skbio.OrdinationResults(
short_method_name="T-SNE",
long_method_name="t-distributed stochastic neighbor embedding",
eigvals=pd.Series(eigenvalues, index=axis_labels),
proportion_explained= | pd.Series(None, index=axis_labels) | pandas.Series |
# -*- coding: utf-8 -*-
"""
docstring goes here.
:copyright: Copyright 2014 by the Elephant team, see AUTHORS.txt.
:license: Modified BSD, see LICENSE.txt for details.
"""
from __future__ import division, print_function
import unittest
from itertools import chain
from neo.test.generate_datasets import fake_neo
import numpy as np
from numpy.testing.utils import assert_array_equal
import quantities as pq
try:
import pandas as pd
from pandas.util.testing import assert_frame_equal, assert_index_equal
except ImportError:
HAVE_PANDAS = False
else:
import elephant.pandas_bridge as ep
HAVE_PANDAS = True
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class MultiindexFromDictTestCase(unittest.TestCase):
def test__multiindex_from_dict(self):
inds = {'test1': 6.5,
'test2': 5,
'test3': 'test'}
targ = pd.MultiIndex(levels=[[6.5], [5], ['test']],
labels=[[0], [0], [0]],
names=['test1', 'test2', 'test3'])
res0 = ep._multiindex_from_dict(inds)
self.assertEqual(targ.levels, res0.levels)
self.assertEqual(targ.names, res0.names)
self.assertEqual(targ.labels, res0.labels)
def _convert_levels(levels):
"""Convert a list of levels to the format pandas returns for a MultiIndex.
Parameters
----------
levels : list
The list of levels to convert.
Returns
-------
list
The levels in `levels` converted to values like what pandas will give.
"""
levels = list(levels)
for i, level in enumerate(levels):
if hasattr(level, 'lower'):
try:
level = unicode(level)
except NameError:
pass
elif hasattr(level, 'date'):
levels[i] = pd.DatetimeIndex(data=[level])
continue
elif level is None:
levels[i] = pd.Index([])
continue
levels[i] = pd.Index([level])
return levels
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class ConvertValueSafeTestCase(unittest.TestCase):
def test__convert_value_safe__float(self):
targ = 5.5
value = targ
res = ep._convert_value_safe(value)
self.assertIs(res, targ)
def test__convert_value_safe__str(self):
targ = 'test'
value = targ
res = ep._convert_value_safe(value)
self.assertIs(res, targ)
def test__convert_value_safe__bytes(self):
targ = 'test'
value = b'test'
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
def test__convert_value_safe__numpy_int_scalar(self):
targ = 5
value = np.array(5)
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res, 'dtype'))
def test__convert_value_safe__numpy_float_scalar(self):
targ = 5.
value = np.array(5.)
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res, 'dtype'))
def test__convert_value_safe__numpy_unicode_scalar(self):
targ = u'test'
value = np.array('test', dtype='U')
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res, 'dtype'))
def test__convert_value_safe__numpy_str_scalar(self):
targ = u'test'
value = np.array('test', dtype='S')
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res, 'dtype'))
def test__convert_value_safe__quantity_scalar(self):
targ = (10., 'ms')
value = 10. * pq.ms
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res[0], 'dtype'))
self.assertFalse(hasattr(res[0], 'units'))
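# A scalar Quantity is reduced to a (magnitude, unit-string) tuple with the
# numpy dtype and units stripped, presumably so it can be stored in a plain
# pandas index level.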
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class SpiketrainToDataframeTestCase(unittest.TestCase):
def test__spiketrain_to_dataframe__parents_empty(self):
obj = fake_neo('SpikeTrain', seed=0)
res0 = ep.spiketrain_to_dataframe(obj)
res1 = ep.spiketrain_to_dataframe(obj, child_first=True)
res2 = ep.spiketrain_to_dataframe(obj, child_first=False)
res3 = ep.spiketrain_to_dataframe(obj, parents=True)
res4 = ep.spiketrain_to_dataframe(obj, parents=True,
child_first=True)
res5 = ep.spiketrain_to_dataframe(obj, parents=True,
child_first=False)
res6 = ep.spiketrain_to_dataframe(obj, parents=False)
res7 = ep.spiketrain_to_dataframe(obj, parents=False, child_first=True)
res8 = ep.spiketrain_to_dataframe(obj, parents=False,
child_first=False)
targvalues = pq.Quantity(obj.magnitude, units=obj.units)
targvalues = targvalues.rescale('s').magnitude[np.newaxis].T
targindex = np.arange(len(targvalues))
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(1, len(res4.columns))
self.assertEqual(1, len(res5.columns))
self.assertEqual(1, len(res6.columns))
self.assertEqual(1, len(res7.columns))
self.assertEqual(1, len(res8.columns))
self.assertEqual(len(obj), len(res0.index))
self.assertEqual(len(obj), len(res1.index))
self.assertEqual(len(obj), len(res2.index))
self.assertEqual(len(obj), len(res3.index))
self.assertEqual(len(obj), len(res4.index))
self.assertEqual(len(obj), len(res5.index))
self.assertEqual(len(obj), len(res6.index))
self.assertEqual(len(obj), len(res7.index))
self.assertEqual(len(obj), len(res8.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targvalues, res4.values)
assert_array_equal(targvalues, res5.values)
assert_array_equal(targvalues, res6.values)
assert_array_equal(targvalues, res7.values)
assert_array_equal(targvalues, res8.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
assert_array_equal(targindex, res3.index)
assert_array_equal(targindex, res4.index)
assert_array_equal(targindex, res5.index)
assert_array_equal(targindex, res6.index)
assert_array_equal(targindex, res7.index)
assert_array_equal(targindex, res8.index)
self.assertEqual(['spike_number'], res0.index.names)
self.assertEqual(['spike_number'], res1.index.names)
self.assertEqual(['spike_number'], res2.index.names)
self.assertEqual(['spike_number'], res3.index.names)
self.assertEqual(['spike_number'], res4.index.names)
self.assertEqual(['spike_number'], res5.index.names)
self.assertEqual(['spike_number'], res6.index.names)
self.assertEqual(['spike_number'], res7.index.names)
self.assertEqual(['spike_number'], res8.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
self.assertEqual(keys, res4.columns.names)
self.assertEqual(keys, res5.columns.names)
self.assertEqual(keys, res6.columns.names)
self.assertEqual(keys, res7.columns.names)
self.assertEqual(keys, res8.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res4.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res5.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res6.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res7.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res8.columns.levels):
assert_index_equal(value, level)
def test__spiketrain_to_dataframe__noparents(self):
blk = fake_neo('Block', seed=0)
obj = blk.list_children_by_class('SpikeTrain')[0]
res0 = ep.spiketrain_to_dataframe(obj, parents=False)
res1 = ep.spiketrain_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.spiketrain_to_dataframe(obj, parents=False,
child_first=False)
targvalues = pq.Quantity(obj.magnitude, units=obj.units)
targvalues = targvalues.rescale('s').magnitude[np.newaxis].T
targindex = np.arange(len(targvalues))
attrs = ep._extract_neo_attrs_safe(obj, parents=False,
child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(len(obj), len(res0.index))
self.assertEqual(len(obj), len(res1.index))
self.assertEqual(len(obj), len(res2.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
self.assertEqual(['spike_number'], res0.index.names)
self.assertEqual(['spike_number'], res1.index.names)
self.assertEqual(['spike_number'], res2.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
def test__spiketrain_to_dataframe__parents_childfirst(self):
blk = fake_neo('Block', seed=0)
obj = blk.list_children_by_class('SpikeTrain')[0]
res0 = ep.spiketrain_to_dataframe(obj)
res1 = ep.spiketrain_to_dataframe(obj, child_first=True)
res2 = ep.spiketrain_to_dataframe(obj, parents=True)
res3 = ep.spiketrain_to_dataframe(obj, parents=True, child_first=True)
targvalues = pq.Quantity(obj.magnitude, units=obj.units)
targvalues = targvalues.rescale('s').magnitude[np.newaxis].T
targindex = np.arange(len(targvalues))
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(len(obj), len(res0.index))
self.assertEqual(len(obj), len(res1.index))
self.assertEqual(len(obj), len(res2.index))
self.assertEqual(len(obj), len(res3.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
assert_array_equal(targindex, res3.index)
self.assertEqual(['spike_number'], res0.index.names)
self.assertEqual(['spike_number'], res1.index.names)
self.assertEqual(['spike_number'], res2.index.names)
self.assertEqual(['spike_number'], res3.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
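
    # child_first=False should order inherited attributes parent-first, so the
    # expected columns are extracted with child_first=False as well.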
def test__spiketrain_to_dataframe__parents_parentfirst(self):
blk = fake_neo('Block', seed=0)
obj = blk.list_children_by_class('SpikeTrain')[0]
res0 = ep.spiketrain_to_dataframe(obj, child_first=False)
res1 = ep.spiketrain_to_dataframe(obj, parents=True, child_first=False)
targvalues = pq.Quantity(obj.magnitude, units=obj.units)
targvalues = targvalues.rescale('s').magnitude[np.newaxis].T
targindex = np.arange(len(targvalues))
attrs = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=False)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(len(obj), len(res0.index))
self.assertEqual(len(obj), len(res1.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
self.assertEqual(['spike_number'], res0.index.names)
self.assertEqual(['spike_number'], res1.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
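
# ep.event_to_dataframe tests: the resulting DataFrame should hold the event
# labels as a single string column, be indexed by the event times rescaled to
# seconds (index name 'times'), and carry the attributes returned by
# ep._extract_neo_attrs_safe as the column MultiIndex levels. Times or labels
# beyond the shorter of the two arrays are expected to be dropped.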
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class EventToDataframeTestCase(unittest.TestCase):
def test__event_to_dataframe__parents_empty(self):
obj = fake_neo('Event', seed=42)
res0 = ep.event_to_dataframe(obj)
res1 = ep.event_to_dataframe(obj, child_first=True)
res2 = ep.event_to_dataframe(obj, child_first=False)
res3 = ep.event_to_dataframe(obj, parents=True)
res4 = ep.event_to_dataframe(obj, parents=True, child_first=True)
res5 = ep.event_to_dataframe(obj, parents=True, child_first=False)
res6 = ep.event_to_dataframe(obj, parents=False)
res7 = ep.event_to_dataframe(obj, parents=False, child_first=True)
res8 = ep.event_to_dataframe(obj, parents=False, child_first=False)
targvalues = obj.labels[:len(obj.times)][np.newaxis].T.astype('U')
targindex = obj.times[:len(obj.labels)].rescale('s').magnitude
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(1, len(res4.columns))
self.assertEqual(1, len(res5.columns))
self.assertEqual(1, len(res6.columns))
self.assertEqual(1, len(res7.columns))
self.assertEqual(1, len(res8.columns))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res2.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res3.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res4.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res5.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res6.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res7.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res8.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targvalues, res4.values)
assert_array_equal(targvalues, res5.values)
assert_array_equal(targvalues, res6.values)
assert_array_equal(targvalues, res7.values)
assert_array_equal(targvalues, res8.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
assert_array_equal(targindex, res3.index)
assert_array_equal(targindex, res4.index)
assert_array_equal(targindex, res5.index)
assert_array_equal(targindex, res6.index)
assert_array_equal(targindex, res7.index)
assert_array_equal(targindex, res8.index)
self.assertEqual(['times'], res0.index.names)
self.assertEqual(['times'], res1.index.names)
self.assertEqual(['times'], res2.index.names)
self.assertEqual(['times'], res3.index.names)
self.assertEqual(['times'], res4.index.names)
self.assertEqual(['times'], res5.index.names)
self.assertEqual(['times'], res6.index.names)
self.assertEqual(['times'], res7.index.names)
self.assertEqual(['times'], res8.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
self.assertEqual(keys, res4.columns.names)
self.assertEqual(keys, res5.columns.names)
self.assertEqual(keys, res6.columns.names)
self.assertEqual(keys, res7.columns.names)
self.assertEqual(keys, res8.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res4.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res5.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res6.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res7.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res8.columns.levels):
assert_index_equal(value, level)
def test__event_to_dataframe__noparents(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Event')[0]
res0 = ep.event_to_dataframe(obj, parents=False)
res1 = ep.event_to_dataframe(obj, parents=False, child_first=False)
res2 = ep.event_to_dataframe(obj, parents=False, child_first=True)
targvalues = obj.labels[:len(obj.times)][np.newaxis].T.astype('U')
targindex = obj.times[:len(obj.labels)].rescale('s').magnitude
attrs = ep._extract_neo_attrs_safe(obj, parents=False,
child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res2.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
self.assertEqual(['times'], res0.index.names)
self.assertEqual(['times'], res1.index.names)
self.assertEqual(['times'], res2.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
def test__event_to_dataframe__parents_childfirst(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Event')[0]
res0 = ep.event_to_dataframe(obj)
res1 = ep.event_to_dataframe(obj, child_first=True)
res2 = ep.event_to_dataframe(obj, parents=True)
res3 = ep.event_to_dataframe(obj, parents=True, child_first=True)
targvalues = obj.labels[:len(obj.times)][np.newaxis].T.astype('U')
targindex = obj.times[:len(obj.labels)].rescale('s').magnitude
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res2.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res3.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
assert_array_equal(targindex, res3.index)
self.assertEqual(['times'], res0.index.names)
self.assertEqual(['times'], res1.index.names)
self.assertEqual(['times'], res2.index.names)
self.assertEqual(['times'], res3.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
def test__event_to_dataframe__parents_parentfirst(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Event')[0]
res0 = ep.event_to_dataframe(obj, child_first=False)
res1 = ep.event_to_dataframe(obj, parents=True, child_first=False)
targvalues = obj.labels[:len(obj.times)][np.newaxis].T.astype('U')
targindex = obj.times[:len(obj.labels)].rescale('s').magnitude
attrs = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=False)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res1.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
self.assertEqual(['times'], res0.index.names)
self.assertEqual(['times'], res1.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
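
# ep.epoch_to_dataframe tests: the expected DataFrame holds the epoch labels
# as a single column under a two-level ('durations', 'times') MultiIndex, both
# levels rescaled to seconds and sorted, with rows truncated to the shortest
# of times/durations/labels. Attributes extracted via
# ep._extract_neo_attrs_safe again form the column levels.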
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class EpochToDataframeTestCase(unittest.TestCase):
def test__epoch_to_dataframe__parents_empty(self):
obj = fake_neo('Epoch', seed=42)
res0 = ep.epoch_to_dataframe(obj)
res1 = ep.epoch_to_dataframe(obj, child_first=True)
res2 = ep.epoch_to_dataframe(obj, child_first=False)
res3 = ep.epoch_to_dataframe(obj, parents=True)
res4 = ep.epoch_to_dataframe(obj, parents=True, child_first=True)
res5 = ep.epoch_to_dataframe(obj, parents=True, child_first=False)
res6 = ep.epoch_to_dataframe(obj, parents=False)
res7 = ep.epoch_to_dataframe(obj, parents=False, child_first=True)
res8 = ep.epoch_to_dataframe(obj, parents=False, child_first=False)
minlen = min([len(obj.times), len(obj.durations), len(obj.labels)])
targvalues = obj.labels[:minlen][np.newaxis].T.astype('U')
targindex = np.vstack([obj.durations[:minlen].rescale('s').magnitude,
obj.times[:minlen].rescale('s').magnitude])
targvalues = targvalues[targindex.argsort()[0], :]
targindex.sort()
attrs = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(1, len(res4.columns))
self.assertEqual(1, len(res5.columns))
self.assertEqual(1, len(res6.columns))
self.assertEqual(1, len(res7.columns))
self.assertEqual(1, len(res8.columns))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res2.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res3.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res4.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res5.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res6.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res7.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res8.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targvalues, res4.values)
assert_array_equal(targvalues, res5.values)
assert_array_equal(targvalues, res6.values)
assert_array_equal(targvalues, res7.values)
assert_array_equal(targvalues, res8.values)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
self.assertEqual(keys, res4.columns.names)
self.assertEqual(keys, res5.columns.names)
self.assertEqual(keys, res6.columns.names)
self.assertEqual(keys, res7.columns.names)
self.assertEqual(keys, res8.columns.names)
self.assertEqual([u'durations', u'times'], res0.index.names)
self.assertEqual([u'durations', u'times'], res1.index.names)
self.assertEqual([u'durations', u'times'], res2.index.names)
self.assertEqual([u'durations', u'times'], res3.index.names)
self.assertEqual([u'durations', u'times'], res4.index.names)
self.assertEqual([u'durations', u'times'], res5.index.names)
self.assertEqual([u'durations', u'times'], res6.index.names)
self.assertEqual([u'durations', u'times'], res7.index.names)
self.assertEqual([u'durations', u'times'], res8.index.names)
self.assertEqual(2, len(res0.index.levels))
self.assertEqual(2, len(res1.index.levels))
self.assertEqual(2, len(res2.index.levels))
self.assertEqual(2, len(res3.index.levels))
self.assertEqual(2, len(res4.index.levels))
self.assertEqual(2, len(res5.index.levels))
self.assertEqual(2, len(res6.index.levels))
self.assertEqual(2, len(res7.index.levels))
self.assertEqual(2, len(res8.index.levels))
assert_array_equal(targindex, res0.index.levels)
assert_array_equal(targindex, res1.index.levels)
assert_array_equal(targindex, res2.index.levels)
assert_array_equal(targindex, res3.index.levels)
assert_array_equal(targindex, res4.index.levels)
assert_array_equal(targindex, res5.index.levels)
assert_array_equal(targindex, res6.index.levels)
assert_array_equal(targindex, res7.index.levels)
assert_array_equal(targindex, res8.index.levels)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res4.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res5.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res6.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res7.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res8.columns.levels):
assert_index_equal(value, level)
def test__epoch_to_dataframe__noparents(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Epoch')[0]
res0 = ep.epoch_to_dataframe(obj, parents=False)
res1 = ep.epoch_to_dataframe(obj, parents=False, child_first=True)
res2 = ep.epoch_to_dataframe(obj, parents=False, child_first=False)
minlen = min([len(obj.times), len(obj.durations), len(obj.labels)])
targvalues = obj.labels[:minlen][np.newaxis].T.astype('U')
targindex = np.vstack([obj.durations[:minlen].rescale('s').magnitude,
obj.times[:minlen].rescale('s').magnitude])
targvalues = targvalues[targindex.argsort()[0], :]
targindex.sort()
attrs = ep._extract_neo_attrs_safe(obj, parents=False,
child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res2.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual([u'durations', u'times'], res0.index.names)
self.assertEqual([u'durations', u'times'], res1.index.names)
self.assertEqual([u'durations', u'times'], res2.index.names)
self.assertEqual(2, len(res0.index.levels))
self.assertEqual(2, len(res1.index.levels))
self.assertEqual(2, len(res2.index.levels))
assert_array_equal(targindex, res0.index.levels)
assert_array_equal(targindex, res1.index.levels)
assert_array_equal(targindex, res2.index.levels)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
def test__epoch_to_dataframe__parents_childfirst(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Epoch')[0]
res0 = ep.epoch_to_dataframe(obj)
res1 = ep.epoch_to_dataframe(obj, child_first=True)
res2 = ep.epoch_to_dataframe(obj, parents=True)
res3 = ep.epoch_to_dataframe(obj, parents=True, child_first=True)
minlen = min([len(obj.times), len(obj.durations), len(obj.labels)])
targvalues = obj.labels[:minlen][np.newaxis].T.astype('U')
targindex = np.vstack([obj.durations[:minlen].rescale('s').magnitude,
obj.times[:minlen].rescale('s').magnitude])
targvalues = targvalues[targindex.argsort()[0], :]
targindex.sort()
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res2.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res3.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
self.assertEqual([u'durations', u'times'], res0.index.names)
self.assertEqual([u'durations', u'times'], res1.index.names)
self.assertEqual([u'durations', u'times'], res2.index.names)
self.assertEqual([u'durations', u'times'], res3.index.names)
self.assertEqual(2, len(res0.index.levels))
self.assertEqual(2, len(res1.index.levels))
self.assertEqual(2, len(res2.index.levels))
self.assertEqual(2, len(res3.index.levels))
assert_array_equal(targindex, res0.index.levels)
assert_array_equal(targindex, res1.index.levels)
assert_array_equal(targindex, res2.index.levels)
assert_array_equal(targindex, res3.index.levels)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
def test__epoch_to_dataframe__parents_parentfirst(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Epoch')[0]
res0 = ep.epoch_to_dataframe(obj, child_first=False)
res1 = ep.epoch_to_dataframe(obj, parents=True, child_first=False)
minlen = min([len(obj.times), len(obj.durations), len(obj.labels)])
targvalues = obj.labels[:minlen][np.newaxis].T.astype('U')
targindex = np.vstack([obj.durations[:minlen].rescale('s').magnitude,
obj.times[:minlen].rescale('s').magnitude])
targvalues = targvalues[targindex.argsort()[0], :]
targindex.sort()
attrs = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=False)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res1.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual([u'durations', u'times'], res0.index.names)
self.assertEqual([u'durations', u'times'], res1.index.names)
self.assertEqual(2, len(res0.index.levels))
self.assertEqual(2, len(res1.index.levels))
assert_array_equal(targindex, res0.index.levels)
assert_array_equal(targindex, res1.index.levels)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
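
# ep.multi_spiketrains_to_dataframe tests: a single SpikeTrain should give the
# same DataFrame as ep.spiketrain_to_dataframe, while containers (Unit,
# Segment, Block, or collections of Blocks) should yield one column per child
# spike train, equivalent to concatenating the per-train DataFrames along the
# columns and sorting them with ep._sort_inds. The setUp hook aliases
# assertCountEqual to assertItemsEqual where only the latter exists
# (presumably for Python 2 compatibility).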
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class MultiSpiketrainsToDataframeTestCase(unittest.TestCase):
def setUp(self):
if hasattr(self, 'assertItemsEqual'):
self.assertCountEqual = self.assertItemsEqual
def test__multi_spiketrains_to_dataframe__single(self):
obj = fake_neo('SpikeTrain', seed=0, n=5)
res0 = ep.multi_spiketrains_to_dataframe(obj)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=False)
res2 = ep.multi_spiketrains_to_dataframe(obj, parents=True)
res3 = ep.multi_spiketrains_to_dataframe(obj, child_first=True)
res4 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=True)
res5 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=True)
res6 = ep.multi_spiketrains_to_dataframe(obj, child_first=False)
res7 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=False)
res8 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=False)
targ = ep.spiketrain_to_dataframe(obj)
keys = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = 1
targlen = len(obj)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targwidth, len(res4.columns))
self.assertEqual(targwidth, len(res5.columns))
self.assertEqual(targwidth, len(res6.columns))
self.assertEqual(targwidth, len(res7.columns))
self.assertEqual(targwidth, len(res8.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertEqual(targlen, len(res4.index))
self.assertEqual(targlen, len(res5.index))
self.assertEqual(targlen, len(res6.index))
self.assertEqual(targlen, len(res7.index))
self.assertEqual(targlen, len(res8.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
self.assertCountEqual(keys, res4.columns.names)
self.assertCountEqual(keys, res5.columns.names)
self.assertCountEqual(keys, res6.columns.names)
self.assertCountEqual(keys, res7.columns.names)
self.assertCountEqual(keys, res8.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_array_equal(targ.values, res4.values)
assert_array_equal(targ.values, res5.values)
assert_array_equal(targ.values, res6.values)
assert_array_equal(targ.values, res7.values)
assert_array_equal(targ.values, res8.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
assert_frame_equal(targ, res4)
assert_frame_equal(targ, res5)
assert_frame_equal(targ, res6)
assert_frame_equal(targ, res7)
assert_frame_equal(targ, res8)
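
    # A container (here a Unit) should produce one column per child spike
    # train: the per-train DataFrames concatenated along the columns and
    # column-sorted with ep._sort_inds.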
def test__multi_spiketrains_to_dataframe__unit_default(self):
obj = fake_neo('Unit', seed=0, n=5)
res0 = ep.multi_spiketrains_to_dataframe(obj)
objs = obj.spiketrains
targ = [ep.spiketrain_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_spiketrains_to_dataframe__segment_default(self):
obj = fake_neo('Segment', seed=0, n=5)
res0 = ep.multi_spiketrains_to_dataframe(obj)
objs = obj.spiketrains
targ = [ep.spiketrain_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_spiketrains_to_dataframe__block_noparents(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_spiketrains_to_dataframe(obj, parents=False)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=False)
objs = obj.list_children_by_class('SpikeTrain')
targ = [ep.spiketrain_to_dataframe(iobj,
parents=False, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=False,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
def test__multi_spiketrains_to_dataframe__block_parents_childfirst(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_spiketrains_to_dataframe(obj)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=True)
res2 = ep.multi_spiketrains_to_dataframe(obj, child_first=True)
res3 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=True)
objs = obj.list_children_by_class('SpikeTrain')
targ = [ep.spiketrain_to_dataframe(iobj,
parents=True, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
def test__multi_spiketrains_to_dataframe__block_parents_parentfirst(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_spiketrains_to_dataframe(obj, child_first=False)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=False)
objs = obj.list_children_by_class('SpikeTrain')
targ = [ep.spiketrain_to_dataframe(iobj,
parents=True, child_first=False)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=False).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
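
    # Collections of Blocks (lists, tuples, iterators, dicts) should be
    # flattened, gathering the spike trains of every contained Block.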
def test__multi_spiketrains_to_dataframe__list_noparents(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_spiketrains_to_dataframe(obj, parents=False)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=False)
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj,
parents=False, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=False,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
def test__multi_spiketrains_to_dataframe__list_parents_childfirst(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_spiketrains_to_dataframe(obj)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=True)
res2 = ep.multi_spiketrains_to_dataframe(obj, child_first=True)
res3 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=True)
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj,
parents=True, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
def test__multi_spiketrains_to_dataframe__list_parents_parentfirst(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_spiketrains_to_dataframe(obj, child_first=False)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=False)
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj,
parents=True, child_first=False)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=False).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
def test__multi_spiketrains_to_dataframe__tuple_default(self):
obj = tuple(fake_neo('Block', seed=i, n=3) for i in range(3))
res0 = ep.multi_spiketrains_to_dataframe(obj)
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_spiketrains_to_dataframe__iter_default(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_spiketrains_to_dataframe(iter(obj))
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_spiketrains_to_dataframe__dict_default(self):
obj = dict((i, fake_neo('Block', seed=i, n=3)) for i in range(3))
res0 = ep.multi_spiketrains_to_dataframe(obj)
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in
obj.values())
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
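
# ep.multi_events_to_dataframe tests: analogous to the multi-spiketrain case,
# but with one column per Event and an index built from the union of the event
# times, so the expected row count is the number of unique times across all
# events (after truncating each event to min(len(times), len(labels))).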
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class MultiEventsToDataframeTestCase(unittest.TestCase):
def setUp(self):
if hasattr(self, 'assertItemsEqual'):
self.assertCountEqual = self.assertItemsEqual
def test__multi_events_to_dataframe__single(self):
obj = fake_neo('Event', seed=0, n=5)
res0 = ep.multi_events_to_dataframe(obj)
res1 = ep.multi_events_to_dataframe(obj, parents=False)
res2 = ep.multi_events_to_dataframe(obj, parents=True)
res3 = ep.multi_events_to_dataframe(obj, child_first=True)
res4 = ep.multi_events_to_dataframe(obj, parents=False,
child_first=True)
res5 = ep.multi_events_to_dataframe(obj, parents=True,
child_first=True)
res6 = ep.multi_events_to_dataframe(obj, child_first=False)
res7 = ep.multi_events_to_dataframe(obj, parents=False,
child_first=False)
res8 = ep.multi_events_to_dataframe(obj, parents=True,
child_first=False)
targ = ep.event_to_dataframe(obj)
keys = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = 1
targlen = min(len(obj.times), len(obj.labels))
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targwidth, len(res4.columns))
self.assertEqual(targwidth, len(res5.columns))
self.assertEqual(targwidth, len(res6.columns))
self.assertEqual(targwidth, len(res7.columns))
self.assertEqual(targwidth, len(res8.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertEqual(targlen, len(res4.index))
self.assertEqual(targlen, len(res5.index))
self.assertEqual(targlen, len(res6.index))
self.assertEqual(targlen, len(res7.index))
self.assertEqual(targlen, len(res8.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
self.assertCountEqual(keys, res4.columns.names)
self.assertCountEqual(keys, res5.columns.names)
self.assertCountEqual(keys, res6.columns.names)
self.assertCountEqual(keys, res7.columns.names)
self.assertCountEqual(keys, res8.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_array_equal(targ.values, res4.values)
assert_array_equal(targ.values, res5.values)
assert_array_equal(targ.values, res6.values)
assert_array_equal(targ.values, res7.values)
assert_array_equal(targ.values, res8.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
assert_frame_equal(targ, res4)
assert_frame_equal(targ, res5)
assert_frame_equal(targ, res6)
assert_frame_equal(targ, res7)
assert_frame_equal(targ, res8)
def test__multi_events_to_dataframe__segment_default(self):
obj = fake_neo('Segment', seed=0, n=5)
res0 = ep.multi_events_to_dataframe(obj)
objs = obj.events
targ = [ep.event_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_events_to_dataframe__block_noparents(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_events_to_dataframe(obj, parents=False)
res1 = ep.multi_events_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.multi_events_to_dataframe(obj, parents=False,
child_first=False)
objs = obj.list_children_by_class('Event')
targ = [ep.event_to_dataframe(iobj, parents=False, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=False,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
def test__multi_events_to_dataframe__block_parents_childfirst(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_events_to_dataframe(obj)
res1 = ep.multi_events_to_dataframe(obj, parents=True)
res2 = ep.multi_events_to_dataframe(obj, child_first=True)
res3 = ep.multi_events_to_dataframe(obj, parents=True,
child_first=True)
objs = obj.list_children_by_class('Event')
targ = [ep.event_to_dataframe(iobj, parents=True, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
def test__multi_events_to_dataframe__block_parents_parentfirst(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_events_to_dataframe(obj, child_first=False)
res1 = ep.multi_events_to_dataframe(obj, parents=True,
child_first=False)
objs = obj.list_children_by_class('Event')
targ = [ep.event_to_dataframe(iobj, parents=True, child_first=False)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=False).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
def test__multi_events_to_dataframe__list_noparents(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_events_to_dataframe(obj, parents=False)
res1 = ep.multi_events_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.multi_events_to_dataframe(obj, parents=False,
child_first=False)
objs = (iobj.list_children_by_class('Event') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.event_to_dataframe(iobj, parents=False, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=False,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
def test__multi_events_to_dataframe__list_parents_childfirst(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_events_to_dataframe(obj)
res1 = ep.multi_events_to_dataframe(obj, parents=True)
res2 = ep.multi_events_to_dataframe(obj, child_first=True)
res3 = ep.multi_events_to_dataframe(obj, parents=True,
child_first=True)
objs = (iobj.list_children_by_class('Event') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.event_to_dataframe(iobj, parents=True, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
def test__multi_events_to_dataframe__list_parents_parentfirst(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_events_to_dataframe(obj, child_first=False)
res1 = ep.multi_events_to_dataframe(obj, parents=True,
child_first=False)
objs = (iobj.list_children_by_class('Event') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.event_to_dataframe(iobj, parents=True, child_first=False)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=False).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
def test__multi_events_to_dataframe__tuple_default(self):
obj = tuple(fake_neo('Block', seed=i, n=3) for i in range(3))
res0 = ep.multi_events_to_dataframe(obj)
objs = (iobj.list_children_by_class('Event') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.event_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_events_to_dataframe__iter_default(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_events_to_dataframe(iter(obj))
objs = (iobj.list_children_by_class('Event') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.event_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_events_to_dataframe__dict_default(self):
obj = dict((i, fake_neo('Block', seed=i, n=3)) for i in range(3))
res0 = ep.multi_events_to_dataframe(obj)
objs = (iobj.list_children_by_class('Event') for iobj in
obj.values())
objs = list(chain.from_iterable(objs))
targ = [ep.event_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class MultiEpochsToDataframeTestCase(unittest.TestCase):
def setUp(self):
if hasattr(self, 'assertItemsEqual'):
self.assertCountEqual = self.assertItemsEqual
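# Python 2's unittest provides assertItemsEqual rather than assertCountEqual;
# aliasing it in setUp lets the same assertions run under both versions.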
def test__multi_epochs_to_dataframe__single(self):
obj = fake_neo('Epoch', seed=0, n=5)
res0 = ep.multi_epochs_to_dataframe(obj)
res1 = ep.multi_epochs_to_dataframe(obj, parents=False)
res2 = ep.multi_epochs_to_dataframe(obj, parents=True)
res3 = ep.multi_epochs_to_dataframe(obj, child_first=True)
res4 = ep.multi_epochs_to_dataframe(obj, parents=False,
child_first=True)
res5 = ep.multi_epochs_to_dataframe(obj, parents=True,
child_first=True)
res6 = ep.multi_epochs_to_dataframe(obj, child_first=False)
res7 = ep.multi_epochs_to_dataframe(obj, parents=False,
child_first=False)
res8 = ep.multi_epochs_to_dataframe(obj, parents=True,
child_first=False)
targ = ep.epoch_to_dataframe(obj)
keys = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = 1
targlen = min(len(obj.times), len(obj.durations), len(obj.labels))
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targwidth, len(res4.columns))
self.assertEqual(targwidth, len(res5.columns))
self.assertEqual(targwidth, len(res6.columns))
self.assertEqual(targwidth, len(res7.columns))
self.assertEqual(targwidth, len(res8.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertEqual(targlen, len(res4.index))
self.assertEqual(targlen, len(res5.index))
self.assertEqual(targlen, len(res6.index))
self.assertEqual(targlen, len(res7.index))
self.assertEqual(targlen, len(res8.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
self.assertCountEqual(keys, res4.columns.names)
self.assertCountEqual(keys, res5.columns.names)
self.assertCountEqual(keys, res6.columns.names)
self.assertCountEqual(keys, res7.columns.names)
self.assertCountEqual(keys, res8.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_array_equal(targ.values, res4.values)
assert_array_equal(targ.values, res5.values)
assert_array_equal(targ.values, res6.values)
assert_array_equal(targ.values, res7.values)
assert_array_equal(targ.values, res8.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
assert_frame_equal(targ, res4)
assert_frame_equal(targ, res5)
assert_frame_equal(targ, res6)
assert_frame_equal(targ, res7)
assert_frame_equal(targ, res8)
def test__multi_epochs_to_dataframe__segment_default(self):
obj = fake_neo('Segment', seed=0, n=5)
res0 = ep.multi_epochs_to_dataframe(obj)
objs = obj.epochs
targ = [ep.epoch_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.durations),
len(iobj.labels))] for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_epochs_to_dataframe__block_noparents(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_epochs_to_dataframe(obj, parents=False)
res1 = ep.multi_epochs_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.multi_epochs_to_dataframe(obj, parents=False,
child_first=False)
objs = obj.list_children_by_class('Epoch')
targ = [ep.epoch_to_dataframe(iobj, parents=False, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=False,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.durations),
len(iobj.labels))] for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
def test__multi_epochs_to_dataframe__block_parents_childfirst(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_epochs_to_dataframe(obj)
res1 = ep.multi_epochs_to_dataframe(obj, parents=True)
res2 = ep.multi_epochs_to_dataframe(obj, child_first=True)
res3 = ep.multi_epochs_to_dataframe(obj, parents=True,
child_first=True)
objs = obj.list_children_by_class('Epoch')
targ = [ep.epoch_to_dataframe(iobj, parents=True, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.durations),
len(iobj.labels))] for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
def test__multi_epochs_to_dataframe__block_parents_parentfirst(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_epochs_to_dataframe(obj, child_first=False)
res1 = ep.multi_epochs_to_dataframe(obj, parents=True,
child_first=False)
objs = obj.list_children_by_class('Epoch')
targ = [ep.epoch_to_dataframe(iobj, parents=True, child_first=False)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=False).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.durations),
len(iobj.labels))] for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
def test__multi_epochs_to_dataframe__list_noparents(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_epochs_to_dataframe(obj, parents=False)
res1 = ep.multi_epochs_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.multi_epochs_to_dataframe(obj, parents=False,
child_first=False)
objs = (iobj.list_children_by_class('Epoch') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.epoch_to_dataframe(iobj, parents=False, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=False,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.durations),
len(iobj.labels))] for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
def test__multi_epochs_to_dataframe__list_parents_childfirst(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_epochs_to_dataframe(obj)
res1 = ep.multi_epochs_to_dataframe(obj, parents=True)
res2 = ep.multi_epochs_to_dataframe(obj, child_first=True)
res3 = ep.multi_epochs_to_dataframe(obj, parents=True,
child_first=True)
objs = (iobj.list_children_by_class('Epoch') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.epoch_to_dataframe(iobj, parents=True, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.durations),
len(iobj.labels))] for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
| assert_frame_equal(targ, res2) | pandas.util.testing.assert_frame_equal |
import pandas as pd
import numpy as np
import tensorflow as tf
import os, pickle
class Reader(object):
def read(self, data_path):
self.read_data()
self.merge_id()
self.add_reverse()
if self.args.reindex:
self.reindex_kb()
self.gen_t_label()
self._ent_num = self._entity_num
self._rel_num = self._relation_num
self._ent_mapping = pd.DataFrame({'kb_1':{}, 'kb_2':{}})
self._rel_mapping = pd.DataFrame({'kb_1':{}, 'kb_2':{}})
self._ent_testing = pd.DataFrame({'kb_1':{}, 'kb_2':{}})
self._rel_testing = pd.DataFrame({'kb_1':{}, 'kb_2':{}})
self.gen_filter_mat()
self._kb = self._train_data
return
def read_data(self):
pass
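# Note: subclasses such as FreeBaseReader below are expected to implement
# read_data() and populate the raw triples (_train_data, _valid_data,
# _test_data) plus the _e_id/_r_id lookups and entity/relation counts that
# read() relies on.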
def merge_id(self):
self._train_data['h_id'] = self._e_id[self._train_data.h].values
self._train_data['r_id'] = self._r_id[self._train_data.r].values
self._train_data['t_id'] = self._e_id[self._train_data.t].values
self._test_data['h_id'] = self._e_id[self._test_data.h].values
self._test_data['r_id'] = self._r_id[self._test_data.r].values
self._test_data['t_id'] = self._e_id[self._test_data.t].values
self._valid_data['h_id'] = self._e_id[self._valid_data.h].values
self._valid_data['r_id'] = self._r_id[self._valid_data.r].values
self._valid_data['t_id'] = self._e_id[self._valid_data.t].values
def gen_t_label(self):
full = pd.concat([self._train_data, self._test_data, self._valid_data], ignore_index=True)
f_t_labels = full['t_id'].groupby([full['h_id'], full['r_id']]).apply(lambda x: pd.unique(x.values))
f_t_labels.name = 't_label'
self._test_data = self._test_data.join(f_t_labels, on=['h_id', 'r_id'])
self._valid_data = self._valid_data.join(f_t_labels, on=['h_id', 'r_id'])
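# t_label collects, for every (h_id, r_id) query, all tail entities seen
# anywhere in train/valid/test; it feeds the filtered evaluation masks built
# in gen_filter_mat() below.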
def add_reverse(self):
def add_reverse_for_data(data):
reversed_data = data.rename(columns={'h_id': 't_id', 't_id': 'h_id'})
reversed_data.r_id += self._relation_num
data = pd.concat(([data, reversed_data]), ignore_index=True, sort=False)
return data
self._train_data = add_reverse_for_data(self._train_data)
self._test_data = add_reverse_for_data(self._test_data)
self._valid_data = add_reverse_for_data(self._valid_data)
self._relation_num_for_eval = self._relation_num
self._relation_num *= 2
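# Reverse triples are encoded as extra relations (r_id + _relation_num), so
# the relation count doubles for training while _relation_num_for_eval keeps
# the original number for evaluation.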
def reindex_kb(self):
train_data = self._train_data
test_data = self._test_data
valid_data = self._valid_data
eids = pd.concat([train_data.h_id, train_data.t_id, self._e_id], ignore_index=True)
tv_eids = np.unique(pd.concat([test_data.h_id, test_data.t_id, valid_data.t_id, valid_data.h_id]))
not_train_eids = tv_eids[~np.in1d(tv_eids, eids)]
rids = pd.concat([train_data.r_id, pd.Series(np.arange(self._relation_num))],ignore_index=True)
def gen_map(eids, rids):
e_num = eids.groupby(eids.values).size().sort_values()[::-1]
not_train = pd.Series(np.zeros_like(not_train_eids), index=not_train_eids)
e_num = pd.concat([e_num, not_train])
r_num = rids.groupby(rids.values).size().sort_values()[::-1]
e_map = pd.Series(range(e_num.shape[0]), index=e_num.index)
r_map = pd.Series(range(r_num.shape[0]), index=r_num.index)
return e_map, r_map
def remap_kb(kb, e_map, r_map):
kb.loc[:, 'h_id'] = e_map.loc[kb.h_id.values].values
kb.loc[:, 'r_id'] = r_map.loc[kb.r_id.values].values
kb.loc[:, 't_id'] = e_map.loc[kb.t_id.values].values
return kb
def remap_id(s, rm):
s = rm.loc[s.values].values
return s
e_map, r_map = gen_map(eids, rids)
self._e_map, self._r_map = e_map, r_map
self._train_data = remap_kb(train_data, e_map, r_map)
self._valid_data = remap_kb(self._valid_data, e_map, r_map)
self._test_data = remap_kb(self._test_data, e_map, r_map)
self._e_id = remap_id(self._e_id, e_map)
self._r_id = remap_id(self._r_id, r_map)
return not_train_eids
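# Entities and relations are re-indexed by descending frequency in the
# training triples; entities that only occur in valid/test are appended after
# the training entities.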
def in2d(self, arr1, arr2):
"""Generalisation of numpy.in1d to 2D arrays"""
assert arr1.dtype == arr2.dtype
arr1_view = np.ascontiguousarray(arr1).view(np.dtype((np.void,
arr1.dtype.itemsize * arr1.shape[1])))
arr2_view = np.ascontiguousarray(arr2).view(np.dtype((np.void,
arr2.dtype.itemsize * arr2.shape[1])))
intersected = np.in1d(arr1_view, arr2_view)
return intersected.view(bool).reshape(-1)
def gen_filter_mat(self):
def sp_gen_filter_mat(t_label):
rows, cols = [], []
for row, tails in enumerate(t_label):
rows += list(np.repeat(row, repeats=len(tails)))
cols += list(tails)
return (rows, cols)
self._tail_valid_filter_mat = sp_gen_filter_mat(self._valid_data.t_label)
self._tail_test_filter_mat = sp_gen_filter_mat(self._test_data.t_label)
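# The filter "matrices" are simply (row, col) index lists marking every known
# true tail per evaluation query, enabling filtered ranking metrics.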
def gen_label_mat_for_train(self):
def gen_train_relation_label_vac(r):
c = pd.value_counts(r)
values = 1. * c.values / c.sum()
return np.stack([c.index, values], axis=1)
def gen_train_entity_label_vac(r):
indices = np.stack([r.label_id.values, r.values], axis=1)
values = np.ones_like(r.values, dtype=np.int64)
return tf.SparseTensor(indices=indices, values=values, dense_shape=[1, self._entity_num])
tr = self._train_data
labels = tr['t_id'].groupby([tr['h_id'], tr['r_id']]).size()
labels = pd.Series(range(labels.shape[0]), index=labels.index)
labels.name = 'label_id'
tr = tr.join(labels, on=['h_id', 'r_id'])
self._train_data = tr
sp_tr = tf.SparseTensor(tr[['label_id', 't_id']].values, np.ones([len(tr)], dtype=np.float32), dense_shape=[len(tr), self._entity_num])
self._label_indices, self._label_values = sp_tr.indices[:], sp_tr.values[:]
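# Each distinct (h_id, r_id) group gets a label_id row in a sparse 0/1 matrix
# over all entities; only its index and value tensors are kept on the reader
# for later use by the training graph.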
class FreeBaseReader(Reader):
def read_data(self):
path = self._options.data_path
tr = pd.read_csv(path + 'train.txt', header=None, sep='\t', names=['h', 't', 'r'])
te = pd.read_csv(path + 'test.txt', header=None, sep='\t', names=['h', 't', 'r'])
val = pd.read_csv(path + 'valid.txt', header=None, sep='\t', names=['h', 't', 'r'])
e_id = pd.read_csv(path + 'entity2id.txt', header=None, sep='\t', names=['e', 'eid'])
e_id = pd.Series(e_id.eid.values, index=e_id.e.values)
r_id = | pd.read_csv(path + 'relation2id.txt', header=None, sep='\t', names=['r', 'rid']) | pandas.read_csv |
import numpy as np
import pandas as pd
import gc
import warnings
warnings.filterwarnings('ignore')
np.random.seed(123)
class Config():
def __init__(self, load=True):
"""Load the train and test sets with some basic EDA"""
# self.train_filename = train_filename
# self.test_filename = test_filename
def load_data(self, train_filename, test_filename, print_EDA=False):
"""Load the train and test sets with some basic EDA."""
self.filename_train = train_filename
self.filename_test = test_filename
# Read data
# train_cols = ['id', 'vendor_id', 'pickup_datetime', 'dropoff_datetime', 'passenger_count', 'pickup_longitude', 'pickup_latitude', 'dropoff_longitude', 'dropoff_latitude', 'store_and_fwd_flag', 'trip_duration']
# test_cols = ['id', 'vendor_id', 'pickup_datetime', 'passenger_count', 'pickup_longitude', 'pickup_latitude', 'dropoff_longitude', 'dropoff_latitude', 'store_and_fwd_flag']
train = pd.read_csv(train_filename, header=0) #names=train_cols,
test = | pd.read_csv(test_filename, header=0) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Tests the usecols functionality during parsing
for all of the parsers defined in parsers.py
"""
import nose
import numpy as np
import pandas.util.testing as tm
from pandas import DataFrame, Index
from pandas.lib import Timestamp
from pandas.compat import StringIO
class UsecolsTests(object):
def test_raise_on_mixed_dtype_usecols(self):
# See gh-12678
data = """a,b,c
1000,2000,3000
4000,5000,6000
"""
msg = ("The elements of 'usecols' must "
"either be all strings, all unicode, or all integers")
usecols = [0, 'b', 2]
with tm.assertRaisesRegexp(ValueError, msg):
self.read_csv(StringIO(data), usecols=usecols)
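# Mixing positional selectors (0, 2) with a label ('b') is rejected because
# the parser cannot tell whether usecols should match by position or by name.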
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(1, 2))
result2 = self.read_csv(StringIO(data), usecols=('b', 'c'))
exp = self.read_csv(StringIO(data))
self.assertEqual(len(result.columns), 2)
self.assertTrue((result['b'] == exp['b']).all())
self.assertTrue((result['c'] == exp['c']).all())
tm.assert_frame_equal(result, result2)
result = self.read_csv(StringIO(data), usecols=[1, 2], header=0,
names=['foo', 'bar'])
expected = self.read_csv(StringIO(data), usecols=[1, 2])
expected.columns = ['foo', 'bar']
tm.assert_frame_equal(result, expected)
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), names=['b', 'c'],
header=None, usecols=[1, 2])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['b', 'c']]
tm.assert_frame_equal(result, expected)
result2 = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None, usecols=['b', 'c'])
tm.assert_frame_equal(result2, result)
# see gh-5766
result = self.read_csv(StringIO(data), names=['a', 'b'],
header=None, usecols=[0, 1])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['a', 'b']]
tm.assert_frame_equal(result, expected)
# length conflict, passed names and usecols disagree
self.assertRaises(ValueError, self.read_csv, StringIO(data),
names=['a', 'b'], usecols=[1], header=None)
def test_usecols_index_col_False(self):
# see gh-9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_usecols_index_col_conflict(self):
# see gh-4201: test that index_col as integer reflects usecols
data = 'a,b,c,d\nA,a,1,one\nB,b,2,two'
expected = DataFrame({'c': [1, 2]}, index=Index(
['a', 'b'], name='b'))
df = self.read_csv(StringIO(data), usecols=['b', 'c'],
index_col=0)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=['b', 'c'],
index_col='b')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[1, 2],
index_col='b')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[1, 2],
index_col=0)
tm.assert_frame_equal(expected, df)
expected = DataFrame(
{'b': ['a', 'b'], 'c': [1, 2], 'd': ('one', 'two')})
expected = expected.set_index(['b', 'c'])
df = self.read_csv(StringIO(data), usecols=['b', 'c', 'd'],
index_col=['b', 'c'])
tm.assert_frame_equal(expected, df)
def test_usecols_implicit_index_col(self):
# see gh-2654
data = 'a,b,c\n4,apple,bat,5.7\n8,orange,cow,10'
result = self.read_csv(StringIO(data), usecols=['a', 'b'])
expected = DataFrame({'a': ['apple', 'orange'],
'b': ['bat', 'cow']}, index=[4, 8])
tm.assert_frame_equal(result, expected)
def test_usecols_regex_sep(self):
# see gh-2733
data = 'a b c\n4 apple bat 5.7\n8 orange cow 10'
df = self.read_csv(StringIO(data), sep='\s+', usecols=('a', 'b'))
expected = DataFrame({'a': ['apple', 'orange'],
'b': ['bat', 'cow']}, index=[4, 8])
tm.assert_frame_equal(df, expected)
def test_usecols_with_whitespace(self):
data = 'a b c\n4 apple bat 5.7\n8 orange cow 10'
result = self.read_csv(StringIO(data), delim_whitespace=True,
usecols=('a', 'b'))
expected = DataFrame({'a': ['apple', 'orange'],
'b': ['bat', 'cow']}, index=[4, 8])
tm.assert_frame_equal(result, expected)
def test_usecols_with_integer_like_header(self):
data = """2,0,1
1000,2000,3000
4000,5000,6000
"""
usecols = [0, 1] # column selection by index
expected = DataFrame(data=[[1000, 2000],
[4000, 5000]],
columns=['2', '0'])
df = self.read_csv(StringIO(data), usecols=usecols)
tm.assert_frame_equal(df, expected)
usecols = ['0', '1'] # column selection by name
expected = DataFrame(data=[[2000, 3000],
[5000, 6000]],
columns=['0', '1'])
df = self.read_csv(StringIO(data), usecols=usecols)
tm.assert_frame_equal(df, expected)
def test_usecols_with_parse_dates(self):
# See gh-9755
s = """a,b,c,d,e
0,1,20140101,0900,4
0,1,20140102,1000,4"""
parse_dates = [[1, 2]]
cols = {
'a': [0, 0],
'c_d': [
Timestamp('2014-01-01 09:00:00'),
Timestamp('2014-01-02 10:00:00')
]
}
expected = DataFrame(cols, columns=['c_d', 'a'])
df = self.read_csv(StringIO(s), usecols=[0, 2, 3],
parse_dates=parse_dates)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(s), usecols=[3, 0, 2],
parse_dates=parse_dates)
tm.assert_frame_equal(df, expected)
def test_usecols_with_parse_dates_and_full_names(self):
# See gh-9755
s = """0,1,20140101,0900,4
0,1,20140102,1000,4"""
parse_dates = [[1, 2]]
names = list('abcde')
cols = {
'a': [0, 0],
'c_d': [
Timestamp('2014-01-01 09:00:00'),
Timestamp('2014-01-02 10:00:00')
]
}
expected = DataFrame(cols, columns=['c_d', 'a'])
df = self.read_csv(StringIO(s), names=names,
usecols=[0, 2, 3],
parse_dates=parse_dates)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(s), names=names,
usecols=[3, 0, 2],
parse_dates=parse_dates)
tm.assert_frame_equal(df, expected)
def test_usecols_with_parse_dates_and_usecol_names(self):
# See gh-9755
s = """0,1,20140101,0900,4
0,1,20140102,1000,4"""
parse_dates = [[1, 2]]
names = list('acd')
cols = {
'a': [0, 0],
'c_d': [
Timestamp('2014-01-01 09:00:00'),
Timestamp('2014-01-02 10:00:00')
]
}
expected = DataFrame(cols, columns=['c_d', 'a'])
df = self.read_csv(StringIO(s), names=names,
usecols=[0, 2, 3],
parse_dates=parse_dates)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(s), names=names,
usecols=[3, 0, 2],
parse_dates=parse_dates)
tm.assert_frame_equal(df, expected)
def test_usecols_with_unicode_strings(self):
# see gh-13219
s = '''AAA,BBB,CCC,DDD
0.056674973,8,True,a
2.613230982,2,False,b
3.568935038,7,False,a
'''
data = {
'AAA': {
0: 0.056674972999999997,
1: 2.6132309819999997,
2: 3.5689350380000002
},
'BBB': {0: 8, 1: 2, 2: 7}
}
expected = DataFrame(data)
df = self.read_csv(StringIO(s), usecols=[u'AAA', u'BBB'])
tm.assert_frame_equal(df, expected)
def test_usecols_with_single_byte_unicode_strings(self):
# see gh-13219
s = '''A,B,C,D
0.056674973,8,True,a
2.613230982,2,False,b
3.568935038,7,False,a
'''
data = {
'A': {
0: 0.056674972999999997,
1: 2.6132309819999997,
2: 3.5689350380000002
},
'B': {0: 8, 1: 2, 2: 7}
}
expected = DataFrame(data)
df = self.read_csv(StringIO(s), usecols=[u'A', u'B'])
tm.assert_frame_equal(df, expected)
def test_usecols_with_mixed_encoding_strings(self):
s = '''AAA,BBB,CCC,DDD
0.056674973,8,True,a
2.613230982,2,False,b
3.568935038,7,False,a
'''
msg = ("The elements of 'usecols' must "
"either be all strings, all unicode, or all integers")
with tm.assertRaisesRegexp(ValueError, msg):
self.read_csv( | StringIO(s) | pandas.compat.StringIO |
import pandas as pd
def generate_difference(df, strat_column=2, copy_previous=True):
out_df = | pd.DataFrame() | pandas.DataFrame |
# coding: utf-8 -*-
'''
GFS.py contains utility functions for GFS
'''
__all__ = ['get_akbk',
'get_pcoord',
'read_atcf']
import numpy as _np
import pandas as _pd
def get_akbk():
'''
Returns ak,bk for 64 level GFS model
vcoord is obtained from global_fcst.fd/gfsio_module.f
ak,bk are as computed from treadeo.gfsio.f for
hybrid = .true. and idvc == 2
'''
vcoord = _np.array([1.0000000,0.99467099,0.98863202,0.98180002,0.97408301, \
0.96538502,0.95560300,0.94463098,0.93235999,0.91867799,0.90347999, \
0.88666302,0.86813903,0.84783000,0.82568502,0.80167699,0.77581102, \
0.74813300,0.71872902,0.68773103,0.65531600,0.62170500,0.58715999, \
0.55197400,0.51646298,0.48095500,0.44577801,0.41124901,0.37765899, \
0.34526899,0.31430000,0.28492799,0.25728399,0.23145400,0.20748200, \
0.18537199,0.16509899,0.14660800,0.12982300,0.11465500,0.10100200, \
0.88756002E-01,0.77808000E-01,0.68048999E-01,0.59370000E-01, \
0.51670998E-01,0.44854999E-01,0.38830999E-01,0.33514999E-01, \
0.28829999E-01,0.24707999E-01,0.21083999E-01,0.17901000E-01, \
0.15107000E-01,0.12658000E-01,0.10511000E-01,0.86310003E-02, \
0.69849999E-02,0.55439998E-02,0.42840000E-02,0.31830000E-02, \
0.22199999E-02,0.13780000E-02,0.64200000E-03,0.0000000])
ak = vcoord / 1000.
bk = vcoord / 1.
return ak,bk
def get_pcoord():
'''
Returns the pressure levels in hPa of the native GFS model with 64 levels.
OUTPUT:
pres = pressure levels (hPa) assuming pref=1013.0
'''
ak,bk = get_akbk()
pref = 101.3
pres = ak[:-1] + bk[:-1]*pref
return pres * 10.
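# Rough sketch of the relationship used above (hybrid sigma-pressure levels):
# p_k = ak_k + bk_k * pref with pref = 101.3, scaled to hPa on return,
# giving 64 full-level pressures for the 64-level GFS.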
def read_atcf(filename):
'''
Read an ATCF file into a dataframe for ease of processing.
INPUT:
filename = ATCF filename
The file contents are specified at:
http://www.nrlmry.navy.mil/atcf_web/docs/database/new/abdeck.html
OUTPUT:
df = DataFrame containing the file contents
'''
def _to_number(s):
tmp = 0.1 * float(s[:-1])
if s[-1] in ['S','W']:
v = -1.0 * tmp if s[-1] in ['S'] else 360.0 - tmp
else:
v = tmp
return v
# column names
names = ['BASIN','CY','YYYYMMDDHH','TECHNUM','TECH','TAU','LAT','LON','VMAX','MSLP','TY','RAD','WINDCODE','RAD1','RAD2','RAD3','RAD4','POUTER','ROUTER','RMW','GUSTS','EYE','SUBREGION','MAXSEAS','INITIALS','DIR','SPEED','STORMNAME','DEPTH','SEAS','SEASCODE','SEAS1','SEAS2','SEAS3','SEAS4','USERDEFINE1','USERDATA1','USERDEFINE2','USERDATA2','USERDEFINE3','USERDATA3','USERDEFINE4','USERDATA4','USERDEFINE5','USERDATA5']
# column datatypes
dtypes = {'BASIN':str,'CY':str,'YYYYMMDDHH':str,'TECHNUM':float,'TECH':str,'TAU':float,'LAT':str,'LON':str,'VMAX':float,'MSLP':float,'TY':str,'RAD':float,'WINDCODE':str,'RAD1':float,'RAD2':float,'RAD3':float,'RAD4':float,'POUTER':float,'ROUTER':float,'RMW':float,'GUSTS':float,'EYE':float,'SUBREGION':str,'MAXSEAS':float,'INITIALS':str,'DIR':float,'SPEED':float,'STORMNAME':str,'DEPTH':str,'SEAS':float,'SEASCODE':str,'SEAS1':float,'SEAS2':float,'SEAS3':float,'SEAS4':float,'USERDEFINE1':str,'USERDATA1':str,'USERDEFINE2':str,'USERDATA2':str,'USERDEFINE3':str,'USERDATA3':str,'USERDEFINE4':str,'USERDATA4':str,'USERDEFINE5':str,'USERDATA5':str}
df = _pd.read_csv(filename,skipinitialspace=True,header=None,names=names,dtype=dtypes)
# convert YYYYMMDDHH into datetime
df['YYYYMMDDHH'] = | _pd.to_datetime(df['YYYYMMDDHH'], format='%Y%m%d%H') | pandas.to_datetime |
#!/usr/bin/env python
# coding: utf-8
import math
import glob
import re
import os.path
import numpy as np
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from pathlib import Path
from io import StringIO
from pyproj import Transformer
from itertools import takewhile
from scipy import stats
import multiprocessing as mp
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from shapely.geometry import box, Point, LineString
def standardise_source(df):
# Dictionary containing method values to rename
remap_dict = {'emery': 'emery/levelling',
'levelling': 'emery/levelling',
'dunefill': np.NaN,
'rtk gps': 'gps',
'photogrammetry': 'aerial photogrammetry',
'stereo photogrammtery': 'aerial photogrammetry',
'ads80': 'aerial photogrammetry',
'photos': 'aerial photogrammetry',
'total station': 'total station',
'total station\t': 'total station',
'laser scanning': 'terrestrial laser scanning',
'satellite': 'satellite',
'gps rtk gps': 'gps'}
# Set all values to lower case for easier conversion
df['source'] = df.source.str.lower()
# Replace values
df['source'] = df.source.replace(remap_dict)
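# Note: this edits the 'source' column of the passed frame in place and
# returns None, so callers rely on the side effect rather than a return value.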
def to_vector(df,
fname='test.shp',
x='x',
y='y',
crs='EPSG:3577',
output_crs='EPSG:3577'):
# Convert datetimes to strings
df = df.copy()
is_datetime = df.dtypes == 'datetime64[ns]'
df.loc[:, is_datetime] = df.loc[:, is_datetime].astype(str)
# Export to file
gdf = gpd.GeoDataFrame(data=df.loc[:, df.dtypes != 'datetime64[ns]'],
geometry=gpd.points_from_xy(x=df[x], y=df[y]),
crs=crs).to_crs(output_crs)
# Write to file, then return the GeoDataFrame (to_file() itself returns None)
gdf.to_file(fname)
return gdf
def export_eval(df, output_name, output_crs='EPSG:3577'):
from shapely.geometry import box, Point, LineString
# Extract geometries
val_points = gpd.points_from_xy(x=df.val_x, y=df.val_y)
deacl_points = gpd.points_from_xy(x=df.deacl_x, y=df.deacl_y)
df_profiles = df.groupby('id').first()
profile_lines = df_profiles.apply(
lambda x: LineString([(x.start_x, x.start_y), (x.end_x, x.end_y)]), axis=1)
# Export validation points
val_gdf = gpd.GeoDataFrame(data=df,
geometry=val_points,
crs=output_crs).to_crs('EPSG:4326')
val_gdf.to_file(f'figures/eval/{output_name}_val.geojson',
driver='GeoJSON')
# Export DEACL points
deacl_gdf = gpd.GeoDataFrame(data=df,
geometry=deacl_points,
crs=output_crs).to_crs('EPSG:4326')
deacl_gdf.to_file(f'figures/eval/{output_name}_deacl.geojson',
driver='GeoJSON')
# Export profiles
profile_gdf = gpd.GeoDataFrame(data=df_profiles,
geometry=profile_lines,
crs=output_crs).to_crs('EPSG:4326')
profile_gdf.to_file(f'figures/eval/{output_name}_profiles.geojson',
driver='GeoJSON')
def deacl_val_stats(val_dist, deacl_dist, n=None, remove_bias=False):
np.seterr(all='ignore')
# Compute difference and bias
diff_dist = val_dist - deacl_dist
bias = diff_dist.mean()
if remove_bias:
deacl_dist += bias
diff_dist = val_dist - deacl_dist
# Compute stats
if n is None:
n = len(val_dist)
else:
n = sum(n)
mae = mean_absolute_error(val_dist, deacl_dist)
rmse = mean_squared_error(val_dist, deacl_dist)**0.5
if n > 1:
corr = np.corrcoef(x=val_dist, y=deacl_dist)[0][1]
stdev = diff_dist.std()
else:
corr = np.nan
stdev = np.nan
return pd.Series({
'n': n,
'mae': f'{mae:.2f}',
'rmse': f'{rmse:.2f}',
'stdev': f'{stdev:.2f}',
'corr': f'{corr:.3f}',
'bias': f'{bias:.2f}',
}).astype(float)
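# Statistics are computed on (validation - DEACL) distances; when
# remove_bias=True the mean offset is added back onto the DEACL distances
# before MAE/RMSE/SD are recalculated.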
def rse_tableformat(not_bias_corrected, bias_corrected, groupby='source'):
# Fix rounding and total observations
not_bias_corrected['n'] = not_bias_corrected['n'].astype(int)
not_bias_corrected[['bias', 'stdev', 'mae', 'rmse']] = not_bias_corrected[['bias', 'stdev', 'mae', 'rmse']].round(1)
not_bias_corrected['n'] = not_bias_corrected.groupby(groupby)['n'].sum()
# Move bias corrected values into brackets
not_bias_corrected['MAE (m)'] = (not_bias_corrected.mae.astype('str') + ' (' +
bias_corrected.mae.round(1).astype('str') + ')')
not_bias_corrected['RMSE (m)'] = (not_bias_corrected.rmse.astype('str') + ' (' +
bias_corrected.rmse.round(1).astype('str') + ')')
# Sort by MAE, rename columns
not_bias_corrected = (not_bias_corrected.sort_values('mae')
.drop(['mae', 'rmse'], axis=1)
.rename({'stdev': 'SD (m)', 'corr': 'Correlation', 'bias': 'Bias (m)'}, axis=1)
[['n', 'Bias (m)', 'MAE (m)', 'RMSE (m)', 'SD (m)', 'Correlation']])
return not_bias_corrected
def val_slope(profiles_df, intercept_df, datum=0, buffer=25, method='distance'):
# Join datum dist to full profile dataframe
profiles_datum_dist = (profiles_df.set_index(
['id', 'date'])[['distance', 'z']].join(intercept_df[f'{datum}_dist']))
if method == 'distance':
# Filter to measurements within distance of datum distance
beach_data = profiles_datum_dist[profiles_datum_dist.distance.between(
profiles_datum_dist[f'{datum}_dist'] - buffer,
profiles_datum_dist[f'{datum}_dist'] + buffer)]
elif method == 'height':
# Filter measurements within height of datum
beach_data = profiles_datum_dist.loc[
profiles_datum_dist.z.between(-buffer, buffer)]
# Calculate slope
beach_slope = beach_data.groupby(['id', 'date']).apply(
lambda x: stats.linregress(x=x.distance, y=x.z).slope)
return beach_slope.round(3)
def dms2dd(s):
# example: s = "0°51'56.29"
degrees, minutes, seconds = re.split('[°\'"]+', s)
if float(degrees) > 0:
dd = float(degrees) + float(minutes) / 60 + float(seconds) / (60 * 60)
else:
dd = float(degrees) - float(minutes) / 60 - float(seconds) / (60 * 60)
return dd
def dist_angle(lon, lat, dist, angle):
lon_end = lon + dist * np.sin(angle * np.pi / 180)
lat_end = lat + dist * np.cos(angle * np.pi / 180)
return pd.Series({'end_y': lat_end, 'end_x': lon_end})
def interp_intercept(x, y1, y2, reverse=False):
"""
Find the intercept of two curves, given by the same x data
References:
----------
Source: https://stackoverflow.com/a/43551544/2510900
"""
def intercept(point1, point2, point3, point4):
"""find the intersection between two lines
the first line is defined by the line between point1 and point2
the first line is defined by the line between point3 and point4
each point is an (x,y) tuple.
So, for example, you can find the intersection between
intercept((0,0), (1,1), (0,1), (1,0)) = (0.5, 0.5)
Returns: the intercept, in (x,y) format
"""
def line(p1, p2):
A = (p1[1] - p2[1])
B = (p2[0] - p1[0])
C = (p1[0] * p2[1] - p2[0] * p1[1])
return A, B, -C
def intersection(L1, L2):
D = L1[0] * L2[1] - L1[1] * L2[0]
Dx = L1[2] * L2[1] - L1[1] * L2[2]
Dy = L1[0] * L2[2] - L1[2] * L2[0]
x = Dx / D
y = Dy / D
return x,y
L1 = line([point1[0],point1[1]], [point2[0],point2[1]])
L2 = line([point3[0],point3[1]], [point4[0],point4[1]])
R = intersection(L1, L2)
return R
try:
if isinstance(y2, (int, float)):
y2 = np.array([y2] * len(x))
if reverse:
x = x[::-1]
y1 = y1[::-1]
y2 = y2[::-1]
idx = np.argwhere(np.diff(np.sign(y1 - y2)) != 0)
xc, yc = intercept((x[idx], y1[idx]),((x[idx + 1], y1[idx + 1])),
((x[idx], y2[idx])), ((x[idx + 1], y2[idx + 1])))
return xc[0][0]
except:
return np.nan
def dist_along_transect(dist, start_x, start_y, end_x, end_y):
transect_line = LineString([(start_x, start_y), (end_x, end_y)])
distance_coords = transect_line.interpolate(dist).coords.xy
return [coord[0] for coord in distance_coords]
def waterline_intercept(x,
dist_col='distance',
x_col='x',
y_col='y',
z_col='z',
z_val=0,
debug=False):
# Extract distance and coordinates of where the z_val first
# intersects with the profile line
dist_int = interp_intercept(x[dist_col].values, x[z_col].values, z_val)
x_int = interp_intercept(x[x_col].values, x[z_col].values, z_val)
y_int = interp_intercept(x[y_col].values, x[z_col].values, z_val)
# Identify last distance where the z_value intersects the profile
rev_int = interp_intercept(x[dist_col].values, x[z_col].values, z_val,
reverse=True)
# If first and last intersects are the identical, return data.
# If not, the comparison is invalid (i.e. NaN)
if dist_int == rev_int:
if debug: print('Single intersection found')
return pd.Series({f'{z_val}_dist': dist_int,
f'{z_val}_x': x_int,
f'{z_val}_y': y_int})
else:
if debug: print('Multiple intersections returned')
return pd.Series({f'{z_val}_dist': np.NaN,
f'{z_val}_x': np.NaN,
f'{z_val}_y': np.NaN})
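# A profile is only used when the first and last crossings of z_val coincide,
# i.e. the transect intersects the datum exactly once; ambiguous profiles
# return NaN and drop out of the comparison.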
def reproj_crs(in_data,
in_crs,
x='x',
y='y',
out_crs='EPSG:3577'):
# Reset index to allow merging new data with original data
in_data = in_data.reset_index(drop=True)
# Reproject coords to Albers and create geodataframe
trans = Transformer.from_crs(in_crs, out_crs, always_xy=True)
coords = trans.transform(in_data[x].values, in_data[y].values)
in_data[['x', 'y']] = pd.DataFrame(zip(*coords))
return in_data
def profiles_from_dist(profiles_df,
id_col='id',
dist_col='distance',
x_col='x',
y_col='y'):
# Compute origin points for each profile
min_ids = profiles_df.groupby(id_col)[dist_col].idxmin()
start_xy = profiles_df.loc[min_ids, [id_col, x_col, y_col]]
start_xy = start_xy.rename({x_col: f'start_{x_col}',
y_col: f'start_{y_col}'},
axis=1)
# Compute end points for each profile
max_ids = profiles_df.groupby(id_col)[dist_col].idxmax()
end_xy = profiles_df.loc[max_ids, [x_col, y_col]]
# Add end coords into same dataframe
start_xy = start_xy.reset_index(drop=True)
end_xy = end_xy.reset_index(drop=True)
start_xy[[f'end_{x_col}', f'end_{y_col}']] = end_xy
return start_xy
def perpendicular_line(input_line, length):
# Generate parallel lines either side of input line
left = input_line.parallel_offset(length / 2.0, 'left')
right = input_line.parallel_offset(length / 2.0, 'right')
# Create new line between centroids of parallel line.
# This should be perpendicular to the original line
return LineString([left.centroid, right.centroid])
def generate_transects(line_geom,
length=400,
interval=200,
buffer=20):
# Create tangent line at equal intervals along line geom
interval_dists = np.arange(buffer, line_geom.length, interval)
tangent_geom = [LineString([line_geom.interpolate(dist - buffer),
line_geom.interpolate(dist + buffer)])
for dist in interval_dists]
# Convert to geoseries and remove erroneous lines by length
tangent_gs = gpd.GeoSeries(tangent_geom)
tangent_gs = tangent_gs.loc[tangent_gs.length.round(1) <= buffer * 2]
# Compute perpendicular lines
return tangent_gs.apply(perpendicular_line, length=length)
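# Hypothetical usage (names as defined above): generate_transects(coastline,
# length=400, interval=200) yields a GeoSeries of shore-normal transects
# spaced every 200 m along the simplified coastline.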
def coastal_transects(bbox,
name,
interval=200,
transect_length=400,
simplify_length=200,
transect_buffer=20,
output_crs='EPSG:3577',
coastline='../input_data/Smartline.gdb',
land_poly='/g/data/r78/rt1527/shapefiles/australia/australia/cstauscd_r.shp'):
# Load smartline
coastline_gdf = gpd.read_file(coastline, bbox=bbox).to_crs(output_crs)
coastline_geom = coastline_gdf.geometry.unary_union.simplify(simplify_length)
# Load Australian land water polygon
land_gdf = gpd.read_file(land_poly, bbox=bbox).to_crs(output_crs)
land_gdf = land_gdf.loc[land_gdf.FEAT_CODE.isin(["mainland", "island"])]
land_geom = gpd.overlay(df1=land_gdf, df2=bbox).unary_union
# Extract transects along line
geoms = generate_transects(coastline_geom,
length=transect_length,
interval=interval,
buffer=transect_buffer)
# Test if end points of transects fall in water or land
p1 = gpd.GeoSeries([Point(i.coords[0]) for i in geoms])
p2 = gpd.GeoSeries([Point(i.coords[1]) for i in geoms])
p1_within_land = p1.within(land_geom)
p2_within_land = p2.within(land_geom)
# Create geodataframe, remove invalid land-land/water-water transects
transect_gdf = gpd.GeoDataFrame(data={'p1': p1_within_land,
'p2': p2_within_land},
geometry=geoms.values,
crs=output_crs)
transect_gdf = transect_gdf[~(transect_gdf.p1 == transect_gdf.p2)]
# Reverse transects so all point away from land
transect_gdf['geometry'] = transect_gdf.apply(
lambda i: LineString([i.geometry.coords[1],
i.geometry.coords[0]])
if i.p1 < i.p2 else i.geometry, axis=1)
# Export to file
transect_gdf[['geometry']].to_file(f'input_data/coastal_transects_{name}.geojson',
driver='GeoJSON')
def coastal_transects_parallel(
regions_gdf,
interval=200,
transect_length=400,
simplify_length=200,
transect_buffer=20,
overwrite=False,
output_path='input_data/combined_transects_wadot.geojson'):
if not os.path.exists(output_path) or overwrite:
if os.path.exists(output_path):
print('Removing existing file')
os.remove(output_path)
# Generate transects for each region
print('Generating transects')
with mp.Pool(mp.cpu_count()) as pool:
for i, _ in regions_gdf.iterrows():
name = str(i).replace(' ', '').replace('/', '').lower()
pool.apply_async(coastal_transects, [
regions_gdf.loc[[i]], name, interval, transect_length,
simplify_length, transect_buffer
])
pool.close()
pool.join()
# Load regional transects and combine into a single file
print('Combining data')
transect_list = glob.glob('input_data/coastal_transects_*.geojson')
gdf = pd.concat(
[gpd.read_file(shp, ignore_index=True) for shp in transect_list])
gdf = gdf.reset_index(drop=True)
gdf['profile'] = gdf.index.astype(str)
gdf.to_file(output_path, driver='GeoJSON')
# Clean files
[os.remove(f) for f in transect_list]
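# Each region is handled by its own worker via apply_async; the per-region
# GeoJSON outputs are then concatenated, re-indexed and deleted once the
# combined transect file is written.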
def preprocess_wadot(compartment,
overwrite=True,
fname='input_data/wadot/Coastline_Movements_20190819.gdb'):
beach = str(compartment.index.item())
fname_out = f'output_data/wadot_{beach}.csv'
print(f'Processing {beach:<80}', end='\r')
# Test if file exists
if not os.path.exists(fname_out) or overwrite:
# Read file and filter to AHD 0 shorelines
val_gdf = gpd.read_file(fname,
bbox=compartment).to_crs('EPSG:3577')
val_gdf = gpd.clip(gdf=val_gdf, mask=compartment, keep_geom_type=True)
val_gdf = val_gdf[(val_gdf.TYPE == 'AHD 0m') |
(val_gdf.TYPE == 'AHD 0m ')]
# Filter to post 1987 shorelines and set index to year
val_gdf = val_gdf[val_gdf.PHOTO_YEAR > 1987]
val_gdf = val_gdf.set_index('PHOTO_YEAR')
# If no data is returned, skip this iteration
if len(val_gdf.index) == 0:
print(f'Failed: {beach:<80}', end='\r')
return None
######################
# Generate transects #
######################
transect_gdf = gpd.read_file('input_data/combined_transects_wadot.geojson',
bbox=compartment)
transect_gdf = gpd.clip(gdf=transect_gdf, mask=compartment, keep_geom_type=True)
################################
# Identify 0 MSL intersections #
################################
output_list = []
# Select one year
for year in val_gdf.index.unique().sort_values():
# Extract validation contour
print(f'Processing {beach} {year:<80}', end='\r')
val_contour = val_gdf.loc[[year]].geometry.unary_union
# Copy transect data, and find intersects
# between transects and contour
intersect_gdf = transect_gdf.copy()
intersect_gdf['val_point'] = transect_gdf.intersection(val_contour)
to_keep = gpd.GeoSeries(intersect_gdf['val_point']).geom_type == 'Point'
intersect_gdf = intersect_gdf.loc[to_keep]
# If no data is returned, skip this iteration
if len(intersect_gdf.index) == 0:
print(f'Failed: {beach} {year:<80}', end='\r')
continue
# Add generic metadata
intersect_gdf['date'] = pd.to_datetime(str(year))
intersect_gdf['beach'] = beach
intersect_gdf['section'] = 'all'
intersect_gdf['source'] = 'aerial photogrammetry'
intersect_gdf['name'] = 'wadot'
intersect_gdf['id'] = (intersect_gdf.beach + '_' +
intersect_gdf.section + '_' +
intersect_gdf.profile)
# Add measurement metadata
intersect_gdf[['start_x', 'start_y']] = intersect_gdf.apply(
lambda x: pd.Series(x.geometry.coords[0]), axis=1)
intersect_gdf[['end_x', 'end_y']] = intersect_gdf.apply(
lambda x: pd.Series(x.geometry.coords[1]), axis=1)
intersect_gdf['0_dist'] = intersect_gdf.apply(
lambda x: Point(x.start_x, x.start_y).distance(x['val_point']), axis=1)
intersect_gdf[['0_x', '0_y']] = intersect_gdf.apply(
lambda x: pd.Series(x.val_point.coords[0]), axis=1)
# Add empty slope var (not possible to compute without profile data)
intersect_gdf['slope'] = np.nan
# Keep required columns
intersect_gdf = intersect_gdf[['id', 'date', 'beach',
'section', 'profile', 'name',
'source', 'slope', 'start_x',
'start_y', 'end_x', 'end_y',
'0_dist', '0_x', '0_y']]
# Append to file
output_list.append(intersect_gdf)
# Combine all year data and export to file
if len(output_list) > 0:
shoreline_df = pd.concat(output_list)
shoreline_df.to_csv(fname_out, index=False)
else:
print(f'Skipping {beach:<80}', end='\r')
def preprocess_dasilva2021(fname='input_data/dasilva2021/dasilva_etal_2021_shorelines.shp'):
beach = 'dasilva2021'
print(f'Processing {beach:<80}', end='\r')
# Read file and filter to AHD 0 shorelines
fname='input_data/dasilva2021/dasilva_etal_2021_shorelines.shp'
val_gdf = gpd.read_file(fname).to_crs('EPSG:3577')
val_gdf = val_gdf.loc[val_gdf.Year_ > 1987]
val_gdf['Year_'] = val_gdf.Year_.astype(str)
val_gdf = val_gdf.set_index('Year_')
# If no data is returned, skip this iteration
if len(val_gdf.index) == 0:
print(f'Failed: {beach:<80}', end='\r')
return None
######################
# Generate transects #
######################
transect_gdf = gpd.read_file('input_data/dasilva2021/dasilva_etal_2021_retransects.shp').to_crs('EPSG:3577')[['TransectID', 'Direction', 'order', 'geometry']]
transect_gdf.columns = ['profile', 'section', 'order', 'geometry']
transect_gdf = transect_gdf.sort_values('order').set_index('order')
transect_gdf['profile'] = transect_gdf.profile.astype(str)
################################
# Identify 0 MSL intersections #
################################
output_list = []
# Select one year
for year in val_gdf.index.unique().sort_values():
# Extract validation contour
print(f'Processing {beach} {year:<80}', end='\r')
val_contour = val_gdf.loc[[year]].geometry.unary_union
# Copy transect data, and find intersects
# between transects and contour
intersect_gdf = transect_gdf.copy()
intersect_gdf['val_point'] = transect_gdf.intersection(val_contour)
to_keep = gpd.GeoSeries(intersect_gdf['val_point']).geom_type == 'Point'
intersect_gdf = intersect_gdf.loc[to_keep]
# If no data is returned, skip this iteration
if len(intersect_gdf.index) == 0:
print(f'Failed: {beach} {year:<80}', end='\r')
continue
# Add generic metadata
intersect_gdf['date'] = pd.to_datetime(str(year))
intersect_gdf['beach'] = beach
intersect_gdf['source'] = 'satellite'
intersect_gdf['name'] = 'dasilva2021'
intersect_gdf['id'] = (intersect_gdf.beach + '_' +
intersect_gdf.section + '_' +
intersect_gdf.profile)
# Add measurement metadata
intersect_gdf[['start_x', 'start_y']] = intersect_gdf.apply(
lambda x: pd.Series(x.geometry.coords[0]), axis=1)
intersect_gdf[['end_x', 'end_y']] = intersect_gdf.apply(
lambda x: pd.Series(x.geometry.coords[1]), axis=1)
intersect_gdf['0_dist'] = intersect_gdf.apply(
lambda x: Point(x.start_x, x.start_y).distance(x['val_point']), axis=1)
intersect_gdf[['0_x', '0_y']] = intersect_gdf.apply(
lambda x: pd.Series(x.val_point.coords[0][0:2]), axis=1)
# Add empty slope var (not possible to compute without profile data)
intersect_gdf['slope'] = np.nan
# Keep required columns
intersect_gdf = intersect_gdf[['id', 'date', 'beach',
'section', 'profile', 'name',
'source', 'slope', 'start_x',
'start_y', 'end_x', 'end_y',
'0_dist', '0_x', '0_y']]
# Append to file
output_list.append(intersect_gdf)
# Combine all year data and export to file
if len(output_list) > 0:
shoreline_df = pd.concat(output_list)
shoreline_df.to_csv(f'output_data/{beach}.csv', index=False)
def preprocess_stirling(fname_out, datum=0):
# List containing files to import and all params to extract data
survey_xl = [
{'fname': 'input_data/stirling/2015 05 28 - From Stirling - Coastal Profiles 2014-2015 April-Feb with updated reef#2.xlsm',
'skiprows': 2,
'skipcols': 5,
'nrows': 100,
'meta_skiprows': 0,
'meta_nrows': 1,
'meta_usecols': [6, 7]},
{'fname': 'input_data/stirling/Coastal Profiles 2013-2014 JUL-MAY#2.xlsx',
'skiprows': 2,
'skipcols': 5,
'nrows': 100,
'meta_skiprows': 0,
'meta_nrows': 1,
'meta_usecols': [6, 7]},
{'fname': 'input_data/stirling/COASTAL PROFILES 2013 JAN - JUNE#2.xls',
'skiprows': 3,
'skipcols': 0,
'nrows': 40,
'meta_skiprows': 1,
'meta_nrows': 2,
'meta_usecols': [1, 2]},
{'fname': 'input_data/stirling/COASTAL PROFILES 2012 JUN - DEC#2.xls',
'skiprows': 3,
'skipcols': 0,
'nrows': 40,
'meta_skiprows': 1,
'meta_nrows': 2,
'meta_usecols': [1, 2]},
{'fname': 'input_data/stirling/COASTAL PROFILES 2011-2012 NOV - MAY#2.xls',
'skiprows': 3,
'skipcols': 0,
'nrows': 40,
'meta_skiprows': 1,
'meta_nrows': 2,
'meta_usecols': [1, 2]}
]
# List to contain processed profile data
output = []
# For each survey excel file in the list above:
for survey in survey_xl:
# Load profile start metadata
all_meta = pd.read_excel(survey['fname'],
sheet_name=None,
nrows=survey['meta_nrows'],
skiprows=survey['meta_skiprows'],
usecols=survey['meta_usecols'],
header=None)
# Load data
all_sheets = pd.read_excel(survey['fname'],
sheet_name=None,
skiprows=survey['skiprows'],
nrows=survey['nrows'],
parse_dates=False,
usecols=lambda x: 'Unnamed' not in str(x))
# Iterate through each profile in survey data
for profile_id in np.arange(1, 20).astype('str'):
# Extract profile start metadata and profile data
start_x, start_y = all_meta[profile_id].values[0]
sheet = all_sheets[profile_id].iloc[:,survey['skipcols']:]
# First set all column names to lower case strings
sheet.columns = (sheet.columns.astype(str)
.str.slice(0, 10)
.str.lower())
# Drop note columns and distance/angle offset
sheet = sheet.loc[:,~sheet.columns.str.contains('note|notes')]
sheet = sheet.drop(['dist', 'angle dd'], axis=1, errors='ignore')
# Expand date column values into rows for each sampling event
sheet.loc[:,sheet.columns[::4]] = sheet.columns[::4]
# Number date columns incrementally to match other fields
start_num = 1 if survey['skipcols'] > 0 else 0
rename_dict = {name: f'date.{i + start_num}' for
i, name in enumerate(sheet.columns[::4])}
sheet = sheet.rename(rename_dict, axis=1).reset_index()
sheet = sheet.rename({'x': 'x.0', 'y': 'y.0', 'z': 'z.0'}, axis=1)
# Reshape data into long format
profile_df = pd.wide_to_long(sheet,
stubnames=['date', 'x', 'y', 'z'],
i='index',
j='dropme',
sep='.').reset_index(drop=True)
# Set datetimes
profile_df['date'] = pd.to_datetime(profile_df.date,
errors='coerce',
dayfirst=True)
# Add profile metadata
profile_df['beach'] = 'stirling'
profile_df['section'] = 'all'
profile_df['profile'] = profile_id
profile_df['name'] = 'stirling'
profile_df['source'] = 'gps'
profile_df['start_x'] = start_x
profile_df['start_y'] = start_y
profile_df['id'] = (profile_df.beach + '_' +
profile_df.section + '_' +
profile_df.profile)
# Add results to list
output.append(profile_df.dropna())
# Combine all survey and profile data
profiles_df = | pd.concat(output) | pandas.concat |
from pathlib import Path
import numpy as np
import numpy.testing as npt
import pandas as pd
import pytest
from message_ix import Scenario, macro
from message_ix.models import MACRO
from message_ix.testing import SCENARIO, make_westeros
W_DATA_PATH = Path(__file__).parent / "data" / "westeros_macro_input.xlsx"
MR_DATA_PATH = Path(__file__).parent / "data" / "multiregion_macro_input.xlsx"
class MockScenario:
def __init__(self):
self.data = pd.read_excel(MR_DATA_PATH, sheet_name=None, engine="openpyxl")
for name, df in self.data.items():
if "year" in df:
df = df[df.year >= 2030]
self.data[name] = df
def has_solution(self):
return True
def var(self, name, **kwargs):
df = self.data["aeei"]
# Add extra commodity to be removed
extra_commod = df[df.sector == "i_therm"].copy()
extra_commod["sector"] = "bar"
# Add extra region to be removed
extra_region = df[df.node == "R11_AFR"].copy()
extra_region["node"] = "foo"
df = pd.concat([df, extra_commod, extra_region])
if name == "DEMAND":
df = df.rename(columns={"sector": "commodity"})
elif name in ["COST_NODAL_NET", "PRICE_COMMODITY"]:
df = df.rename(columns={"sector": "commodity", "value": "lvl"})
df["lvl"] = 1e3
return df
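# The extra 'bar' sector and 'foo' node rows imitate solution output that the
# MACRO calibration code is expected to filter out when reading these
# scenario variables.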
@pytest.fixture(scope="class")
def westeros_solved(test_mp):
yield make_westeros(test_mp, solve=True, quiet=True)
@pytest.fixture(scope="class")
def westeros_not_solved(westeros_solved):
yield westeros_solved.clone(keep_solution=False)
def test_calc_valid_data_file(westeros_solved):
s = westeros_solved
c = macro.Calculate(s, W_DATA_PATH)
c.read_data()
def test_calc_invalid_data(westeros_solved):
with pytest.raises(TypeError, match="neither a dict nor a valid path"):
macro.Calculate(westeros_solved, list())
with pytest.raises(ValueError, match="not an Excel data file"):
macro.Calculate(westeros_solved, Path(__file__).joinpath("other.zip"))
def test_calc_valid_data_dict(westeros_solved):
s = westeros_solved
data = | pd.read_excel(W_DATA_PATH, sheet_name=None, engine="openpyxl") | pandas.read_excel |
import ast
import nose
import os
import shutil
import subprocess
import unittest
import numpy as np
import pandas.io.gbq as gbq
import pandas.util.testing as tm
from pandas.core.frame import DataFrame
from pandas.util.testing import with_connectivity_check
from pandas import NaT
try:
import bq
import bigquery_client
import gflags as flags
except ImportError:
raise nose.SkipTest
####################################################################################
# Fake Google BigQuery Client
class FakeClient:
def __init__(self):
self.apiclient = FakeApiClient()
def GetTableSchema(self,table_dict):
retval = {'fields': [
{'type': 'STRING', 'name': 'corpus', 'mode': 'NULLABLE'},
{'type': 'INTEGER', 'name': 'corpus_date', 'mode': 'NULLABLE'},
{'type': 'STRING', 'name': 'word', 'mode': 'NULLABLE'},
{'type': 'INTEGER', 'name': 'word_count', 'mode': 'NULLABLE'}
]}
return retval
# Fake Google BigQuery API Client
class FakeApiClient:
def __init__(self):
self._tabledata = FakeTableData()
def tabledata(self):
return self._tabledata
class FakeTableData:
def __init__(self):
self._list = FakeList()
def list(self,maxResults = None, pageToken = None, **table_dict):
return self._list
class FakeList:
def execute(self):
return {'rows': [ {'f': [{'v': 'othello'}, {'v': '1603'}, {'v': 'brave'}, {'v': '3'}]},
{'f': [{'v': 'othello'}, {'v': '1603'}, {'v': 'attended'}, {'v': '1'}]},
{'f': [{'v': 'othello'}, {'v': '1603'}, {'v': 'treason'}, {'v': '1'}]},
{'f': [{'v': 'othello'}, {'v': '1603'}, {'v': 'islanders'}, {'v': '1'}]},
{'f': [{'v': 'othello'}, {'v': '1603'}, {'v': 'heed'}, {'v': '3'}]},
{'f': [{'v': 'othello'}, {'v': '1603'}, {'v': 'alehouse'}, {'v': '1'}]},
{'f': [{'v': 'othello'}, {'v': '1603'}, {'v': 'corrigible'}, {'v': '1'}]},
{'f': [{'v': 'othello'}, {'v': '1603'}, {'v': 'brawl'}, {'v': '2'}]},
{'f': [{'v': 'othello'}, {'v': '1603'}, {'v': "'"}, {'v': '17'}]},
{'f': [{'v': 'othello'}, {'v': '1603'}, {'v': 'troubled'}, {'v': '1'}]}
],
'kind': 'bigquery#tableDataList',
'etag': '"4PTsVxg68bQkQs1RJ1Ndewqkgg4/hoRHzb4qfhJAIa2mEewC-jhs9Bg"',
'totalRows': '10'}
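# These fakes provide the call chain that gbq._parse_data is expected to make in the
# tests below:
#   client.apiclient.tabledata().list(**table_dict).execute() -> a page of row data
#   client.GetTableSchema(table_dict)                         -> the column schema
# so parsing can be exercised without touching the real BigQuery service.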
####################################################################################
class test_gbq(unittest.TestCase):
def setUp(self):
with open(self.fake_job_path, 'r') as fin:
self.fake_job = ast.literal_eval(fin.read())
self.test_data_small = [{'f': [{'v': 'othello'}, {'v': '1603'}, {'v': 'brave'}, {'v': '3'}]},
{'f': [{'v': 'othello'}, {'v': '1603'}, {'v': 'attended'}, {'v': '1'}]},
{'f': [{'v': 'othello'}, {'v': '1603'}, {'v': 'treason'}, {'v': '1'}]},
{'f': [{'v': 'othello'}, {'v': '1603'}, {'v': 'islanders'}, {'v': '1'}]},
{'f': [{'v': 'othello'}, {'v': '1603'}, {'v': 'heed'}, {'v': '3'}]},
{'f': [{'v': 'othello'}, {'v': '1603'}, {'v': 'alehouse'}, {'v': '1'}]},
{'f': [{'v': 'othello'}, {'v': '1603'}, {'v': 'corrigible'}, {'v': '1'}]},
{'f': [{'v': 'othello'}, {'v': '1603'}, {'v': 'brawl'}, {'v': '2'}]},
{'f': [{'v': 'othello'}, {'v': '1603'}, {'v': "'"}, {'v': '17'}]},
{'f': [{'v': 'othello'}, {'v': '1603'}, {'v': 'troubled'},
{'v': '1'}]}]
self.correct_data_small = np.array(
[('othello', 1603, 'brave', 3),
('othello', 1603, 'attended', 1),
('othello', 1603, 'treason', 1),
('othello', 1603, 'islanders', 1),
('othello', 1603, 'heed', 3),
('othello', 1603, 'alehouse', 1),
('othello', 1603, 'corrigible', 1),
('othello', 1603, 'brawl', 2),
('othello', 1603, "'", 17),
('othello', 1603, 'troubled', 1)
],
dtype=[('corpus', 'S16'),
('corpus_date', '<i8'),
('word', 'S16'),
('word_count', '<i8')]
)
self.correct_test_datatype = DataFrame(
{'VALID_STRING' : ['PI'],
'EMPTY_STRING' : [""],
'NULL_STRING' : [None],
'VALID_INTEGER' : [3],
'NULL_INTEGER' : [np.nan],
'VALID_FLOAT' : [3.141592653589793],
'NULL_FLOAT' : [np.nan],
'UNIX_EPOCH' : [np.datetime64('1970-01-01T00:00:00.000000Z')],
'VALID_TIMESTAMP' : [np.datetime64('2004-09-15T05:00:00.000000Z')],
'NULL_TIMESTAMP' :[NaT],
'TRUE_BOOLEAN' : [True],
'FALSE_BOOLEAN' : [False],
'NULL_BOOLEAN' : [None]
}
)[['VALID_STRING',
'EMPTY_STRING',
'NULL_STRING',
'VALID_INTEGER',
'NULL_INTEGER',
'VALID_FLOAT',
'NULL_FLOAT',
'UNIX_EPOCH',
'VALID_TIMESTAMP',
'NULL_TIMESTAMP',
'TRUE_BOOLEAN',
'FALSE_BOOLEAN',
'NULL_BOOLEAN']]
@classmethod
def setUpClass(self):
# Integration tests require a valid bigquery token
# be present in the user's home directory. This
# can be generated with 'bq init' in the command line
self.dirpath = tm.get_data_path()
home = os.path.expanduser("~")
self.bq_token = os.path.join(home, '.bigquery.v2.token')
self.fake_job_path = os.path.join(self.dirpath, 'gbq_fake_job.txt')
# If we're using a valid token, make a test dataset
# Note, dataset functionality is beyond the scope
# of the module under test, so we rely on the command
# line utility for this.
if os.path.exists(self.bq_token):
subprocess.call(['bq','mk', '-d', 'pandas_testing_dataset'])
@classmethod
def tearDownClass(self):
# If we're using a valid token, remove the test dataset
# created.
if os.path.exists(self.bq_token):
subprocess.call(['bq', 'rm', '-r', '-f', '-d', 'pandas_testing_dataset'])
@with_connectivity_check
def test_valid_authentication(self):
# If the user has a token file, they should receive a client from gbq._authenticate
if not os.path.exists(self.bq_token):
raise nose.SkipTest('Skipped because authentication information is not available.')
self.assertTrue(gbq._authenticate is not None, 'Authentication To GBQ Failed')
@with_connectivity_check
def test_malformed_query(self):
# If the user has a connection file, performing an invalid query should raise an error
if not os.path.exists(self.bq_token):
raise nose.SkipTest('Skipped because authentication information is not available.')
else:
self.assertRaises(bigquery_client.BigqueryInvalidQueryError,
gbq.read_gbq, "SELCET * FORM [publicdata:samples.shakespeare]")
def test_type_conversion(self):
# All BigQuery Types should be cast into appropriate numpy types
sample_input = [('1.095292800E9', 'TIMESTAMP'),
('false', 'BOOLEAN'),
('2', 'INTEGER'),
('3.14159', 'FLOAT'),
('Hello World', 'STRING')]
actual_output = [gbq._parse_entry(result[0],result[1]) for result in sample_input]
sample_output = [np.datetime64('2004-09-16T00:00:00.000000Z'),
np.bool(False),
np.int('2'),
np.float('3.14159'),
'Hello World']
self.assertEqual(actual_output, sample_output, 'A format conversion failed')
def test_data_small(self):
# Parsing a fixed page of data should return the proper fixed np.array()
result_frame = gbq._parse_page(self.test_data_small,
['corpus','corpus_date','word','word_count'],
['STRING','INTEGER','STRING','INTEGER'],
[object,np.dtype(int),object,np.dtype(int)]
)
tm.assert_frame_equal(DataFrame(result_frame), DataFrame(self.correct_data_small),
'An element in the result DataFrame didn\'t match the sample set')
def test_index_column(self):
# A user should be able to specify an index column for return
result_frame = gbq._parse_data(FakeClient(), self.fake_job, index_col='word')
correct_frame = DataFrame(self.correct_data_small)
correct_frame.set_index('word', inplace=True)
self.assertTrue(result_frame.index.name == correct_frame.index.name)
def test_column_order(self):
# A User should be able to specify the order in which columns are returned in the dataframe
col_order = ['corpus_date', 'word_count', 'corpus', 'word']
result_frame = gbq._parse_data(FakeClient(), self.fake_job, col_order=col_order)
tm.assert_index_equal(result_frame.columns, DataFrame(self.correct_data_small)[col_order].columns)
def test_column_order_plus_index(self):
# A user should be able to specify an index and the order of the remaining columns;
# they should be notified if the combination is invalid
col_order = ['corpus_date', 'word', 'corpus']
result_frame = gbq._parse_data(FakeClient(), self.fake_job, index_col='word_count', col_order=col_order)
correct_frame_small = DataFrame(self.correct_data_small)
correct_frame_small.set_index('word_count',inplace=True)
correct_frame_small = DataFrame(correct_frame_small)[col_order]
tm.assert_index_equal(result_frame.columns, correct_frame_small.columns)
# @with_connectivity_check
# def test_download_dataset_larger_than_100k_rows(self):
# # Test for known BigQuery bug in datasets larger than 100k rows
# # http://stackoverflow.com/questions/19145587/bq-py-not-paging-results
# if not os.path.exists(self.bq_token):
# raise nose.SkipTest('Skipped because authentication information is not available.')
# client = gbq._authenticate()
# a = gbq.read_gbq("SELECT id, FROM [publicdata:samples.wikipedia] LIMIT 100005")
# self.assertTrue(len(a) == 100005)
@with_connectivity_check
def test_download_all_data_types(self):
# Test that all available data types from BigQuery (as of now)
# are handled properly
if not os.path.exists(self.bq_token):
raise nose.SkipTest('Skipped because authentication information is not available.')
query = """SELECT "PI" as VALID_STRING,
"" as EMPTY_STRING,
STRING(NULL) as NULL_STRING,
INTEGER(3) as VALID_INTEGER,
INTEGER(NULL) as NULL_INTEGER,
PI() as VALID_FLOAT,
FLOAT(NULL) as NULL_FLOAT,
TIMESTAMP("1970-01-01 00:00:00") as UNIX_EPOCH,
TIMESTAMP("2004-09-15 05:00:00") as VALID_TIMESTAMP,
TIMESTAMP(NULL) as NULL_TIMESTAMP,
BOOLEAN(TRUE) as TRUE_BOOLEAN,
BOOLEAN(FALSE) as FALSE_BOOLEAN,
BOOLEAN(NULL) as NULL_BOOLEAN"""
client = gbq._authenticate()
a = gbq.read_gbq(query, col_order = ['VALID_STRING',
'EMPTY_STRING',
'NULL_STRING',
'VALID_INTEGER',
'NULL_INTEGER',
'VALID_FLOAT',
'NULL_FLOAT',
'UNIX_EPOCH',
'VALID_TIMESTAMP',
'NULL_TIMESTAMP',
'TRUE_BOOLEAN',
'FALSE_BOOLEAN',
'NULL_BOOLEAN'])
tm.assert_frame_equal(a, self.correct_test_datatype)
@with_connectivity_check
def test_table_exists(self):
# Given a table name in the format {dataset}.{tablename}, if a table exists,
# the GetTableReference should accurately indicate this.
# This could possibly change in future implementations of bq,
# but it is the simplest way to provide users with appropriate
# error messages regarding schemas.
if not os.path.exists(self.bq_token):
raise nose.SkipTest('Skipped because authentication information is not available.')
client = gbq._authenticate()
table_reference = client.GetTableReference("publicdata:samples.shakespeare")
self.assertTrue(client.TableExists(table_reference))
@with_connectivity_check
def test_table__not_exists(self):
# Test the inverse of `test_table_exists`
if not os.path.exists(self.bq_token):
raise nose.SkipTest('Skipped because authentication information is not available.')
client = gbq._authenticate()
table_reference = client.GetTableReference("publicdata:samples.does_not_exist")
self.assertFalse(client.TableExists(table_reference))
@with_connectivity_check
def test_upload_new_table_schema_error(self):
# Attempting to upload to a non-existent table without a schema should fail
if not os.path.exists(self.bq_token):
raise nose.SkipTest('Skipped because authentication information is not available.')
df = DataFrame(self.correct_data_small)
with self.assertRaises(gbq.SchemaMissing):
gbq.to_gbq(df, 'pandas_testing_dataset.test_database', schema=None, col_order=None, if_exists='fail')
@with_connectivity_check
def test_upload_replace_schema_error(self):
# Attempting to replace an existing table without specifying a schema should fail
if not os.path.exists(self.bq_token):
raise nose.SkipTest('Skipped because authentication information is not available.')
df = DataFrame(self.correct_data_small)
with self.assertRaises(gbq.SchemaMissing):
gbq.to_gbq(df, 'pandas_testing_dataset.test_database', schema=None, col_order=None, if_exists='replace')
@with_connectivity_check
def test_upload_public_data_error(self):
# Attempting to upload to a public, read-only, dataset should fail
if not os.path.exists(self.bq_token):
raise nose.SkipTest('Skipped because authentication information is not available.')
array = [['TESTING_GBQ', 999999999, 'hi', 0, True, 9999999999, '00.000.00.000', 1, 'hola',
99999999, False, False, 1, 'Jedi', 11210]]
df = DataFrame(array)
with self.assertRaises(bigquery_client.BigqueryServiceError):
gbq.to_gbq(df, 'publicdata:samples.wikipedia', schema=None, col_order=None, if_exists='append')
@with_connectivity_check
def test_upload_new_table(self):
# Attempting to upload to a new table with valid data and a valid schema should succeed
if not os.path.exists(self.bq_token):
raise nose.SkipTest('Skipped because authentication information is not available.')
schema = ['STRING', 'INTEGER', 'STRING', 'INTEGER', 'BOOLEAN',
'INTEGER', 'STRING', 'INTEGER',
'STRING', 'INTEGER', 'BOOLEAN', 'BOOLEAN',
'INTEGER', 'STRING', 'INTEGER']
array = [['TESTING_GBQ', 999999999, 'hi', 0, True, 9999999999, '00.000.00.000', 1, 'hola',
99999999, False, False, 1, 'Jedi', 11210]]
df = DataFrame(array, columns=['title','id','language','wp_namespace','is_redirect','revision_id',
'contributor_ip','contributor_id','contributor_username','timestamp',
'is_minor','is_bot','reversion_id','comment','num_characters'])
gbq.to_gbq(df, 'pandas_testing_dataset.test_data2', schema=schema, col_order=None, if_exists='append')
a = gbq.read_gbq("SELECT * FROM pandas_testing_dataset.test_data2")
import os
from io import StringIO
from unittest import TestCase
import pandas as pd
from datatransformer.AimedXmlToDataFrame import AimedXmlToDataFrame
class TestAimedXmlToDataFrame(TestCase):
def test___call__no_relation(self):
# Arrange
xml = """
<corpus source="AIMed">
<document id="AIMed.d0">
<sentence id="AIMed.d0.s0" text="Th1/Th2 type cytokines in hepatitis B patients treated with interferon-alpha."
seqId="s0">
<entity id="AIMed.d0.s0.e0" charOffset="60-75" type="protein" text="interferon-alpha" seqId="e0"/>
</sentence>
</document>
</corpus>
"""
xml_handle = StringIO(xml)
sut = AimedXmlToDataFrame()
expected_json = []
expected_df = pd.DataFrame(expected_json)
import json
import pandas as pd
def check_num_type():
lst = []
in_file = open('dataset/train_base.json','r')
for line in in_file:
line = line.strip()
line = json.loads(line)
#print(line)
ids = line['id']
content = line['content']
for k in line['events']:
evn_type = k['type']
lst.append(evn_type)
lst = set(lst)
print(lst)
def change_data():
in_file = open('dataset/train_base.json','r')
final_lst = []
for line in in_file:
# org_lst = ['质押','股份股权转让','起诉','投资','减持']
# org_lst = ['Business:Declare-Bankruptcy','Business:End-Org','Business:Merge-Org','Business:Start-Org','Conflict:Attack','Conflict:Demonstrate','Contact:Meet','Contact:Phone-Write','Justice:Acquit','Justice:Appeal','Justice:Arrest-Jail','Justice:Charge-Indict','Justice:Convict','Justice:Extradite','Justice:Fine','Justice:Pardon','Justice:Release-Parole','Justice:Sentence','Justice:Sue','Justice:Trial-Hearing','Life:Be-Born','Life:Die','Life:Divorce','Life:Injure','Life:Marry','Movement:Transport','Personnel:Elect','Personnel:End-Position','Personnel:Nominate','Personnel:Start-Position','Transaction:Transfer-Money','Transaction:Transfer-Ownership']
org_lst = ['BusinessDeclareBankruptcy','BusinessEndOrg','BusinessMergeOrg','BusinessStartOrg','ConflictAttack','ConflictDemonstrate','ContactMeet','ContactPhoneWrite','JusticeAcquit','JusticeAppeal','JusticeArrestJail','JusticeChargeIndict','JusticeConvict','JusticeExtradite','JusticeFine','JusticePardon','JusticeReleaseParole','JusticeSentence','JusticeSue','JusticeTrialHearing','LifeBeBorn','LifeDie','LifeDivorce','LifeInjure','LifeMarry','MovementTransport','PersonnelElect','PersonnelEndPosition','PersonnelNominate','PersonnelStartPosition','TransactionTransferMoney','TransactionTransferOwnership']
line = line.strip()
line = json.loads(line)
#print(line)
ids = line['id']
content = line['content']
lst = []
for k in line['events']:
if len(k)==1:
continue
evn_type = k['type']
lst.append(evn_type)
#print(ids,content,lst)
label_lst = []
label_lst.append(ids)
label_lst.append(content)
for i in org_lst:
if i in lst:
label_lst.append(1)
else:
label_lst.append(0)
#print(label_lst)
final_lst.append(label_lst)
return final_lst
def get_cls_train_data():
final_lst = change_data()
df = pd.DataFrame()
df = df.append(final_lst,ignore_index=True)
df.columns = ['id','content','Bankruptcy','EndOrg','MergeOrg','StartOrg','Attack','Demonstrate','Meet','PhoneWrite','Acquit','Appeal','Jail','Indict','Convict','Extradite','Fine','Pardon','Parole','Sentence','Sue','Hearing','Born','Die','Divorce','Injure','Marry','Transport','Elect','EndPosition','Nominate','StartPosition','Money','Ownership']
df.to_csv('TC/pybert/dataset/train_sample.csv',index=0)
print('Classification model training set converted!')
def get_cls_test_data():
test_df = open('dataset/dev_base.json')
lst=[]
for line in test_df:
line = line.strip()
line = json.loads(line)
#print(line)
lst.append(line)
df = pd.DataFrame(lst)
"""
Copyright (c) 2018, <NAME>
All rights reserved.
Licensed under the Modified BSD License.
For full license terms see LICENSE.txt
"""
from collections import OrderedDict
from copy import deepcopy
import functools
import pandas as pd
from seq_experiment.indexing import get_indexer_mappings, _Indexer
from seq_experiment.plotting import plot_abundance
class SeqExp(object):
"""
Main sequence experiment object.
Container for the separate data frames containing the matching features, classifications, and metadata records.
Only the features table is required to create a new SeqExp object.
"""
def __init__(self, features, classifications=None, metadata=None, seqs=None):
self._features = None
self._classifications = None
self._metadata = None
self._seqs = None
self.features = features
self.classifications = classifications
self.metadata = metadata
self.seqs = seqs
# -------------- basic getters -------------- #
@property
def features(self):
return self._features
@property
def classifications(self):
return self._classifications
@property
def metadata(self):
return self._metadata
@property
def seqs(self):
return self._seqs
def _get_components(self):
return {
'features': self.features,
'classifications': self.classifications,
'metadata': self.metadata,
'seqs': self.seqs
}
# -------------- basic setters -------------- #
@features.setter
def features(self, features):
"""Checks that if a feature table already exists the index and columns remain the same."""
if self._features is not None:
if not (self._features.index.equals(features.index) and self._features.columns.equals(features.columns)):
raise KeyError('new features index and columns must match the existing features')
self._features = features
@classifications.setter
def classifications(self, classifications):
"""Checks that the classification data matches the existing feature data before setting."""
if classifications is not None:
if not classifications.index.equals(self.features.index):
raise KeyError('classifications index does not match the features index.')
self._classifications = classifications
@metadata.setter
def metadata(self, metadata):
"""Checks that the metadata matches the existing feature data before setting."""
if metadata is not None:
if not metadata.index.equals(self.features.columns):
raise KeyError('metadata index does not match the features columns.')
self._metadata = metadata
@seqs.setter
def seqs(self, seqs):
"""Checks that the metadata matches the existing feature data before setting."""
if seqs is not None:
if not seqs.index.equals(self.features.index):
raise KeyError('seqs index does not match the features index.')
self._seqs = seqs
# -------------- convenience getters/setters -------------- #
@property
def feature_names(self):
return self.features.index
@feature_names.setter
def feature_names(self, feature_names):
self.features.index = feature_names
if self.classifications is not None:
self.classifications.index = feature_names
if self.seqs is not None:
self.seqs.index = feature_names
@property
def sample_names(self):
return self.features.columns
@sample_names.setter
def sample_names(self, sample_names):
self.features.columns = sample_names
if self.metadata is not None:
self.metadata.index = sample_names
# -------------- _ -------------- #
def __str__(self):
"""."""
feature_summary = 'features:\t{features} features x {classes} classes'.format(
features=len(self.features.index),
classes=len(self.features.columns)
)
if self.classifications is not None:
classification_summary = 'classifications:\t{features} features x {ranks} classification ranks'.format(
features=len(self.classifications.index),
ranks=len(self.classifications.columns)
)
else:
classification_summary = None
if self.metadata is not None:
metadata_summary = 'metadata:\t{classes} classes x {metadata} sample data'.format(
classes=len(self.metadata.index),
metadata=len(self.metadata.columns)
)
else:
metadata_summary = None
if self.seqs is not None:
seqs_summary = 'seqs:\t{features} features x {seqs} seqs'.format(
features=len(self.seqs.index),
seqs=len(self.seqs.columns)
)
else:
seqs_summary = None
outputs = [feature_summary]
for i in [classification_summary, metadata_summary, seqs_summary]:
if i is not None:
outputs.append(i)
return '\n'.join(outputs) + '\n'
# -------------- advanced subsetting/indexing -------------- #
def __getitem__(self, key):
"""
Subsets the data contained within the SeqExp by columns in the features dataframe.
..see also:: for more advanced subsetting based on the contents of each separate dataframe attribute, and to
subset the features dataframe by features rather than by samples use `sxp.fx`, `sxp.cx`, `sxp.mx`, and
`sxp.sx` for advanced subsetting by the features, classifications, metadata, and sequences dataframes
respectively.
"""
# features are always subset when using SeqExp.__getitem__
new_features = self.features[key]
# conditionally correct type of attribute if dimensionality has been reduced during subset
# assumes attr is pd.DataFrame or pd.DataFrame like object, with _constructor_sliced method implemented
if isinstance(new_features, type(self.features)):
pass
elif isinstance(new_features, self.features._constructor_sliced):
new_features = pd.DataFrame(new_features)
# -*- coding: utf-8 -*-
try:
import json
except ImportError:
import simplejson as json
import math
import pytz
import locale
import pytest
import time
import datetime
import calendar
import re
import decimal
import dateutil
from functools import partial
from pandas.compat import range, StringIO, u
from pandas._libs.tslib import Timestamp
import pandas._libs.json as ujson
import pandas.compat as compat
import numpy as np
from pandas import DataFrame, Series, Index, NaT, DatetimeIndex, date_range
import pandas.util.testing as tm
json_unicode = (json.dumps if compat.PY3
else partial(json.dumps, encoding="utf-8"))
def _clean_dict(d):
"""
Sanitize dictionary for JSON by converting all keys to strings.
Parameters
----------
d : dict
The dictionary to convert.
Returns
-------
cleaned_dict : dict
"""
return {str(k): v for k, v in compat.iteritems(d)}
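# Example: _clean_dict({1: "a", u"b": 2}) -> {"1": "a", "b": 2}. Used below to rebuild
# frames/series from "split"-oriented JSON output, whose decoded keys must be plain
# strings before being passed as keyword arguments.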
@pytest.fixture(params=[
None, # Column indexed by default.
"split",
"records",
"values",
"index"])
def orient(request):
return request.param
@pytest.fixture(params=[None, True])
def numpy(request):
return request.param
class TestUltraJSONTests(object):
@pytest.mark.skipif(compat.is_platform_32bit(),
reason="not compliant on 32-bit, xref #15865")
def test_encode_decimal(self):
sut = decimal.Decimal("1337.1337")
encoded = ujson.encode(sut, double_precision=15)
decoded = ujson.decode(encoded)
assert decoded == 1337.1337
sut = decimal.Decimal("0.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.94")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "0.9"
decoded = ujson.decode(encoded)
assert decoded == 0.9
sut = decimal.Decimal("1.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "2.0"
decoded = ujson.decode(encoded)
assert decoded == 2.0
sut = decimal.Decimal("-1.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "-2.0"
decoded = ujson.decode(encoded)
assert decoded == -2.0
sut = decimal.Decimal("0.995")
encoded = ujson.encode(sut, double_precision=2)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.9995")
encoded = ujson.encode(sut, double_precision=3)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.99999999999999944")
encoded = ujson.encode(sut, double_precision=15)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
@pytest.mark.parametrize("ensure_ascii", [True, False])
def test_encode_string_conversion(self, ensure_ascii):
string_input = "A string \\ / \b \f \n \r \t </script> &"
not_html_encoded = ('"A string \\\\ \\/ \\b \\f \\n '
'\\r \\t <\\/script> &"')
html_encoded = ('"A string \\\\ \\/ \\b \\f \\n \\r \\t '
'\\u003c\\/script\\u003e \\u0026"')
def helper(expected_output, **encode_kwargs):
output = ujson.encode(string_input,
ensure_ascii=ensure_ascii,
**encode_kwargs)
assert output == expected_output
assert string_input == json.loads(output)
assert string_input == ujson.decode(output)
# Default behavior assumes encode_html_chars=False.
helper(not_html_encoded)
# Make sure explicit encode_html_chars=False works.
helper(not_html_encoded, encode_html_chars=False)
# Make sure explicit encode_html_chars=True does the encoding.
helper(html_encoded, encode_html_chars=True)
@pytest.mark.parametrize("long_number", [
-4342969734183514, -12345678901234.56789012, -528656961.4399388
])
def test_double_long_numbers(self, long_number):
sut = {u("a"): long_number}
encoded = ujson.encode(sut, double_precision=15)
decoded = ujson.decode(encoded)
assert sut == decoded
def test_encode_non_c_locale(self):
lc_category = locale.LC_NUMERIC
# We just need one of these locales to work.
for new_locale in ("it_IT.UTF-8", "Italian_Italy"):
if tm.can_set_locale(new_locale, lc_category):
with tm.set_locale(new_locale, lc_category):
assert ujson.loads(ujson.dumps(4.78e60)) == 4.78e60
assert ujson.loads("4.78", precise_float=True) == 4.78
break
def test_decimal_decode_test_precise(self):
sut = {u("a"): 4.56}
encoded = ujson.encode(sut)
decoded = ujson.decode(encoded, precise_float=True)
assert sut == decoded
@pytest.mark.skipif(compat.is_platform_windows() and not compat.PY3,
reason="buggy on win-64 for py2")
def test_encode_double_tiny_exponential(self):
num = 1e-40
assert num == ujson.decode(ujson.encode(num))
num = 1e-100
assert num == ujson.decode(ujson.encode(num))
num = -1e-45
assert num == ujson.decode(ujson.encode(num))
num = -1e-145
assert np.allclose(num, ujson.decode(ujson.encode(num)))
@pytest.mark.parametrize("unicode_key", [
u("key1"), u("بن")
])
def test_encode_dict_with_unicode_keys(self, unicode_key):
unicode_dict = {unicode_key: u("value1")}
assert unicode_dict == ujson.decode(ujson.encode(unicode_dict))
@pytest.mark.parametrize("double_input", [
math.pi,
-math.pi # Should work with negatives too.
])
def test_encode_double_conversion(self, double_input):
output = ujson.encode(double_input)
assert round(double_input, 5) == round(json.loads(output), 5)
assert round(double_input, 5) == round(ujson.decode(output), 5)
def test_encode_with_decimal(self):
decimal_input = 1.0
output = ujson.encode(decimal_input)
assert output == "1.0"
def test_encode_array_of_nested_arrays(self):
nested_input = [[[[]]]] * 20
output = ujson.encode(nested_input)
assert nested_input == json.loads(output)
assert nested_input == ujson.decode(output)
nested_input = np.array(nested_input)
tm.assert_numpy_array_equal(nested_input, ujson.decode(
output, numpy=True, dtype=nested_input.dtype))
def test_encode_array_of_doubles(self):
doubles_input = [31337.31337, 31337.31337,
31337.31337, 31337.31337] * 10
output = ujson.encode(doubles_input)
assert doubles_input == json.loads(output)
assert doubles_input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(doubles_input),
ujson.decode(output, numpy=True))
def test_double_precision(self):
double_input = 30.012345678901234
output = ujson.encode(double_input, double_precision=15)
assert double_input == json.loads(output)
assert double_input == ujson.decode(output)
for double_precision in (3, 9):
output = ujson.encode(double_input,
double_precision=double_precision)
rounded_input = round(double_input, double_precision)
assert rounded_input == json.loads(output)
assert rounded_input == ujson.decode(output)
@pytest.mark.parametrize("invalid_val", [
20, -1, "9", None
])
def test_invalid_double_precision(self, invalid_val):
double_input = 30.12345678901234567890
expected_exception = (ValueError if isinstance(invalid_val, int)
else TypeError)
with pytest.raises(expected_exception):
ujson.encode(double_input, double_precision=invalid_val)
def test_encode_string_conversion2(self):
string_input = "A string \\ / \b \f \n \r \t"
output = ujson.encode(string_input)
assert string_input == json.loads(output)
assert string_input == ujson.decode(output)
assert output == '"A string \\\\ \\/ \\b \\f \\n \\r \\t"'
@pytest.mark.parametrize("unicode_input", [
"Räksmörgås اسامة بن محمد بن عوض بن لادن",
"\xe6\x97\xa5\xd1\x88"
])
def test_encode_unicode_conversion(self, unicode_input):
enc = ujson.encode(unicode_input)
dec = ujson.decode(enc)
assert enc == json_unicode(unicode_input)
assert dec == json.loads(enc)
def test_encode_control_escaping(self):
escaped_input = "\x19"
enc = ujson.encode(escaped_input)
dec = ujson.decode(enc)
assert escaped_input == dec
assert enc == json_unicode(escaped_input)
def test_encode_unicode_surrogate_pair(self):
surrogate_input = "\xf0\x90\x8d\x86"
enc = ujson.encode(surrogate_input)
dec = ujson.decode(enc)
assert enc == json_unicode(surrogate_input)
assert dec == json.loads(enc)
def test_encode_unicode_4bytes_utf8(self):
four_bytes_input = "\xf0\x91\x80\xb0TRAILINGNORMAL"
enc = ujson.encode(four_bytes_input)
dec = ujson.decode(enc)
assert enc == json_unicode(four_bytes_input)
assert dec == json.loads(enc)
def test_encode_unicode_4bytes_utf8highest(self):
four_bytes_input = "\xf3\xbf\xbf\xbfTRAILINGNORMAL"
enc = ujson.encode(four_bytes_input)
dec = ujson.decode(enc)
assert enc == json_unicode(four_bytes_input)
assert dec == json.loads(enc)
def test_encode_array_in_array(self):
arr_in_arr_input = [[[[]]]]
output = ujson.encode(arr_in_arr_input)
assert arr_in_arr_input == json.loads(output)
assert output == json.dumps(arr_in_arr_input)
assert arr_in_arr_input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(arr_in_arr_input),
ujson.decode(output, numpy=True))
@pytest.mark.parametrize("num_input", [
31337,
-31337, # Negative number.
-9223372036854775808 # Large negative number.
])
def test_encode_num_conversion(self, num_input):
output = ujson.encode(num_input)
assert num_input == json.loads(output)
assert output == json.dumps(num_input)
assert num_input == ujson.decode(output)
def test_encode_list_conversion(self):
list_input = [1, 2, 3, 4]
output = ujson.encode(list_input)
assert list_input == json.loads(output)
assert list_input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(list_input),
ujson.decode(output, numpy=True))
def test_encode_dict_conversion(self):
dict_input = {"k1": 1, "k2": 2, "k3": 3, "k4": 4}
output = ujson.encode(dict_input)
assert dict_input == json.loads(output)
assert dict_input == ujson.decode(output)
@pytest.mark.parametrize("builtin_value", [None, True, False])
def test_encode_builtin_values_conversion(self, builtin_value):
output = ujson.encode(builtin_value)
assert builtin_value == json.loads(output)
assert output == json.dumps(builtin_value)
assert builtin_value == ujson.decode(output)
def test_encode_datetime_conversion(self):
datetime_input = datetime.datetime.fromtimestamp(time.time())
output = ujson.encode(datetime_input, date_unit="s")
expected = calendar.timegm(datetime_input.utctimetuple())
assert int(expected) == json.loads(output)
assert int(expected) == ujson.decode(output)
def test_encode_date_conversion(self):
date_input = datetime.date.fromtimestamp(time.time())
output = ujson.encode(date_input, date_unit="s")
tup = (date_input.year, date_input.month, date_input.day, 0, 0, 0)
expected = calendar.timegm(tup)
assert int(expected) == json.loads(output)
assert int(expected) == ujson.decode(output)
@pytest.mark.parametrize("test", [
datetime.time(),
datetime.time(1, 2, 3),
datetime.time(10, 12, 15, 343243),
])
def test_encode_time_conversion_basic(self, test):
output = ujson.encode(test)
expected = '"{iso}"'.format(iso=test.isoformat())
assert expected == output
def test_encode_time_conversion_pytz(self):
# see gh-11473: to_json segfaults with timezone-aware datetimes
test = datetime.time(10, 12, 15, 343243, pytz.utc)
output = ujson.encode(test)
expected = '"{iso}"'.format(iso=test.isoformat())
assert expected == output
def test_encode_time_conversion_dateutil(self):
# see gh-11473: to_json segfaults with timezone-aware datetimes
test = datetime.time(10, 12, 15, 343243, dateutil.tz.tzutc())
output = ujson.encode(test)
expected = '"{iso}"'.format(iso=test.isoformat())
assert expected == output
@pytest.mark.parametrize("decoded_input", [
NaT,
np.datetime64("NaT"),
np.nan,
np.inf,
-np.inf
])
def test_encode_as_null(self, decoded_input):
assert ujson.encode(decoded_input) == "null", "Expected null"
def test_datetime_units(self):
val = datetime.datetime(2013, 8, 17, 21, 17, 12, 215504)
stamp = Timestamp(val)
roundtrip = ujson.decode(ujson.encode(val, date_unit='s'))
assert roundtrip == stamp.value // 10**9
roundtrip = ujson.decode(ujson.encode(val, date_unit='ms'))
assert roundtrip == stamp.value // 10**6
roundtrip = ujson.decode(ujson.encode(val, date_unit='us'))
assert roundtrip == stamp.value // 10**3
roundtrip = ujson.decode(ujson.encode(val, date_unit='ns'))
assert roundtrip == stamp.value
pytest.raises(ValueError, ujson.encode, val, date_unit='foo')
def test_encode_to_utf8(self):
unencoded = "\xe6\x97\xa5\xd1\x88"
enc = ujson.encode(unencoded, ensure_ascii=False)
dec = ujson.decode(enc)
assert enc == json_unicode(unencoded, ensure_ascii=False)
assert dec == json.loads(enc)
def test_decode_from_unicode(self):
unicode_input = u("{\"obj\": 31337}")
dec1 = ujson.decode(unicode_input)
dec2 = ujson.decode(str(unicode_input))
assert dec1 == dec2
def test_encode_recursion_max(self):
# 8 is the max recursion depth
class O2(object):
member = 0
pass
class O1(object):
member = 0
pass
decoded_input = O1()
decoded_input.member = O2()
decoded_input.member.member = decoded_input
with pytest.raises(OverflowError):
ujson.encode(decoded_input)
def test_decode_jibberish(self):
jibberish = "fdsa sda v9sa fdsa"
with pytest.raises(ValueError):
ujson.decode(jibberish)
@pytest.mark.parametrize("broken_json", [
"[", # Broken array start.
"{", # Broken object start.
"]", # Broken array end.
"}", # Broken object end.
])
def test_decode_broken_json(self, broken_json):
with pytest.raises(ValueError):
ujson.decode(broken_json)
@pytest.mark.parametrize("too_big_char", [
"[",
"{",
])
def test_decode_depth_too_big(self, too_big_char):
with pytest.raises(ValueError):
ujson.decode(too_big_char * (1024 * 1024))
@pytest.mark.parametrize("bad_string", [
"\"TESTING", # Unterminated.
"\"TESTING\\\"", # Unterminated escape.
"tru", # Broken True.
"fa", # Broken False.
"n", # Broken None.
])
def test_decode_bad_string(self, bad_string):
with pytest.raises(ValueError):
ujson.decode(bad_string)
@pytest.mark.parametrize("broken_json", [
'{{1337:""}}',
'{{"key":"}',
'[[[true',
])
def test_decode_broken_json_leak(self, broken_json):
for _ in range(1000):
with pytest.raises(ValueError):
ujson.decode(broken_json)
@pytest.mark.parametrize("invalid_dict", [
"{{{{31337}}}}", # No key.
"{{{{\"key\":}}}}", # No value.
"{{{{\"key\"}}}}", # No colon or value.
])
def test_decode_invalid_dict(self, invalid_dict):
with pytest.raises(ValueError):
ujson.decode(invalid_dict)
@pytest.mark.parametrize("numeric_int_as_str", [
"31337", "-31337" # Should work with negatives.
])
def test_decode_numeric_int(self, numeric_int_as_str):
assert int(numeric_int_as_str) == ujson.decode(numeric_int_as_str)
@pytest.mark.skipif(compat.PY3, reason="only PY2")
def test_encode_unicode_4bytes_utf8_fail(self):
with pytest.raises(OverflowError):
ujson.encode("\xfd\xbf\xbf\xbf\xbf\xbf")
def test_encode_null_character(self):
wrapped_input = "31337 \x00 1337"
output = ujson.encode(wrapped_input)
assert wrapped_input == json.loads(output)
assert output == json.dumps(wrapped_input)
assert wrapped_input == ujson.decode(output)
alone_input = "\x00"
output = ujson.encode(alone_input)
assert alone_input == json.loads(output)
assert output == json.dumps(alone_input)
assert alone_input == ujson.decode(output)
assert '" \\u0000\\r\\n "' == ujson.dumps(u(" \u0000\r\n "))
def test_decode_null_character(self):
wrapped_input = "\"31337 \\u0000 31337\""
assert ujson.decode(wrapped_input) == json.loads(wrapped_input)
def test_encode_list_long_conversion(self):
long_input = [9223372036854775807, 9223372036854775807,
9223372036854775807, 9223372036854775807,
9223372036854775807, 9223372036854775807]
output = ujson.encode(long_input)
assert long_input == json.loads(output)
assert long_input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(long_input),
ujson.decode(output, numpy=True,
dtype=np.int64))
def test_encode_long_conversion(self):
long_input = 9223372036854775807
output = ujson.encode(long_input)
assert long_input == json.loads(output)
assert output == json.dumps(long_input)
assert long_input == ujson.decode(output)
@pytest.mark.parametrize("int_exp", [
"1337E40", "1.337E40", "1337E+9", "1.337e+40", "1.337E-4"
])
def test_decode_numeric_int_exp(self, int_exp):
assert ujson.decode(int_exp) == json.loads(int_exp)
def test_dump_to_file(self):
f = StringIO()
ujson.dump([1, 2, 3], f)
assert "[1,2,3]" == f.getvalue()
def test_dump_to_file_like(self):
class FileLike(object):
def __init__(self):
self.bytes = ''
def write(self, data_bytes):
self.bytes += data_bytes
f = FileLike()
ujson.dump([1, 2, 3], f)
assert "[1,2,3]" == f.bytes
def test_dump_file_args_error(self):
with pytest.raises(TypeError):
ujson.dump([], "")
def test_load_file(self):
data = "[1,2,3,4]"
exp_data = [1, 2, 3, 4]
f = StringIO(data)
assert exp_data == ujson.load(f)
f = StringIO(data)
tm.assert_numpy_array_equal(np.array(exp_data),
ujson.load(f, numpy=True))
def test_load_file_like(self):
class FileLike(object):
def read(self):
try:
self.end
except AttributeError:
self.end = True
return "[1,2,3,4]"
exp_data = [1, 2, 3, 4]
f = FileLike()
assert exp_data == ujson.load(f)
f = FileLike()
tm.assert_numpy_array_equal(np.array(exp_data),
ujson.load(f, numpy=True))
def test_load_file_args_error(self):
with pytest.raises(TypeError):
ujson.load("[]")
def test_version(self):
assert re.match(r'^\d+\.\d+(\.\d+)?$', ujson.__version__), \
"ujson.__version__ must be a string like '1.4.0'"
def test_encode_numeric_overflow(self):
with pytest.raises(OverflowError):
ujson.encode(12839128391289382193812939)
def test_encode_numeric_overflow_nested(self):
class Nested(object):
x = 12839128391289382193812939
for _ in range(0, 100):
with pytest.raises(OverflowError):
ujson.encode(Nested())
@pytest.mark.parametrize("val", [
3590016419, 2**31, 2**32, (2**32) - 1
])
def test_decode_number_with_32bit_sign_bit(self, val):
# Test that numbers that fit within 32 bits but would have the
# sign bit set (2**31 <= x < 2**32) are decoded properly.
doc = '{{"id": {val}}}'.format(val=val)
assert ujson.decode(doc)["id"] == val
def test_encode_big_escape(self):
# Make sure no Exception is raised.
for _ in range(10):
base = '\u00e5'.encode("utf-8") if compat.PY3 else "\xc3\xa5"
escape_input = base * 1024 * 1024 * 2
ujson.encode(escape_input)
def test_decode_big_escape(self):
# Make sure no Exception is raised.
for _ in range(10):
base = '\u00e5'.encode("utf-8") if compat.PY3 else "\xc3\xa5"
quote = compat.str_to_bytes("\"")
escape_input = quote + (base * 1024 * 1024 * 2) + quote
ujson.decode(escape_input)
def test_to_dict(self):
d = {u("key"): 31337}
class DictTest(object):
def toDict(self):
return d
o = DictTest()
output = ujson.encode(o)
dec = ujson.decode(output)
assert dec == d
def test_default_handler(self):
class _TestObject(object):
def __init__(self, val):
self.val = val
@property
def recursive_attr(self):
return _TestObject("recursive_attr")
def __str__(self):
return str(self.val)
pytest.raises(OverflowError, ujson.encode, _TestObject("foo"))
assert '"foo"' == ujson.encode(_TestObject("foo"),
default_handler=str)
def my_handler(_):
return "foobar"
assert '"foobar"' == ujson.encode(_TestObject("foo"),
default_handler=my_handler)
def my_handler_raises(_):
raise TypeError("I raise for anything")
with pytest.raises(TypeError, match="I raise for anything"):
ujson.encode(_TestObject("foo"), default_handler=my_handler_raises)
def my_int_handler(_):
return 42
assert ujson.decode(ujson.encode(_TestObject("foo"),
default_handler=my_int_handler)) == 42
def my_obj_handler(_):
return datetime.datetime(2013, 2, 3)
assert (ujson.decode(ujson.encode(datetime.datetime(2013, 2, 3))) ==
ujson.decode(ujson.encode(_TestObject("foo"),
default_handler=my_obj_handler)))
obj_list = [_TestObject("foo"), _TestObject("bar")]
assert (json.loads(json.dumps(obj_list, default=str)) ==
ujson.decode(ujson.encode(obj_list, default_handler=str)))
class TestNumpyJSONTests(object):
@pytest.mark.parametrize("bool_input", [True, False])
def test_bool(self, bool_input):
b = np.bool(bool_input)
assert ujson.decode(ujson.encode(b)) == b
def test_bool_array(self):
bool_array = np.array([
True, False, True, True,
False, True, False, False], dtype=np.bool)
output = np.array(ujson.decode(
ujson.encode(bool_array)), dtype=np.bool)
tm.assert_numpy_array_equal(bool_array, output)
def test_int(self, any_int_dtype):
klass = np.dtype(any_int_dtype).type
num = klass(1)
assert klass(ujson.decode(ujson.encode(num))) == num
def test_int_array(self, any_int_dtype):
arr = np.arange(100, dtype=np.int)
arr_input = arr.astype(any_int_dtype)
arr_output = np.array(ujson.decode(ujson.encode(arr_input)),
dtype=any_int_dtype)
tm.assert_numpy_array_equal(arr_input, arr_output)
def test_int_max(self, any_int_dtype):
if any_int_dtype in ("int64", "uint64") and compat.is_platform_32bit():
pytest.skip("Cannot test 64-bit integer on 32-bit platform")
klass = np.dtype(any_int_dtype).type
# uint64 max will always overflow,
# as it's encoded to signed.
if any_int_dtype == "uint64":
num = np.iinfo("int64").max
else:
num = np.iinfo(any_int_dtype).max
assert klass(ujson.decode(ujson.encode(num))) == num
def test_float(self, float_dtype):
klass = np.dtype(float_dtype).type
num = klass(256.2013)
assert klass(ujson.decode(ujson.encode(num))) == num
def test_float_array(self, float_dtype):
arr = np.arange(12.5, 185.72, 1.7322, dtype=np.float)
float_input = arr.astype(float_dtype)
float_output = np.array(ujson.decode(
ujson.encode(float_input, double_precision=15)),
dtype=float_dtype)
tm.assert_almost_equal(float_input, float_output)
def test_float_max(self, float_dtype):
klass = np.dtype(float_dtype).type
num = klass(np.finfo(float_dtype).max / 10)
tm.assert_almost_equal(klass(ujson.decode(
ujson.encode(num, double_precision=15))), num)
def test_array_basic(self):
arr = np.arange(96)
arr = arr.reshape((2, 2, 2, 2, 3, 2))
tm.assert_numpy_array_equal(
np.array(ujson.decode(ujson.encode(arr))), arr)
tm.assert_numpy_array_equal(ujson.decode(
ujson.encode(arr), numpy=True), arr)
@pytest.mark.parametrize("shape", [
(10, 10),
(5, 5, 4),
(100, 1),
])
def test_array_reshaped(self, shape):
arr = np.arange(100)
arr = arr.reshape(shape)
tm.assert_numpy_array_equal(
np.array(ujson.decode(ujson.encode(arr))), arr)
tm.assert_numpy_array_equal(ujson.decode(
ujson.encode(arr), numpy=True), arr)
def test_array_list(self):
arr_list = ["a", list(), dict(), dict(), list(),
42, 97.8, ["a", "b"], {"key": "val"}]
arr = np.array(arr_list)
tm.assert_numpy_array_equal(
np.array(ujson.decode(ujson.encode(arr))), arr)
def test_array_float(self):
dtype = np.float32
arr = np.arange(100.202, 200.202, 1, dtype=dtype)
arr = arr.reshape((5, 5, 4))
arr_out = np.array(ujson.decode(ujson.encode(arr)), dtype=dtype)
tm.assert_almost_equal(arr, arr_out)
arr_out = ujson.decode(ujson.encode(arr), numpy=True, dtype=dtype)
tm.assert_almost_equal(arr, arr_out)
def test_0d_array(self):
with pytest.raises(TypeError):
ujson.encode(np.array(1))
@pytest.mark.parametrize("bad_input,exc_type,kwargs", [
([{}, []], ValueError, {}),
([42, None], TypeError, {}),
([["a"], 42], ValueError, {}),
([42, {}, "a"], TypeError, {}),
([42, ["a"], 42], ValueError, {}),
(["a", "b", [], "c"], ValueError, {}),
([{"a": "b"}], ValueError, dict(labelled=True)),
({"a": {"b": {"c": 42}}}, ValueError, dict(labelled=True)),
([{"a": 42, "b": 23}, {"c": 17}], ValueError, dict(labelled=True))
])
def test_array_numpy_except(self, bad_input, exc_type, kwargs):
with pytest.raises(exc_type):
ujson.decode(ujson.dumps(bad_input), numpy=True, **kwargs)
def test_array_numpy_labelled(self):
labelled_input = {"a": []}
output = ujson.loads(ujson.dumps(labelled_input),
numpy=True, labelled=True)
assert (np.empty((1, 0)) == output[0]).all()
assert (np.array(["a"]) == output[1]).all()
assert output[2] is None
labelled_input = [{"a": 42}]
output = ujson.loads(ujson.dumps(labelled_input),
numpy=True, labelled=True)
assert (np.array([u("a")]) == output[2]).all()
assert (np.array([42]) == output[0]).all()
assert output[1] is None
# see gh-10837: write out the dump explicitly
# so there is no dependency on iteration order
input_dumps = ('[{"a": 42, "b":31}, {"a": 24, "c": 99}, '
'{"a": 2.4, "b": 78}]')
output = ujson.loads(input_dumps, numpy=True, labelled=True)
expected_vals = np.array(
[42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3, 2))
assert (expected_vals == output[0]).all()
assert output[1] is None
assert (np.array([u("a"), "b"]) == output[2]).all()
input_dumps = ('{"1": {"a": 42, "b":31}, "2": {"a": 24, "c": 99}, '
'"3": {"a": 2.4, "b": 78}}')
output = ujson.loads(input_dumps, numpy=True, labelled=True)
expected_vals = np.array(
[42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3, 2))
assert (expected_vals == output[0]).all()
assert (np.array(["1", "2", "3"]) == output[1]).all()
assert (np.array(["a", "b"]) == output[2]).all()
class TestPandasJSONTests(object):
def test_dataframe(self, orient, numpy):
if orient == "records" and numpy:
pytest.skip("Not idiomatic pandas")
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[
"a", "b"], columns=["x", "y", "z"])
encode_kwargs = {} if orient is None else dict(orient=orient)
decode_kwargs = {} if numpy is None else dict(numpy=numpy)
output = ujson.decode(ujson.encode(df, **encode_kwargs),
**decode_kwargs)
# Ensure proper DataFrame initialization.
if orient == "split":
dec = _clean_dict(output)
output = DataFrame(**dec)
else:
output = DataFrame(output)
# Corrections to enable DataFrame comparison.
if orient == "values":
df.columns = [0, 1, 2]
df.index = [0, 1]
elif orient == "records":
df.index = [0, 1]
elif orient == "index":
df = df.transpose()
tm.assert_frame_equal(output, df, check_dtype=False)
def test_dataframe_nested(self, orient):
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[
"a", "b"], columns=["x", "y", "z"])
nested = {"df1": df, "df2": df.copy()}
kwargs = {} if orient is None else dict(orient=orient)
exp = {"df1": ujson.decode(ujson.encode(df, **kwargs)),
"df2": ujson.decode(ujson.encode(df, **kwargs))}
assert ujson.decode(ujson.encode(nested, **kwargs)) == exp
def test_dataframe_numpy_labelled(self, orient):
if orient in ("split", "values"):
pytest.skip("Incompatible with labelled=True")
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[
"a", "b"], columns=["x", "y", "z"], dtype=np.int)
kwargs = {} if orient is None else dict(orient=orient)
output = DataFrame(*ujson.decode(ujson.encode(df, **kwargs),
numpy=True, labelled=True))
if orient is None:
df = df.T
elif orient == "records":
df.index = [0, 1]
tm.assert_frame_equal(output, df)
def test_series(self, orient, numpy):
s = Series([10, 20, 30, 40, 50, 60], name="series",
index=[6, 7, 8, 9, 10, 15]).sort_values()
encode_kwargs = {} if orient is None else dict(orient=orient)
decode_kwargs = {} if numpy is None else dict(numpy=numpy)
output = ujson.decode(ujson.encode(s, **encode_kwargs),
**decode_kwargs)
if orient == "split":
dec = _clean_dict(output)
output = Series(**dec)
else:
output = Series(output)
if orient in (None, "index"):
s.name = None
output = output.sort_values()
s.index = ["6", "7", "8", "9", "10", "15"]
elif orient in ("records", "values"):
s.name = None
s.index = [0, 1, 2, 3, 4, 5]
tm.assert_series_equal(output, s, check_dtype=False)
def test_series_nested(self, orient):
s = Series([10, 20, 30, 40, 50, 60], name="series",
index=[6, 7, 8, 9, 10, 15]).sort_values()
nested = {"s1": s, "s2": s.copy()}
kwargs = {} if orient is None else dict(orient=orient)
exp = {"s1": ujson.decode(ujson.encode(s, **kwargs)),
"s2": ujson.decode(ujson.encode(s, **kwargs))}
assert ujson.decode(ujson.encode(nested, **kwargs)) == exp
def test_index(self):
i = Index([23, 45, 18, 98, 43, 11], name="index")
# Column indexed.
output = Index(ujson.decode(ujson.encode(i)), name="index")
tm.assert_index_equal(i, output)
output = Index(ujson.decode(ujson.encode(i), numpy=True), name="index")
tm.assert_index_equal(i, output)
dec = _clean_dict(ujson.decode(ujson.encode(i, orient="split")))
output = Index(**dec)
tm.assert_index_equal(i, output)
assert i.name == output.name
dec = _clean_dict(ujson.decode(ujson.encode(i, orient="split"),
numpy=True))
output = Index(**dec)
tm.assert_index_equal(i, output)
assert i.name == output.name
output = Index(ujson.decode(ujson.encode(i, orient="values")),
name="index")
tm.assert_index_equal(i, output)
output = Index(ujson.decode(ujson.encode(i, orient="values"),
numpy=True), name="index")
tm.assert_index_equal(i, output)
output = Index(ujson.decode(ujson.encode(i, orient="records")),
name="index")
tm.assert_index_equal(i, output)
output = Index(ujson.decode(ujson.encode(i, orient="records"),
numpy=True), name="index")
tm.assert_index_equal(i, output)
output = Index(ujson.decode(ujson.encode(i, orient="index")), name="index")
import pandas as pd
import numpy as np
# Generate the 'day'/'middle'/'night' shift time series
# time_index = pd.Series(pd.bdate_range(start='2019-01-01',end='2020-10-01',freq="8H"))
# time_index = time_index[:-1]
p = pd.DataFrame(pd.read_excel('C:\\Users\\98243\\Desktop\\配煤\\table\\1\\row\\PHM-20190101-20200930.xls'))
## 1. Drop columns that are entirely NaT  2. Drop rows with no basic proximate analysis (failed samples)
p = p.dropna(axis=1,how='all')
p = p.dropna(subset=['Mad(%)','Ad(%)','Vdaf(%)','Fcad(%)'],how="all")
p = p[:-1]
p = p.reset_index(drop=True)
p = p.sort_index(ascending=False,ignore_index=True)
time_index_p = p['取样时刻'].values.astype('datetime64[D]')
time_index_p = pd.Series(time_index_p).T
## For testing: set difference between DataFrames
# delete_what_p = p1.append(p).drop_duplicates(keep=False)
result_BlendedCoal = pd.concat([time_index_p,p],axis=1)
result_BlendedCoal.rename(columns = {0:'Time'},inplace = True)
useless1 = ['选择','检验委托号','取样日期','取样位置','取样编号','制样编码','取样类别(C)','取样方式','样序号','品名代码','班别','备注']
result_BlendedCoal.drop(labels=useless1,axis=1,inplace=True)
lj = pd.DataFrame(pd.read_excel('C:\\Users\\98243\\Desktop\\配煤\\table\\1\\row\\YJJ-20190101-20200930.xls'))
lj = lj[:-1]
# Remove 'moisture analysis' (水分分析) rows and 'DC' sampling-category rows
lj = lj[~lj['分析类别'].isin(["水分分析"])]
lj = lj[~lj['取样类别'].isin(["DC"])]
lj = lj.dropna(subset=['M40(%)','M10(%)','Ad(%)','Vdaf(%)'],how="any")
# Drop unneeded columns
useless2 = ['样序号','考核','选择','制样编码','检验委托号','取样方式','品名','班别','产量','冶金焦干熄焦','等级','取样类别(C)']
lj.drop(labels=useless2,axis=1,inplace=True)
# Extract the CK rows
dict_CK= {}
x = 0
for i in range (len(lj)):
for j in range(lj.shape[1]):
if lj.iloc[i,j] == "CK":
dict_CK[x]=lj.iloc[i,:]
x = x+1
dict_CK = pd.DataFrame(dict_CK)
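# Hedged alternative sketch: the nested cell scan above touches every cell looking for "CK".
# Assuming "CK" only ever appears in the '取样类别' column (an assumption, not verified
# against the source workbook), the same rows can be selected with a vectorized filter:
ck_rows = lj[lj['取样类别'] == 'CK'].reset_index(drop=True)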
import pandas as pd
from datetime import date, timedelta
import calendar
from itertools import product
from typing import *
__all__=[
"Time",
"GroupBy",
"AssistCol",
"Others",
]
class Time:
@classmethod
def next_month_date(cls, cur_date: date, step: int):
'''
Return the first day of the month `step` months from cur_date (|step| <= 12).
'''
if (step > 12) or (step < -12):
raise ValueError('months is out of range')
cur_year, cur_month = cur_date.year, cur_date.month
month_sum = cur_month+step
if month_sum > 12:
next_year = cur_year+1
next_month = month_sum-12
elif month_sum < 1:
next_year = cur_year-1
next_month = month_sum+12
else:
next_year = cur_year
next_month = month_sum
return date(next_year, next_month, 1)
@classmethod
def month_days(cls, year: int, month: int):
'''
Return the total number of days in the given month.
'''
weekday, monthdays = calendar.monthrange(year, month)
return monthdays
@classmethod
def month_progress(cls, cut_date: date):
'''
Return the fraction of the month that has elapsed as of cut_date.
'''
return cut_date.day/cls.month_days(cut_date.year,cut_date.month)
@classmethod
def get_time_progress(cls, date: date, cut_date: date):
if cut_date.year > date.year:
return 1
elif cut_date.year == date.year:
if cut_date.month > date.month:
return 1
elif cut_date.month == date.month:
return cls.month_progress(cut_date)
else:
return 0
@classmethod
def time_progress(cls, df: pd.DataFrame, idx_list: list, drop_na: bool):
def get_dataframe_part(df: pd.DataFrame):
df['time_progress'] = df['deliver_date'].apply(
cls.get_time_progress, args=(cls.get_cut_date(df),))
pt = df.pivot_table(
'time_progress', idx_list, 'month_assist')
columns = []
columns.extend(idx_list)
columns.extend(pt.columns)
row_list = pt.values.tolist()
lst = [pd.Series(row).dropna().tolist() for row in row_list]
length = [len(row) for row in lst]
value = row_list[length.index(max(length))]
return value, columns
if drop_na == False:
time_lst, columns = get_dataframe_part(df)
ret = pd.DataFrame(columns=columns)
idx_lists = [df[idx].drop_duplicates().tolist()
for idx in idx_list]
for i in product(*idx_lists):
row = list(i)
row.extend(time_lst)
df = pd.DataFrame(row, index=columns)
ret = ret.append(df.T)
ret = ret.set_index(idx_list, drop=True).astype(float)
else:
df['time_progress'] = df['deliver_date'].apply(
cls.get_time_progress, args=(cls.get_cut_date(df),))
ret = df.pivot_table(
'time_progress', idx_list, 'month_assist')
return ret
@staticmethod
def get_cut_date(df: pd.DataFrame) -> date:
date_col = [col for col in df.columns if 'date' in col][0]
m_date = df[date_col].max()
return m_date
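# Usage sketch for the Time helpers above (hypothetical dates; results follow from the code):
#   Time.next_month_date(date(2020, 12, 15), 1)  -> date(2021, 1, 1)
#   Time.month_days(2020, 2)                     -> 29 (leap year)
#   Time.month_progress(date(2020, 2, 14))       -> 14 / 29 ≈ 0.483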
class GroupBy:
@staticmethod
def fill_dic(keys: list, value_list: list):
if len(keys) != len(value_list):
raise ValueError('length is not equal')
dic = {}
length = len(keys)
for i in range(length):
dic[keys[i]] = value_list[i]
return dic
@staticmethod
def group_by(df: pd.DataFrame, by: Union[str, list], aggfunc: str) -> pd.DataFrame:
'''Aggregate df either as a single grand total or grouped by the given keys.
        Args:
            df (pd.DataFrame): The DataFrame to group by.
            by (Union[str, list]): 'all' for a grand total, or a column/index name (or list of names) to group by.
            aggfunc (str): One of 'sum', 'mean', 'min', 'max', 'count'.
Returns:
pd.DataFrame: The DataFrame after group by.
'''
if aggfunc == 'sum':
if by == 'all':
df = pd.DataFrame(df.sum()).T
else:
df = df.groupby(by).sum()
elif aggfunc == 'mean':
if by == 'all':
df = pd.DataFrame(df.mean()).T
else:
df = df.groupby(by).mean()
elif aggfunc == 'min':
if by == 'all':
df = pd.DataFrame(df.min()).T
else:
df = df.groupby(by).min()
elif aggfunc == 'max':
if by == 'all':
df = pd.DataFrame(df.max()).T
else:
df = df.groupby(by).max()
elif aggfunc == 'count':
if by == 'all':
df = pd.DataFrame(df.count()).T
else:
df = df.groupby(by).count()
return df
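# Usage sketch (hypothetical frame `sales_df` with a 'region' column):
#   GroupBy.group_by(sales_df, 'all', 'sum')      -> single-row frame of column totals
#   GroupBy.group_by(sales_df, 'region', 'mean')  -> same as sales_df.groupby('region').mean()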
@staticmethod
def dicts_group_by(dic: dict, by: Union[str, list], aggfunc: str) -> Dict[str,pd.DataFrame]:
'''Apply group_by to every DataFrame in a dict.
        Args:
            dic (dict): A dict of DataFrames.
            by (Union[str, list]): A column/index name (or list of names) to group by, or None to leave each frame unchanged.
            aggfunc (str): One of 'sum', 'mean', 'min', 'max', 'count'.
        Returns:
            dict: A dict with the same keys, each holding the grouped DataFrame.
'''
m_dic = {}
keys = list(dic.keys())
for key in keys:
df = dic[key].copy(True)
if isinstance(df, pd.DataFrame):
if by == None:
m_dic[key] = df
else:
df = GroupBy.group_by(df, by, aggfunc)
m_dic[key] = df
return m_dic
@staticmethod
def fixed_index(df: pd.DataFrame, idx_list: list) -> pd.DataFrame:
'''Set the DataFrame index to idx_list; levels that do not exist are filled with 'All'.
        Args:
            df (pd.DataFrame): The DataFrame to reindex.
            idx_list (list): The desired index level names.
        Returns:
            pd.DataFrame: The DataFrame indexed by idx_list.
'''
# Fill in any missing index levels
df.reset_index(inplace=True)
for col in idx_list:
if col not in df.columns:
df[col] = 'All'
df = df.set_index(idx_list)
if 'index' in df.columns:
df.drop('index', axis=1, inplace=True)
return df
@staticmethod
def multi_group_by(df: pd.DataFrame, idx_list: list, aggfunc: str) -> List[pd.DataFrame]:
'''Group df as a grand total ('all') and as subtotals per index name, then return them together with df itself.
        Args:
            df (pd.DataFrame): The DataFrame to aggregate.
            idx_list (list): The index level names used for the subtotals and the fixed index.
            aggfunc (str): One of 'sum', 'mean', 'min', 'max', 'count'.
        Returns:
            List[pd.DataFrame]: the grand total, the per-level subtotals (when len(idx_list) > 1), and df itself.
'''
df = df.copy(True)
ret = []
# Grand total over all rows
all_all = GroupBy.group_by(df, 'all', aggfunc)
all_all = GroupBy.fixed_index(all_all, idx_list)
ret.append(all_all)
# Subtotals grouped by each index level separately
if len(idx_list) > 1:
part = [GroupBy.group_by(
df, i, aggfunc) for i in idx_list]
part = [GroupBy.fixed_index(p, idx_list) for p in part]
ret.extend(part)
# The original frame itself
ret.append(df)
return ret
class AssistCol:
    @staticmethod
    def day_assist_col(df: pd.DataFrame):
        # Day helper column: one label per calendar day.
        date_col = [col for col in df.columns if 'date' in col][0]
        day_series = df[date_col].apply(lambda x: x.strftime('%YY%m%d'))
        return day_series

    @staticmethod
    def week_assist_col(df: pd.DataFrame):
        # Week helper column: label each date with its Monday-Sunday range.
        date_col = [col for col in df.columns if 'date' in col][0]

        def date_to_week(x: date):
            monday_date = x - timedelta(days=x.weekday())
            sunday_date = monday_date + timedelta(days=6)
            return '{}-{}'.format(monday_date.strftime('%YY%m%d'),
                                  sunday_date.strftime('%YY%m%d'))

        week_series = df[date_col].apply(date_to_week)
        return week_series

    @staticmethod
    def month_assist_col(df: pd.DataFrame):
        # Month helper column.
        date_col = [col for col in df.columns if 'date' in col][0]
        month_series = df[date_col].apply(lambda x: x.strftime('%Y年%m月'))
        return month_series

    @staticmethod
    def area_assist_col(df: pd.DataFrame):
        # Region helper column: customer ids containing 'cn' map to 深圳, all others to 杭州.
        area_series = df['customer_id'].apply(
            lambda x: '深圳' if 'cn' in x else '杭州')
        return area_series
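
# Illustrative only: a demo helper (not part of the original module) showing how
# the AssistCol helpers derive label columns from a 'deliver_date' column and a
# 'customer_id' column. It assumes the module's existing datetime imports (date).
def _demo_assist_cols():
    df = pd.DataFrame({
        'deliver_date': [date(2021, 3, 1), date(2021, 3, 9)],
        'customer_id': ['cn-001', 'hz-002'],
    })
    df['month_assist'] = AssistCol.month_assist_col(df)  # e.g. '2021年03月'
    df['week_assist'] = AssistCol.week_assist_col(df)    # Monday-Sunday range label
    df['area_assist'] = AssistCol.area_assist_col(df)    # 深圳 if 'cn' in the id, else 杭州
    return df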
class Others:
    @classmethod
    def str_fix(cls, ustring: str):
        # Normalise a value into a clean half-width, lower-case string.
        if not isinstance(ustring, str):
            if ustring is not None:
                return ustring
            else:
                return 'None'

        def strQ2B(ustring):
            """Convert full-width characters to half-width."""
            rstring = ""
            for uchar in ustring:
                inside_code = ord(uchar)
                if inside_code == 12288:  # full-width space maps directly to a normal space
                    inside_code = 32
                elif 65281 <= inside_code <= 65374:  # other full-width characters shift by a fixed offset
                    inside_code -= 65248
                rstring += chr(inside_code)
            return rstring

        ustring = strQ2B(ustring)
        return ustring.strip().lower().replace('\n', '')

    @classmethod
    def data_fix(cls, data):
        # Coerce a numeric value or a numeric string to a number.
        if isinstance(data, (int, float)):
            return data
        elif isinstance(data, str):
            data = data.strip()
            return float(data)
        else:
            raise TypeError('data type is {}'.format(type(data)))

    @classmethod
    def pivot2df(cls, df: pd.DataFrame, index_name: list, columns: list):
        # Unpivot the given columns into a long frame with 'col' and 'value' columns.
        ret = pd.DataFrame()
        if df.index.names != [None]:
            if df.index.names != index_name:
                df = df.reset_index()
                df = df.set_index(index_name).sort_index()
        else:
            df = df.set_index(index_name).sort_index()
        for col in columns:
            if col in df.columns:
                cut_df = df.reindex(columns=[col])
                cut_df = cut_df.rename(columns={col: 'value'})
                if cut_df['value'].notnull().all():
                    cut_df['value'] = cut_df['value'].astype(float)
                    cut_df['col'] = col
                    ret = pd.concat([ret, cut_df])
                else:
                    print('column', col, 'has null')
        ret = ret[(ret['value'] != 0) & (ret['value'].notnull())]
        ret = ret.reset_index()
        return ret

    @classmethod
    def drop_repeat_columns(cls, df: pd.DataFrame):
        '''
        Find duplicated column names and keep only the first column for each of them.
        '''
        df = df.copy()
        repeated = df.columns[df.columns.duplicated()]
        for r in repeated:
            # Collect every column currently named r into a temporary frame.
            t_df = df.loc[:, r]
            # Rename the temporary columns positionally.
            t_df.columns = range(t_df.shape[1])
            # Drop all columns named r from the original frame.
            df.drop(r, axis=1, inplace=True)
            # Add the first of the duplicated columns back under the original name.
            df[r] = t_df[0]
        return df

    @classmethod
    def find_col_name(cls, df: pd.DataFrame, findname: str):
        '''
        Look up the actual column name; needed because real column names are not
        fixed, so an exact match falls back to a prefix match.
        '''
        columns = df.columns.to_series()
        columns = columns[columns.notnull()]
        t_columns = columns[columns == findname]
        if t_columns.empty:
            columns = columns[columns.str.startswith(findname)]
        else:
            columns = t_columns
        if columns.empty:
            raise ValueError('not found column as {}'.format(findname))
        return columns.iloc[0]

    @classmethod
    def move_col(cls, df: pd.DataFrame, col_name: Union[str, list], pos: int):
        # Move a column (or each column in a list) to position pos.
        if isinstance(col_name, str):
            col = df[col_name]
            df = df.drop(col_name, axis=1)
            df.insert(pos, col_name, col)
            return df
        elif isinstance(col_name, list):
            # Insert in reverse so the final left-to-right order matches col_name.
            for col in reversed(col_name):
                df = Others.move_col(df, col, pos)
            return df

    @staticmethod
    def add_order_index(df_dict: Dict[str, pd.DataFrame], col_name='project'):
        df_list = [pd.DataFrame()
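
# Illustrative only: a demo helper (not part of the original module) showing
# Others.str_fix normalising a full-width string and Others.pivot2df unpivoting
# two value columns of a hypothetical wide frame into long 'col'/'value' format.
def _demo_others():
    cleaned = Others.str_fix('ABC 123\n')  # -> 'abc 123'
    wide = pd.DataFrame({
        'project': ['p1', 'p2'],
        'plan': [10.0, 20.0],
        'actual': [8.0, 25.0],
    })
    long_df = Others.pivot2df(wide, ['project'], ['plan', 'actual'])
    return cleaned, long_df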
import pandas as pd
import numpy as np
import sys
from main import Main
import random

if __name__ == "__main__":
    valid_pairs = []
    good_ids = pd.read_csv("data/good_trips.csv")