# -*- coding: utf-8 -*-
"""
Reading data for WB and PRO for the kennisimpulse project:
read data from provinces, water companies, and other sources
Created on Sun Jul 26 21:55:57 2020
@author: <NAME>
"""
import pytest
import numpy as np
import pandas as pd
from pathlib import Path
import pickle as pckl
from hgc import ner
from hgc import io
import tests
# import xlsxwriter
def test_province():
# WD = Path(tests.__file__).parent / 'provincie_data_long_preprocessed.csv'
WD = r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse'+'/provincie_data_long_preprocessed.csv'
df_temp = pd.read_csv(WD, encoding='ISO-8859-1', header=None)
# define the nrow here
n_row = None
feature_map, feature_unmapped, df_feature_map = ner.generate_feature_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 25].dropna()))
unit_map, unit_unmapped, df_unit_map = ner.generate_unit_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 26].dropna()))
# create a df to record what has been mapped and what has not
df_map = pd.DataFrame((feature_map.keys(),feature_map.values(),unit_map.keys(),unit_map.values()), index=['Feature','Mapped feature','Unit','Mapped unit']).transpose()
if feature_unmapped:
df_map = df_map.join(pd.DataFrame(feature_unmapped, columns=['Unmapped feature']))
import pandas as pd
from visions import StandardSet
from compressio import DefaultCompressor
from compressio.compress import compress_func
def test_copy_frame():
df = pd.DataFrame({"column": pd.Series([1], dtype="int64")})
# %%
import json
from collections import Counter
import matplotlib as mpl
import matplotlib.font_manager as fm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotnine
import scipy
import seaborn as sns
from matplotlib import rc
from pandas.plotting import register_matplotlib_converters
from plotnine import (
aes,
element_text,
facet_wrap,
geom_bar,
ggplot,
labs,
scale_color_hue,
theme,
theme_light,
)
from tqdm import notebook
font_path = "/usr/share/fonts/truetype/nanum/NanumBarunGothic.ttf"
font_name = fm.FontProperties(fname=font_path, size=10).get_name()
plt.rc("font", family=font_name, size=12)
plt.rcParams["figure.figsize"] = (20, 10)
register_matplotlib_converters()
mpl.font_manager._rebuild()
mpl.pyplot.rc("font", family="NanumGothic")
# %%
# Load input data
train = pd.read_json("../input/melon-playlist/train.json", typ="frame")
test = pd.read_json("../input/melon-playlist/test.json", typ="frame")
val = pd.read_json("../input/melon-playlist/val.json", typ="frame")
genre = pd.read_json("../input/melon-playlist/genre_gn_all.json", typ="series")
meta = pd.read_json("../input/melon-playlist/song_meta.json", typ="frame")
#!/usr/bin/env python3
###
# Based on cell.R
import sys,os,logging
import pandas as pd
if __name__=="__main__":
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
if (len(sys.argv) < 4):
logging.error("3 file args required, LINCS cell info for GSE70138 and GSE92742, and output file.")
sys.exit(1)
fn1 = sys.argv[1]
fn2 = sys.argv[2]
ofile = sys.argv[3]
GSE70138 = pd.read_table(fn1, "\t", na_values=["-666"])
logging.info(f"columns: {GSE70138.columns}")
GSE70138 = GSE70138.rename(columns={
"donor_sex":"gender",
"primary_site":"cell_lineage",
"subtype":"cell_histology",
"provider_catalog_id":"cell_source_id",
"original_source_vendor":"cell_source"})
logging.info(f"columns: {GSE70138.columns}")
GSE70138 = GSE70138[["cell_id", "cell_type", "cell_lineage", "cell_histology", "cell_source_id", "cell_source", "gender"]]
#
GSE92742 = pd.read_table(fn2, "\t", na_values=["-666"])
GSE92742 = GSE92742.rename(columns={
"donor_sex":"gender",
"primary_site":"cell_lineage",
"subtype":"cell_histology",
"provider_catalog_id":"cell_source_id",
"original_source_vendor":"cell_source"})
GSE92742 = GSE92742[["cell_id", "cell_type", "cell_lineage", "cell_histology", "cell_source_id", "cell_source", "gender"]]
#
cells = pd.concat([GSE70138, GSE92742])
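# Hedged usage note: per the argument check above, the script expects three positional file
# arguments; the invocation below is illustrative only (script and file names are assumed):
#   python lincs_cells.py GSE70138_cell_info.txt GSE92742_cell_info.txt lincs_cells_out.tsv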
from __future__ import print_function
from __future__ import absolute_import
from ku import generators as gr
from ku import generic as gen
from ku import image_utils as iu
from munch import Munch
import pandas as pd, numpy as np
import pytest, shutil
gen_params = Munch(batch_size = 2,
data_path = 'images',
input_shape = (224,224,3),
inputs = ['filename'],
outputs = ['score'],
shuffle = False,
fixed_batches = True)
ids = pd.read_csv('ids.csv', encoding='latin-1')
def test_correct_df_input():
assert (np.all(ids.columns == ['filename', 'score']))
assert (np.all(ids.score == range(1,5)))
def test_init_DataGeneratorDisk():
g = gr.DataGeneratorDisk(ids, **gen_params)
assert isinstance(g[0], tuple)
assert isinstance(g[0][0], list)
assert isinstance(g[0][1], list)
assert (gen.get_sizes(g[0]) == '([array<2,224,224,3>], [array<2,1>])')
assert (np.all(g[0][1][0] == np.array([[1],[2]])))
def test_read_fn_DataGeneratorDisk():
import os
def read_fn(name, g):
# g is the parent generator object
# name is the image name read from the DataFrame
image_path = os.path.join(g.data_path, name)
return iu.resize_image(iu.read_image(image_path), (100,100))
g = gr.DataGeneratorDisk(ids, read_fn=read_fn, **gen_params)
gen.get_sizes(g[0]) =='([array<2,100,100,3>], [array<2,1>])'
def test_process_args_DataGeneratorDisk():
def preproc(im, arg):
return np.zeros(1) + arg
gen_params_local = gen_params.copy()
gen_params_local.process_fn = preproc
gen_params_local.process_args = {'filename': 'filename_args'}
gen_params_local.batch_size = 4
ids_local = ids.copy()
ids_local['filename_args'] = range(len(ids_local))
g = gr.DataGeneratorDisk(ids_local, **gen_params_local)
x = g[0][0]
assert np.array_equal(np.squeeze(x[0].T), np.arange(gen_params_local.batch_size))
def test_get_sizes():
x = np.array([[1,2,3]])
assert gen.get_sizes(([x.T],1,[4,5])) == '([array<3,1>], <1>, [<1>, <1>])'
assert gen.get_sizes(np.array([[1,[1,2]]])) == 'array<1,2>'
def test_DataGeneratorDisk():
g = gr.DataGeneratorDisk(ids, **gen_params)
g.inputs = ['filename', 'filename']
assert gen.get_sizes(g[0]) == '([array<2,224,224,3>, array<2,224,224,3>], [array<2,1>])'
g.inputs_df = ['score', 'score']
g.inputs = []
g.outputs = []
assert gen.get_sizes(g[0]) == '([array<2,2>], [])'
g.inputs_df = [['score'], ['score','score']]
assert gen.get_sizes(g[0]) == '([array<2,1>, array<2,2>], [])'
g.inputs_df = []
g.outputs = ['score']
assert gen.get_sizes(g[0]) == '([], [array<2,1>])'
g.outputs = ['score',['score']]
with pytest.raises(AssertionError): g[0]
g.outputs = [['score'],['score']]
assert gen.get_sizes(g[0]) == '([], [array<2,1>, array<2,1>])'
def test_H5Reader_and_Writer():
with gen.H5Helper('data.h5', overwrite=True) as h:
data = np.expand_dims(np.array(ids.score), 1)
h.write_data(data, list(ids.filename))
with gen.H5Helper('data.h5', 'r') as h:
data = h.read_data(list(ids.filename))
assert all(data == np.array([[1],[2],[3],[4]]))
def test_DataGeneratorHDF5():
gen_params_local = gen_params.copy()
gen_params_local.update(data_path='data.h5', inputs=['filename'])
g = gr.DataGeneratorHDF5(ids, **gen_params_local)
assert gen.get_sizes(g[0]) == '([array<2,1>], [array<2,1>])'
g.inputs_df = ['score', 'score']
g.inputs = []
g.outputs = []
assert gen.get_sizes(g[0]) == '([array<2,2>], [])'
g.inputs_df = [['score'], ['score','score']]
assert gen.get_sizes(g[0]) == '([array<2,1>, array<2,2>], [])'
g.inputs_df = []
g.outputs = ['score']
assert gen.get_sizes(g[0]) == '([], [array<2,1>])'
g.outputs = ['score',['score']]
with pytest.raises(AssertionError): g[0]
g.outputs = [['score'],['score']]
assert gen.get_sizes(g[0]) == '([], [array<2,1>, array<2,1>])'
def test_process_args_DataGeneratorHDF5():
def preproc(im, *arg):
if arg:
return np.zeros(im.shape) + arg
else:
return im
gen_params_local = gen_params.copy()
gen_params_local.update(process_fn = preproc,
data_path = 'data.h5',
inputs = ['filename', 'filename1'],
process_args = {'filename' :'args'},
batch_size = 4,
shuffle = False)
ids_local = ids.copy()
ids_local['filename1'] = ids_local['filename']
ids_local['args'] = range(len(ids_local))
ids_local['args1'] = range(len(ids_local),0,-1)
g = gr.DataGeneratorHDF5(ids_local, **gen_params_local)
assert np.array_equal(np.squeeze(g[0][0][0]), np.arange(4))
assert np.array_equal(np.squeeze(g[0][0][1]), np.arange(1,5))
assert np.array_equal(np.squeeze(g[0][1]), np.arange(1,5))
def test_multi_process_args_DataGeneratorHDF5():
def preproc(im, arg1, arg2):
return np.zeros(1) + arg1 + arg2
gen_params_local = gen_params.copy()
gen_params_local.process_fn = preproc
gen_params_local.process_args = {'filename': ['filename_args','filename_args']}
gen_params_local.batch_size = 4
ids_local = ids.copy()
ids_local['filename_args'] = range(len(ids_local))
g = gr.DataGeneratorDisk(ids_local, **gen_params_local)
x = g[0]
assert np.array_equal(np.squeeze(x[0][0].T), np.arange(4)*2)
def test_callable_outputs_DataGeneratorHDF5():
d = {'features': [1, 2, 3, 4, 5],
'mask': [1, 0, 1, 1, 0]}
df = pd.DataFrame(data=d)
def filter_features(df):
return np.array(df.loc[df['mask']==1,['features']])
gen_params_local = gen_params.copy()
gen_params_local.update(data_path = None,
outputs = filter_features,
inputs = [],
inputs_df = ['features'],
shuffle = False,
batch_size= 5)
g = gr.DataGeneratorHDF5(df, **gen_params_local)
assert gen.get_sizes(g[0]) == '([array<5,1>], array<3,1>)'
assert all(np.squeeze(g[0][0]) == np.arange(1,6))
assert all(np.squeeze(g[0][1]) == [1,3,4])
def test_multi_return_proc_fn_DataGeneratorDisk():
gen_params_local = gen_params.copy()
gen_params_local.process_fn = lambda im: [im, im+1]
g = gr.DataGeneratorDisk(ids.copy(), **gen_params_local)
assert np.array_equal(g[0][0][0], g[0][0][1]-1)
assert np.array_equal(g[0][1][0], np.array([[1],[2]]))
def test_multi_return_and_read_fn_DataGeneratorDisk():
def read_fn(*args):
g = args[1]
score = np.float32(g.ids[g.ids.filename==args[0]].score)
return np.ones((3,3)) * score
gen_params_local = gen_params.copy()
gen_params_local.batch_size = 3
gen_params_local.read_fn = read_fn
gen_params_local.process_fn = lambda im: [im+1, im+2]
g = gr.DataGeneratorDisk(ids, **gen_params_local)
assert np.array_equal(g[0][0][0], g[0][0][1]-1)
assert np.array_equal(g[0][0][1][0,...], np.ones((3,3))*3.)
def test_generator_len_with_group_by_DataGeneratorDisk():
size = 10
ids_defa = pd.read_csv(u'ids.csv', encoding='latin-1')
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def read_table(your_project_id):
original_your_project_id = your_project_id
# [START bigquerystorage_pandas_tutorial_read_session]
your_project_id = "project-for-read-session"
# [END bigquerystorage_pandas_tutorial_read_session]
your_project_id = original_your_project_id
# [START bigquerystorage_pandas_tutorial_read_session]
from google.cloud import bigquery_storage
from google.cloud.bigquery_storage import types
import pandas
bqstorageclient = bigquery_storage.BigQueryReadClient()
project_id = "bigquery-public-data"
dataset_id = "new_york_trees"
table_id = "tree_species"
table = f"projects/{project_id}/datasets/{dataset_id}/tables/{table_id}"
# Select columns to read with read options. If no read options are
# specified, the whole table is read.
read_options = types.ReadSession.TableReadOptions(
selected_fields=["species_common_name", "fall_color"]
)
parent = "projects/{}".format(your_project_id)
requested_session = types.ReadSession(
table=table,
# Avro is also supported, but the Arrow data format is optimized to
# work well with column-oriented data structures such as pandas
# DataFrames.
data_format=types.DataFormat.ARROW,
read_options=read_options,
)
read_session = bqstorageclient.create_read_session(
parent=parent, read_session=requested_session, max_stream_count=1,
)
# This example reads from only a single stream. Read from multiple streams
# to fetch data faster. Note that the session may not contain any streams
# if there are no rows to read.
stream = read_session.streams[0]
reader = bqstorageclient.read_rows(stream.name)
# Parse all Arrow blocks and create a dataframe.
frames = []
for message in reader.rows().pages:
frames.append(message.to_dataframe())
dataframe = pandas.concat(frames)
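# Sketch (assumption, not part of the original sample): to read from every stream rather than only
# the first, the same calls shown above can be looped over read_session.streams; parallelizing
# across streams is not shown here.
#   frames = []
#   for stream in read_session.streams:
#       reader = bqstorageclient.read_rows(stream.name)
#       for message in reader.rows().pages:
#           frames.append(message.to_dataframe())
#   dataframe = pandas.concat(frames)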
# -*- coding: utf-8 -*-
# This file as well as the whole tsfresh package are licenced under the MIT licence (see the LICENCE.txt)
# <NAME> (<EMAIL>), Blue Yonder Gmbh, 2016
import warnings
from unittest import TestCase
import pandas as pd
from tsfresh.utilities import dataframe_functions
import numpy as np
import six
class NormalizeTestCase(TestCase):
def test_with_dictionaries_one_row(self):
test_df = pd.DataFrame([{"value": 1, "id": "id_1"}])
test_dict = {"a": test_df, "b": test_df}
# A kind is not allowed with dicts
self.assertRaises(ValueError, dataframe_functions.normalize_input_to_internal_representation, test_dict,
"id", None, "a kind", None)
# The value must be present
self.assertRaises(ValueError, dataframe_functions.normalize_input_to_internal_representation, test_dict,
"id", None, None, "something other")
# Nothing should have changed compared to the input data
result_dict, column_id, column_value = \
dataframe_functions.normalize_input_to_internal_representation(test_dict, "id", None, None, "value")
self.assertEqual(column_value, "value")
self.assertEqual(column_id, "id")
six.assertCountEqual(self, list(test_dict.keys()), list(result_dict.keys()))
self.assertEqual(result_dict["a"].iloc[0].to_dict(), {"value": 1, "id": "id_1"})
# The algo should choose the correct value column
result_dict, column_id, column_value = \
dataframe_functions.normalize_input_to_internal_representation(test_dict, "id", None, None, None)
self.assertEqual(column_value, "value")
self.assertEqual(column_id, "id")
def test_with_dictionaries_two_rows(self):
test_df = pd.DataFrame([{"value": 2, "sort": 2, "id": "id_1"},
{"value": 1, "sort": 1, "id": "id_1"}])
test_dict = {"a": test_df, "b": test_df}
# If there are more than one column, the algorithm can not choose the correct column
self.assertRaises(ValueError, dataframe_functions.normalize_input_to_internal_representation, test_dict,
"id", None, None, None)
# Sorting should work
result_dict, column_id, column_value = \
dataframe_functions.normalize_input_to_internal_representation(test_dict, "id", "sort", None, "value")
self.assertEqual(column_value, "value")
self.assertEqual(column_id, "id")
# Assert sorted and without sort column
self.assertEqual(result_dict["a"].iloc[0].to_dict(), {"value": 1, "id": "id_1"})
self.assertEqual(result_dict["a"].iloc[1].to_dict(), {"value": 2, "id": "id_1"})
# Assert the algo has found the correct column
result_dict, column_id, column_value = \
dataframe_functions.normalize_input_to_internal_representation(test_dict, "id", "sort", None, None)
self.assertEqual(column_value, "value")
self.assertEqual(column_id, "id")
def test_with_dictionaries_two_rows_sorted(self):
test_df = pd.DataFrame([{"value": 2, "id": "id_1"},
{"value": 1, "id": "id_1"}])
test_dict = {"a": test_df, "b": test_df}
# Pass the id
result_dict, column_id, column_value = \
dataframe_functions.normalize_input_to_internal_representation(test_dict, "id", None, None, "value")
self.assertEqual(column_value, "value")
self.assertEqual(column_id, "id")
self.assertEqual(result_dict["a"].iloc[0].to_dict(), {"value": 2, "id": "id_1"})
# The algo should have found the correct value column
result_dict, column_id, column_value = \
dataframe_functions.normalize_input_to_internal_representation(test_dict, "id", None, None, None)
self.assertEqual(column_value, "value")
self.assertEqual(column_id, "id")
def test_with_df(self):
# give everything
test_df = pd.DataFrame([{"id": 0, "kind": "a", "value": 3, "sort": 1}])
result_dict, column_id, column_value = \
dataframe_functions.normalize_input_to_internal_representation(test_df, "id", "sort", "kind", "value")
self.assertEqual(column_id, "id")
self.assertEqual(column_value, "value")
self.assertIn("a", result_dict)
six.assertCountEqual(self, list(result_dict["a"].columns), ["id", "value"])
self.assertEqual(list(result_dict["a"]["value"]), [3])
self.assertEqual(list(result_dict["a"]["id"]), [0])
# give no kind
test_df = pd.DataFrame([{"id": 0, "value": 3, "sort": 1}])
result_dict, column_id, column_value = \
dataframe_functions.normalize_input_to_internal_representation(test_df, "id", "sort", None, "value")
self.assertEqual(column_id, "id")
self.assertEqual(column_value, "value")
self.assertIn("value", result_dict)
six.assertCountEqual(self, list(result_dict["value"].columns), ["id", "value"])
self.assertEqual(list(result_dict["value"]["value"]), [3])
self.assertEqual(list(result_dict["value"]["id"]), [0])
# Let the function find the values
test_df = pd.DataFrame([{"id": 0, "a": 3, "b": 5, "sort": 1}])
result_dict, column_id, column_value = \
dataframe_functions.normalize_input_to_internal_representation(test_df, "id", "sort", None, None)
self.assertEqual(column_id, "id")
self.assertEqual(column_value, "_value")
self.assertIn("a", result_dict)
self.assertIn("b", result_dict)
six.assertCountEqual(self, list(result_dict["a"].columns), ["_value", "id"])
self.assertEqual(list(result_dict["a"]["_value"]), [3])
self.assertEqual(list(result_dict["a"]["id"]), [0])
six.assertCountEqual(self, list(result_dict["b"].columns), ["_value", "id"])
self.assertEqual(list(result_dict["b"]["_value"]), [5])
self.assertEqual(list(result_dict["b"]["id"]), [0])
def test_with_wrong_input(self):
test_df = pd.DataFrame([{"id": 0, "kind": "a", "value": 3, "sort": np.NaN}])
self.assertRaises(ValueError, dataframe_functions.normalize_input_to_internal_representation, test_df,
"id", "sort", "kind", "value")
test_df = pd.DataFrame([{"id": 0, "kind": "a", "value": 3, "sort": 1}])
self.assertRaises(AttributeError, dataframe_functions.normalize_input_to_internal_representation, test_df,
"strange_id", "sort", "kind", "value")
test_df = pd.DataFrame([{"id": np.NaN, "kind": "a", "value": 3, "sort": 1}])
self.assertRaises(ValueError, dataframe_functions.normalize_input_to_internal_representation, test_df,
"id", "sort", "kind", "value")
test_df = pd.DataFrame([{"id": 0}])
self.assertRaises(ValueError, dataframe_functions.normalize_input_to_internal_representation, test_df,
"id", None, None, None)
test_df = pd.DataFrame([{"id": 2}, {"id": 1}])
test_dict = {"a": test_df, "b": test_df}
# If there are more than one column, the algorithm can not choose the correct column
self.assertRaises(ValueError, dataframe_functions.normalize_input_to_internal_representation, test_dict,
"id", None, None, None)
test_dict = {"a": pd.DataFrame([{"id": 2, "value_a": 3}, {"id": 1, "value_a": 4}]),
"b": pd.DataFrame([{"id": 2}, {"id": 1}])}
# If there are more than one column, the algorithm can not choose the correct column
self.assertRaises(ValueError, dataframe_functions.normalize_input_to_internal_representation, test_dict,
"id", None, None, None)
test_df = pd.DataFrame([{"id": 0, "value": np.NaN}])
self.assertRaises(ValueError, dataframe_functions.normalize_input_to_internal_representation, test_df,
"id", None, None, "value")
test_df = pd.DataFrame([{"id": 0, "value": np.NaN}])
self.assertRaises(ValueError, dataframe_functions.normalize_input_to_internal_representation, test_df,
None, None, None, "value")
class RollingTestCase(TestCase):
def test_with_wrong_input(self):
test_df = pd.DataFrame({"id": [0, 0], "kind": ["a", "b"], "value": [3, 3], "sort": [np.NaN, np.NaN]})
self.assertRaises(ValueError, dataframe_functions.roll_time_series,
df_or_dict=test_df, column_id="id",
column_sort="sort", column_kind="kind",
rolling_direction=1)
test_df = pd.DataFrame({"id": [0, 0], "kind": ["a", "b"], "value": [3, 3], "sort": [1, 1]})
self.assertRaises(AttributeError, dataframe_functions.roll_time_series,
df_or_dict=test_df, column_id="strange_id",
column_sort="sort", column_kind="kind",
rolling_direction=1)
test_df = {"a": pd.DataFrame([{"id": 0}])}
self.assertRaises(ValueError, dataframe_functions.roll_time_series,
df_or_dict=test_df, column_id="id",
column_sort=None, column_kind="kind",
rolling_direction=1)
self.assertRaises(ValueError, dataframe_functions.roll_time_series,
df_or_dict=test_df, column_id=None,
column_sort=None, column_kind="kind",
rolling_direction=1)
self.assertRaises(ValueError, dataframe_functions.roll_time_series,
df_or_dict=test_df, column_id="id",
column_sort=None, column_kind=None,
rolling_direction=0)
self.assertRaises(ValueError, dataframe_functions.roll_time_series,
df_or_dict=test_df, column_id=None,
column_sort=None, column_kind=None,
rolling_direction=0)
def test_assert_single_row(self):
test_df = pd.DataFrame([{"id": np.NaN, "kind": "a", "value": 3, "sort": 1}])
self.assertRaises(ValueError, dataframe_functions.roll_time_series,
df_or_dict=test_df, column_id="id",
column_sort="sort", column_kind="kind",
rolling_direction=1)
def test_positive_rolling(self):
first_class = pd.DataFrame({"a": [1, 2, 3, 4], "b": [5, 6, 7, 8], "time": range(4)})
second_class = pd.DataFrame({"a": [10, 11], "b": [12, 13], "time": range(20, 22)})
first_class["id"] = 1
second_class["id"] = 2
df_full = pd.concat([first_class, second_class], ignore_index=True)
#------------------------------------------------------------------------------
NROWS_TRAIN=184903891 #dimmension of the train set
NCHUNK_TRAIN=75000000 #length of chunk of data used for training, from total train set
MAX_TRAIN=75000000 #max length of train data (substract from NROWS_TRAIN to get the start position for training set)
NROWS_VALIDATION=2500000 #size of the validation set
ENV_RUN='local' #environment where the kernel is run
PRESET_D = 2 ** 26
PRESET_DM = 3000000000
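# Illustrative arithmetic implied by the comments above (no new logic): the training window starts
# at row NROWS_TRAIN - MAX_TRAIN = 184903891 - 75000000 = 109903891, uses NCHUNK_TRAIN = 75000000
# rows, and NROWS_VALIDATION = 2500000 rows are held out for validation.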
if ENV_RUN=='local':
inpath = '../input/'
suffix = ''
outpath = ''
savepath = ''
elif ENV_RUN=='aws':
inpath = '../input/'
suffix = '.zip'
outpath = '../sub/'
savepath = '../data/'
#------------------------------------------------------------------------------
import pandas as pd
import time
import numpy as np
from sklearn.model_selection import train_test_split
import lightgbm as lgb
import gc
import matplotlib.pyplot as plt
import os
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
def show_max_clean(df,gp,agg_name,agg_type,show_max):
#------------------------------------------------------------------------------
del gp
if show_max:
print( agg_name + " max value = ", df[agg_name].max() )
df[agg_name] = df[agg_name].astype(agg_type)
gc.collect()
return( df )
#------------------------------------------------------------------------------
def perform_count( df, group_cols, agg_name, agg_type='uint32', show_max=False, show_agg=True ):
#------------------------------------------------------------------------------
if show_agg:
print( "Aggregating by ", group_cols , '...' )
gp = df[group_cols][group_cols].groupby(group_cols).size().rename(agg_name).to_frame().reset_index()
df = df.merge(gp, on=group_cols, how='left')
return (show_max_clean(df,gp,agg_name,agg_type,show_max))
#------------------------------------------------------------------------------
def perform_countuniq( df, group_cols, counted, agg_name, agg_type='uint32', show_max=False, show_agg=True ):
#------------------------------------------------------------------------------
if show_agg:
print( "Counting unique ", counted, " by ", group_cols , '...' )
gp = df[group_cols+[counted]].groupby(group_cols)[counted].nunique().reset_index().rename(columns={counted:agg_name})
df = df.merge(gp, on=group_cols, how='left')
return (show_max_clean(df,gp,agg_name,agg_type,show_max))
#------------------------------------------------------------------------------
def perform_cumcount( df, group_cols, counted, agg_name, agg_type='uint32', show_max=False, show_agg=True ):
#------------------------------------------------------------------------------
if show_agg:
print( "Cumulative count by ", group_cols , '...' )
gp = df[group_cols+[counted]].groupby(group_cols)[counted].cumcount()
df[agg_name]=gp.values
return (show_max_clean(df,gp,agg_name,agg_type,show_max))
#------------------------------------------------------------------------------
def perform_mean( df, group_cols, counted, agg_name, agg_type='float32', show_max=False, show_agg=True ):
#------------------------------------------------------------------------------
if show_agg:
print( "Calculating mean of ", counted, " by ", group_cols , '...' )
gp = df[group_cols+[counted]].groupby(group_cols)[counted].mean().reset_index().rename(columns={counted:agg_name})
df = df.merge(gp, on=group_cols, how='left')
return (show_max_clean(df,gp,agg_name,agg_type,show_max))
#------------------------------------------------------------------------------
def perform_var( df, group_cols, counted, agg_name, agg_type='float32', show_max=False, show_agg=True ):
#------------------------------------------------------------------------------
if show_agg:
print( "Calculating variance of ", counted, " by ", group_cols , '...' )
gp = df[group_cols+[counted]].groupby(group_cols)[counted].var().reset_index().rename(columns={counted:agg_name})
df = df.merge(gp, on=group_cols, how='left')
return (show_max_clean(df,gp,agg_name,agg_type,show_max))
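# Illustrative calls for the aggregation helpers above (the derived column names 'ip_app_count' and
# 'ip_uniq_chan' are assumed for this sketch; 'ip', 'app' and 'channel' follow the dtypes used below):
#   train_df = perform_count(train_df, ['ip', 'app'], 'ip_app_count', show_max=True)
#   train_df = perform_countuniq(train_df, ['ip'], 'channel', 'ip_uniq_chan', show_max=True)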
debug=0
if debug:
print('*** debug parameter set: this is a test run for debugging purposes ***')
#------------------------------------------------------------------------------
def lgb_modelfit_nocv(params, dtrain, dvalid, predictors, target='target', objective='binary', metrics='auc',
feval=None, early_stopping_rounds=20, num_boost_round=3000, verbose_eval=10, categorical_features=None):
#------------------------------------------------------------------------------
lgb_params = {
'boosting_type': 'gbdt',
'objective': objective,
'metric':metrics,
'learning_rate': 0.2,
#'is_unbalance': 'true', #because training data is unbalance (replaced with scale_pos_weight)
'num_leaves': 31, # we should let it be smaller than 2^(max_depth)
'max_depth': -1, # -1 means no limit
'min_child_samples': 20, # Minimum number of data need in a child(min_data_in_leaf)
'max_bin': 255, # Number of bucketed bin for feature values
'subsample': 0.6, # Subsample ratio of the training instance.
'subsample_freq': 0, # frequence of subsample, <=0 means no enable
'colsample_bytree': 0.3, # Subsample ratio of columns when constructing each tree.
'min_child_weight': 5, # Minimum sum of instance weight(hessian) needed in a child(leaf)
'subsample_for_bin': 200000, # Number of samples for constructing bin
'min_split_gain': 0, # lambda_l1, lambda_l2 and min_gain_to_split to regularization
'reg_alpha': 0, # L1 regularization term on weights
'reg_lambda': 0, # L2 regularization term on weights
'nthread': 4,
'verbose': 0,
'metric':metrics
}
lgb_params.update(params)
print("preparing validation datasets")
xgtrain = lgb.Dataset(dtrain[predictors].values, label=dtrain[target].values,
feature_name=predictors,
categorical_feature=categorical_features
)
xgvalid = lgb.Dataset(dvalid[predictors].values, label=dvalid[target].values,
feature_name=predictors,
categorical_feature=categorical_features
)
evals_results = {}
bst1 = lgb.train(lgb_params,
xgtrain,
valid_sets=[xgtrain, xgvalid],
valid_names=['train','valid'],
evals_result=evals_results,
num_boost_round=num_boost_round,
early_stopping_rounds=early_stopping_rounds,
verbose_eval=10,
feval=feval)
print("\nModel Report")
print("bst1.best_iteration: ", bst1.best_iteration)
print(metrics+":", evals_results['valid'][metrics][bst1.best_iteration-1])
return (bst1,bst1.best_iteration)
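# Illustrative call (parameter values and the predictor/categorical lists are assumed;
# 'scale_pos_weight' is the replacement for 'is_unbalance' mentioned in the comments above):
#   params = {'learning_rate': 0.2, 'num_leaves': 7, 'max_depth': 3, 'scale_pos_weight': 200}
#   bst, best_iteration = lgb_modelfit_nocv(params, train_df, val_df, predictors,
#                                           target='is_attributed', objective='binary',
#                                           metrics='auc', categorical_features=categorical)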
#------------------------------------------------------------------------------
def perform_analysis(idx_from,idx_to,fileno):
#------------------------------------------------------------------------------
dtypes = {
'ip' : 'uint32',
'app' : 'uint16',
'device' : 'uint16',
'os' : 'uint16',
'channel' : 'uint16',
'is_attributed' : 'uint8',
'click_id' : 'uint32',
}
print('loading train data...',idx_from,idx_to)
train_df = pd.read_csv(inpath+"train.csv", parse_dates=['click_time'],
skiprows=range(1,idx_from), nrows=idx_to-idx_from, dtype=dtypes,
usecols=['ip','app','device','os', 'channel', 'click_time', 'is_attributed'])
print('loading test data...')
if debug:
test_df = pd.read_csv(inpath+"test.csv", nrows=100000,
parse_dates=['click_time'], dtype=dtypes,
usecols=['ip','app','device','os', 'channel', 'click_time', 'click_id'])
else:
test_df = pd.read_csv(inpath+"test.csv", parse_dates=['click_time'],
dtype=dtypes, usecols=['ip','app','device','os', 'channel', 'click_time', 'click_id'])
len_train = len(train_df)
train_df=train_df.append(test_df)
del test_df
gc.collect()
print('Extracting new features...')
train_df['hour'] = pd.to_datetime(train_df.click_time).dt.hour.astype('uint8')
train_df['day'] = pd.to_datetime(train_df.click_time).dt.day.astype('uint8')
# -*- coding: utf-8 -*-
"""
Tests the usecols functionality during parsing
for all of the parsers defined in parsers.py
"""
import nose
import numpy as np
import pandas.util.testing as tm
from pandas import DataFrame, Index
from pandas.lib import Timestamp
from pandas.compat import StringIO
class UsecolsTests(object):
def test_raise_on_mixed_dtype_usecols(self):
# See gh-12678
data = """a,b,c
1000,2000,3000
4000,5000,6000
"""
msg = ("The elements of 'usecols' must "
"either be all strings, all unicode, or all integers")
usecols = [0, 'b', 2]
with tm.assertRaisesRegexp(ValueError, msg):
self.read_csv(StringIO(data), usecols=usecols)
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(1, 2))
result2 = self.read_csv(StringIO(data), usecols=('b', 'c'))
exp = self.read_csv(StringIO(data))
self.assertEqual(len(result.columns), 2)
self.assertTrue((result['b'] == exp['b']).all())
self.assertTrue((result['c'] == exp['c']).all())
tm.assert_frame_equal(result, result2)
result = self.read_csv(StringIO(data), usecols=[1, 2], header=0,
names=['foo', 'bar'])
expected = self.read_csv(StringIO(data), usecols=[1, 2])
expected.columns = ['foo', 'bar']
tm.assert_frame_equal(result, expected)
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), names=['b', 'c'],
header=None, usecols=[1, 2])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['b', 'c']]
tm.assert_frame_equal(result, expected)
result2 = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None, usecols=['b', 'c'])
tm.assert_frame_equal(result2, result)
# see gh-5766
result = self.read_csv(StringIO(data), names=['a', 'b'],
header=None, usecols=[0, 1])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['a', 'b']]
tm.assert_frame_equal(result, expected)
# length conflict, passed names and usecols disagree
self.assertRaises(ValueError, self.read_csv, StringIO(data),
names=['a', 'b'], usecols=[1], header=None)
def test_usecols_index_col_False(self):
# see gh-9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_usecols_index_col_conflict(self):
# see gh-4201: test that index_col as integer reflects usecols
data = 'a,b,c,d\nA,a,1,one\nB,b,2,two'
expected = DataFrame({'c': [1, 2]}, index=Index(
['a', 'b'], name='b'))
df = self.read_csv(StringIO(data), usecols=['b', 'c'],
index_col=0)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=['b', 'c'],
index_col='b')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[1, 2],
index_col='b')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[1, 2],
index_col=0)
tm.assert_frame_equal(expected, df)
expected = DataFrame(
{'b': ['a', 'b'], 'c': [1, 2], 'd': ('one', 'two')})
expected = expected.set_index(['b', 'c'])
df = self.read_csv(StringIO(data), usecols=['b', 'c', 'd'], index_col=['b', 'c'])
#!//usr/local/bin/python2
import math
import operator
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
from plotly.graph_objs import Scatter, Figure, Layout
import plotly.plotly as py
import plotly.graph_objs as go
import pandas as pd
import numpy as np
Xmin=3
Xmax=9
for Abuse in ("Spam Domains", "Phishing Domains", "Malware Domains", "Botnet Domains"):
InitialDataPoint = '2017-may-tlds.csv'
DataPoints = ['2017-may-tlds.csv', '2017-june-tlds.csv', '2017-july-tlds.csv', '2017-aug-tlds.csv', '2017-sept-tlds.csv']
# read data files
datasets=pd.DataFrame()
DataPointsAxes=[]
for DataPoint in DataPoints:
dataset=pd.read_csv(DataPoint,index_col=['TLD'])
dataset["DataPoint"]=DataPoint
datasets=datasets.append(dataset)
DataPointsAxes.append(DataPoint.strip("-tlds.csv"))
TLDs=sorted(set(list(datasets.index)))
empty = pd.DataFrame(index=TLDs)
import traceback
from pathlib import Path
import cv2
import fire
import pandas as pd
from tqdm.contrib.concurrent import thread_map
from manga_ocr_dev.env import FONTS_ROOT, DATA_SYNTHETIC_ROOT
from manga_ocr_dev.synthetic_data_generator.generator import SyntheticDataGenerator
generator = SyntheticDataGenerator()
def f(args):
try:
i, source, id_, text = args
filename = f'{id_}.jpg'
img, text_gt, params = generator.process(text)
cv2.imwrite(str(OUT_DIR / filename), img)
font_path = Path(params['font_path']).relative_to(FONTS_ROOT)
ret = source, id_, text_gt, params['vertical'], str(font_path)
return ret
except Exception as e:
print(traceback.format_exc())
def run(package=0, n_random=1000, n_limit=None, max_workers=16):
"""
:param package: number of data package to generate
:param n_random: how many samples with random text to generate
:param n_limit: limit number of generated samples (for debugging)
:param max_workers: max number of workers
"""
package = f'{package:04d}'
lines = pd.read_csv(DATA_SYNTHETIC_ROOT / f'lines/{package}.csv')
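# Hedged usage note: this module imports `fire`, so run() is presumably exposed as a command-line
# entry point; an illustrative invocation (script name assumed) could be:
#   python run_generate.py run --package 0 --n_random 1000 --max_workers 16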
"""
Beating benchmark with ensembles
Otto Group product classification challenge @ Kaggle
__author__ : <NAME>
"""
import pandas as pd
import numpy as np
from time import time
from sklearn import ensemble, feature_extraction, preprocessing
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.cross_validation import train_test_split
def multiclass_log_loss(y_true, y_pred, eps=1e-15):
predictions = np.clip(y_pred, eps, 1 - eps)
# normalize row sums to 1
predictions /= predictions.sum(axis=1)[:, np.newaxis]
actual = np.zeros(y_pred.shape)
n_samples = actual.shape[0]
actual[np.arange(n_samples), y_true.astype(int)] = 1
vectsum = np.sum(actual * np.log(predictions))
loss = -1.0 / n_samples * vectsum
return loss
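# Worked example (illustrative): for three samples and uniform predictions over three classes,
# the loss is -log(1/3) ≈ 1.0986.
#   y_true = np.array([0, 1, 2])
#   y_pred = np.full((3, 3), 1.0 / 3)
#   multiclass_log_loss(y_true, y_pred)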
# import data
train = pd.read_csv('../input/train.csv')
import astropy.table
import numpy as np
import pandas as pd
def load(path,
x_cols=('psfMag_u', 'psfMag_g', 'psfMag_r', 'psfMag_i', 'psfMag_z'),
y_col='redshift',
class_col='class',
class_val='Galaxy'):
# Cast x_cols to list so Pandas doesn't complain…
x_cols_l = list(x_cols)
if '.h5' in path or '.hdf' in path:
# We have an HDF5 file
data = pd.read_hdf(path)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 15 11:54:15 2020
@author: <NAME>
gbm loop: county
"""
n=5
if n<10:
n=input('How many of the highest population counties in the US do you want to project? Please use numerical input:')
n=int(n)
if n > 10:
print('That will take a long time due to API limitations. Are you sure?')
answer=str(input('Y/N:'))
if answer in ('N', 'n'):
n=input('How many of the highest population counties in the US do you want to project? Please use numerical input:')
n=int(n)
if int(n) > 10: print('Ok. Please be patient.')
else:
print('Ok, please be patient.')
proj=input('How far out would you like to project? Please enter numerical days as input.')
proj=int(proj)
projlb=proj-5
for name in [proj,projlb]:
name=str(name)
print(f"Projecting the total new cases until '{proj}' days in the future for the top '{n}' most populated US counties.")
answer2=input('Are you ready? Y/N:')
if str(answer2) in ('Y', 'y'):
print('Prepare for a quantum leap. Please do not interrupt me.')
else:
print('Sorry, science waits for no one.')
n=int(n)
proj=int(proj)
##############################################################################
import os
import pandas as pd
os.getcwd()
os.chdir('/Users/revanth/Documents/Datta_Fund/c3ai/')
#os.chdir() #need locids.csv and api pkg in wd, also install h2o
import c3aidatalake
import h2o
from h2o.estimators.gbm import H2OGradientBoostingEstimator
import bottleneck as bn
import numpy as np
from pyfinance import ols
##############################################################################
dfp = c3aidatalake.fetch(
"populationdata",
{
"spec": {
"filter": "contains(parent, 'UnitedStates') && populationAge == 'Total' && gender == 'Male/Female' && year == 2019 && value >= '1000000'",
"limit": -1
}
},
)
#&& value >= '500000'
countypops=dfp[[dfp.columns[-1],dfp.columns[-5]]]
countypops=countypops.copy()
countypops.sort_values(by='value',ascending=False,inplace=True)
countypops.reset_index(inplace=True,drop=True)
countypops.drop(8, inplace=True, axis=0)
l_o_c=countypops.iloc[0:n,0].tolist()
print('toppers')
#
##################CODE TO FETCH AND CALCULATE AVERAGE INTENT SCORES PER STATE###################
#fetch all survey data
survey = c3aidatalake.fetch(
"surveydata",
{
"spec": {
}
},
get_all = True
)
#filter it for the intent metrics - mask, 6 feet, stay home, wash hands
#put data in to dictionary
d = {
'State': np.array(list(map(lambda a: a.split('_')[0], list(survey.iloc[:]['location.id'])))),
'Mask': np.array(survey.iloc[:]['coronavirusIntent_Mask']),
'SixF': np.array(survey.iloc[:]['coronavirusIntent_SixFeet']),
'StayH': np.array(survey.iloc[:]['coronavirusIntent_StayHome']),
'WashH': np.array(survey.iloc[:]['coronavirusIntent_WashHands'])
}
#turn dictionary into dataframe and calculate averages by state
intent_by_state = pd.DataFrame(d).groupby("State").mean()
#delete dictionary and survey data because it is not needed
del d, survey
##############################################################################allmobilitycode
#####get cc, mob, dex
piqli=pd.read_csv('/Users/revanth/Documents/Datta_Fund/c3ai/piqlabels.csv')
pl=piqli.iloc[:,0].tolist()
del pl[26:] #remove device counts and non adjusted
del pl[1:13]
del pl[2:] #these deletions bc only count and exposure populated for harris, can check for otherse later
#only ones that come through for Harris are adjusted count and exposure
mob=['Apple_WalkingMobility',
'Apple_DrivingMobility',
'Apple_TransitMobility',
'Google_GroceryMobility',
'Google_ParksMobility',
'Google_TransitStationsMobility',
'Google_RetailMobility',
'Google_ResidentialMobility',
'Google_WorkplacesMobility']
cl=['JHU_ConfirmedCases',
'JHU_ConfirmedDeaths',
'JHU_ConfirmedRecoveries',]
main=pl+mob+cl
print('lists')
train = pd.DataFrame()
import numpy as np
from datetime import timedelta
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
from pandas import to_timedelta
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas import (Series, Timedelta, DataFrame, Timestamp, TimedeltaIndex,
timedelta_range, date_range, DatetimeIndex, Int64Index,
_np_version_under1p10, Float64Index, Index, tslib)
from pandas.tests.test_base import Ops
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
"""Various one off plots.
Usage:
./plots.py
Author:
<NAME> - 2021-08-30
"""
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from typing import List, Optional
from scipy.stats import norm
from warzone.base import normalize, running_mean, cumulative_mean
from warzone.document_filter import DocumentFilter
def personal_plot(doc_filter: DocumentFilter) -> None:
"""
Returns a series of plots.
:param doc_filter: A DocumentFilter.
:type doc_filter: DocumentFilter
:return: *None*
:example: *None*
:note: This is intended to be used with map_choice, mode_choice and a Gamertag inputted into the DocumentFilter.
"""
data = doc_filter.df
dates = list(data['startDate'].unique())
fig, ax = plt.subplots(nrows=3, ncols=2, figsize=(30, 30))
plt.title('Personal Data for: ' + doc_filter.username, fontsize='xx-large')
# win/loss
win_count_lst = []
game_count_lst = []
wl_ratio_lst = []
for i in dates:
temp = data[data['startDate'] == i]
wins = len(temp[temp['teamPlacement'] == 1])
losses = len(temp[temp['teamPlacement'] > 1])
win_count_lst.append(wins)
game_count_lst.append(losses + wins)
wl_ratio_lst.append(wins / (wins + losses))
wl_df = pd.DataFrame(wl_ratio_lst, columns=['ratio'], index=dates)
wl_df['wins'] = win_count_lst
wl_df['losses'] = game_count_lst
cm_wl = cumulative_mean(np.array(wl_df['ratio']))
rm_wl = running_mean(np.array(wl_df['ratio']), 50)
ax[0, 0].set_title('Daily Win / Loss Ratio', fontsize='xx-large')
ax[0, 0].plot(cm_wl, label='W/L Ratio Cumulative Mean', color='tab:blue')
ax[0, 0].plot(rm_wl, label='W/L Ratio Running Mean', color='tab:blue', alpha=0.25)
ax[0, 0].legend(loc='lower left', fontsize='large', frameon=True, framealpha=0.85)
ax2 = ax[0, 0].twinx()
ax2.plot(np.array(wl_df['losses']), label='Daily Game Count', color='black', alpha=0.25)
ax2.legend(loc='lower right', fontsize='large', frameon=True, framealpha=0.85)
# ax2.set_xticks(np.arange(min(range(len(wl_df))), max(range(len(wl_df))) + 1, 100.0))
# placement
cm_p = cumulative_mean(np.array(data['placementPercent']))
rm_p = running_mean(np.array(data['placementPercent']), 50)
ax[0, 1].set_title('Team Placement', fontsize='xx-large')
ax[0, 1].plot(cm_p, label='Placement Cumulative Mean', color='tab:blue')
ax[0, 1].plot(rm_p, label='Placement Running Mean', color='tab:blue', alpha=0.25)
ax[0, 1].set_xticks(np.arange(min(range(len(data['matchID']))), max(range(len(data['matchID']))) + 1, 100.0))
ax[0, 1].legend(loc='lower right', fontsize='large', frameon=True, framealpha=0.85)
# kd
cm_kd = cumulative_mean(np.array(data['kdRatio']))
rm_kd = running_mean(np.array(data['kdRatio']), 50)
ax[1, 0].set_title('Kill Death Ratio', fontsize='xx-large')
ax[1, 0].plot(cm_kd, label='Kd Ratio Cumulative Mean', color='tab:blue')
ax[1, 0].plot(rm_kd, label='Kd Ratio Running Mean', color='tab:blue', alpha=0.25)
ax[1, 0].set_xticks(np.arange(min(range(len(data['matchID']))), max(range(len(data['matchID']))) + 1, 100.0))
ax[1, 0].legend(loc='lower right', fontsize='large', frameon=True, framealpha=0.85)
# Kills and Deaths
ax[1, 1].set_title('Kills and Deaths Per Game', fontsize='xx-large')
cm_kills = cumulative_mean(np.array(data['kills']))
cm_deaths = cumulative_mean(np.array(data['deaths']))
rm_kills = running_mean(np.array(data['kills']), 50)
rm_deaths = running_mean(np.array(data['deaths']), 50)
ax[1, 1].set_title('Kills and Deaths', fontsize='xx-large')
ax[1, 1].plot(cm_kills, label='Kills Cumulative Mean', color='green')
ax[1, 1].plot(cm_deaths, label='Deaths Cumulative Mean', color='red')
ax[1, 1].plot(rm_kills, label='Kills Running Mean', color='green', alpha=0.25)
ax[1, 1].plot(rm_deaths, label='Deaths Running Mean', color='red', alpha=0.25)
ax[1, 1].set_xticks(np.arange(min(range(len(data['matchID']))), max(range(len(data['matchID']))) + 1, 100.0))
ax[1, 1].legend(loc='lower right', fontsize='large', frameon=True, framealpha=0.85)
# Damage
cm_dam_d = cumulative_mean(np.array(data['damageDone']))
cm_dam_t = cumulative_mean(np.array(data['damageTaken']))
rm_dam_d = running_mean(np.array(data['damageDone']), 50)
rm_dam_t = running_mean(np.array(data['damageTaken']), 50)
ax[2, 0].set_title('Damage', fontsize='xx-large')
ax[2, 0].plot(cm_dam_d, label='Damage Done Cumulative Mean', color='green')
ax[2, 0].plot(cm_dam_t, label='Damage Taken Cumulative Mean', color='red')
ax[2, 0].plot(rm_dam_d, label='Damage Done Running Mean', color='green', alpha=0.25)
ax[2, 0].plot(rm_dam_t, label='Damage Taken Running Mean', color='red', alpha=0.25)
ax[2, 0].set_xticks(np.arange(min(range(len(data['matchID']))), max(range(len(data['matchID']))) + 1, 100.0))
ax[2, 0].legend(loc='lower right', fontsize='large', frameon=True, framealpha=0.85)
# Misc
ax[2, 1].set_title('Misc', fontsize='xx-large')
ax[2, 1].plot(data['headshots'])
ax[2, 1].set_xticks(np.arange(min(range(len(data['matchID']))), max(range(len(data['matchID']))) + 1, 100.0))
ax[2, 1].legend(loc='lower right', fontsize='large', frameon=True, framealpha=0.85)
plt.show()
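# Hypothetical usage sketch for personal_plot (the DocumentFilter constructor arguments below are
# assumed, not taken from this module; the docstring only notes that map_choice, mode_choice and a
# Gamertag should be set on the DocumentFilter):
#   doc_filter = DocumentFilter(input_df=combined_df, map_choice='mp_escape', mode_choice='quad',
#                               username='MyGamertag')
#   personal_plot(doc_filter)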
def lobby_plot(doc_filter: DocumentFilter) -> None:
"""
Returns a series of plots.
:param doc_filter: A DocumentFilter.
:type doc_filter: DocumentFilter
:return: *None*
:example: *None*
:note: This is intended to be used with map_choice and mode_choice inputted into the DocumentFilter.
"""
data = doc_filter.df
games = doc_filter.unique_ids
dates = list(data['startDate'].unique())
col_lst = ['kdRatio', 'kills', 'deaths', 'damageDone', 'damageTaken', 'percentTimeMoving', 'distanceTraveled',
'objectiveTeamWiped', 'objectiveReviver', 'missionsComplete']
day_dic = {}
for date in dates:
temp_df = data[data['startDate'] == date].fillna(0)
day_dic[date] = [np.mean(temp_df[col]) for col in col_lst]
game_dic = {}
for game in games:
temp_df = data[data['matchID'] == game].fillna(0)
game_dic[game] = [np.mean(temp_df[col]) for col in col_lst]
day_df = pd.DataFrame.from_dict(day_dic, orient='index', columns=col_lst)
import numpy as np
import pandas as pd
TZ_LOOKUP = {
'America/Anchorage': 9,
'America/Chicago': 6,
'America/Denver': 7,
'America/Los_Angeles': 8,
'America/New_York': 5,
'America/Phoenix': 7,
'Pacific/Honolulu': 10
}
def load_results():
base = 's3://pvinsight.nrel/output/'
nrel_data = pd.read_csv(base + 'pvo_results.csv')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 31 15:16:47 2017
@author: wasifaahmed
"""
from flask import Flask, flash,render_template, request, Response, redirect, url_for, send_from_directory,jsonify,session
import json as json
from datetime import datetime,timedelta,date
from sklearn.cluster import KMeans
import numpy as np
from PIL import Image
from flask.ext.sqlalchemy import SQLAlchemy
import matplotlib.image as mpimg
from io import StringIO
from skimage import data, exposure, img_as_float ,io,color
import scipy
from scipy import ndimage
import time
import tensorflow as tf
import os , sys
import shutil
import numpy as np
import pandas as pd
from PIL import Image
from model import *
from sqlalchemy.sql import text
from sqlalchemy import *
from forms import *
import math
from io import StringIO
import csv
from sqlalchemy.orm import load_only
from datetime import datetime,date
from numpy import genfromtxt
from sqlalchemy.ext.serializer import loads, dumps
from sqlalchemy.orm import sessionmaker, scoped_session
from flask_bootstrap import Bootstrap
graph = tf.Graph()
with graph.as_default():
sess = tf.Session(graph=graph)
init_op = tf.global_variables_initializer()
pointsarray=[]
def load_model():
sess.run(init_op)
saver = tf.train.import_meta_graph('E:/FRAS Windows/FRAS_production/Simulation/FRAS_20170726/FRAS_20170727.meta')
#saver = tf.train.import_meta_graph('/Users/wasifaahmed/Documents/FRAS/Fras_production_v.0.1/FRAS Windows/FRAS Windows/FRAS_production/Simulation/FRAS_20170726/FRAS_20170727.meta')
print('The model is loading...')
#saver.restore(sess, "/Users/wasifaahmed/Documents/FRAS/Fras_production_v.0.1/FRAS Windows/FRAS Windows/FRAS_production/Simulation/FRAS_20170726/FRAS_20170727")
saver.restore(sess, 'E:/FRAS Windows/FRAS_production/Simulation/FRAS_20170726/FRAS_20170727')
print('loaded...')
pass
engine =create_engine('postgresql://postgres:user@localhost/postgres')
Session = scoped_session(sessionmaker(bind=engine))
mysession = Session()
app = Flask(__name__)
app.config.update(
DEBUG=True,
SECRET_KEY='\<KEY>')
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:user@localhost/fras_production'
db.init_app(app)
Bootstrap(app)
@app.after_request
def add_header(response):
response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'
response.headers['Cache-Control'] = 'public, max-age=0'
return response
@app.route('/',methods=['GET', 'POST'])
def login():
form = LoginForm()
return render_template('forms/login.html', form=form)
@app.route('/home',methods=['GET', 'POST'])
def index():
return render_template('pages/home.html')
@app.route('/detail_setup/')
def Detail_Setup():
curdate=time.strftime("%Y-%m-%d")
selection=Shooting_Session.query.filter(Shooting_Session.date>=curdate).order_by(Shooting_Session.datetimestamp.desc()).all()
firer_1 = [row.service_id for row in Shooter.query.all()]
return render_template('pages/detail_setup.html',
data=selection,
firer_1=firer_1)
@app.route('/auto_setup/')
def auto_setup():
drop=[]
curdate=time.strftime("%Y-%m-%d")
form=BulkRegistrationForm()
selection_2=Shooting_Session.query.filter(Shooting_Session.date>=curdate).order_by(Shooting_Session.datetimestamp.desc()).all()
selection=TGroup.query.distinct(TGroup.group_no).filter(TGroup.date==curdate).all()
return render_template('pages/auto_setup.html',
data=selection, data_2=selection_2,form=form)
@app.route('/auto_setup_1/')
def auto_setup_1():
drop=[]
curdate=time.strftime("%Y-%m-%d")
form=BulkRegistrationForm()
selection_2=Shooting_Session.query.filter(Shooting_Session.date>=curdate).order_by(Shooting_Session.datetimestamp.desc()).all()
selection=TGroup.query.distinct(TGroup.group_no).all()
return render_template('pages/auto_setup_1.html',
data=selection, data_2=selection_2,form=form)
@app.route('/group_gen/',methods=['GET', 'POST'])
def group_gen():
da_1=None
da_2=None
da_3=None
da_4=None
da_5=None
da_6=None
da_7=None
da_8=None
if request.method == "POST":
data = request.get_json()
group=data['data']
session['group']=group
data=TGroup.query.filter(TGroup.group_no==group).scalar()
da_1=data.target_1_no
da_2=data.target_2_no
da_3=data.target_3_no
da_4=data.target_4_no
da_5=data.target_5_no
da_6=data.target_6_no
da_7=data.target_7_no
da_8=data.target_8_no
return jsonify(data1=da_1,
data2=da_2,
data3=da_3,
data4=da_4,
data5=da_5,
data6=da_6,
data7=da_7,
data8=da_8
)
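# detail_exitence_1: loads an existing detail into the temporary TShooting table
# and returns firer #1's name, rank, cantonment, service id and recent history
# (last five Grouping results, MPI tendencies and group lengths) as JSON.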
@app.route('/detail_exitence_1/',methods=['GET', 'POST'])
def detail_exitence_1():
ra_1=None
da_1=None
detail=None
service_id_1=None
session=None
paper=None
set_no=None
cant=None
if request.method == "POST":
data = request.get_json()
detail=data['data']
dt=time.strftime("%Y-%m-%d")
data=db.session.query(Session_Detail).filter(Session_Detail.detail_no==detail).scalar()
db.session.query(TShooting).delete()
db.session.commit()
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=data.session_id,
detail_no=data.detail_no,
target_1_id=data.target_1_id,
target_2_id=data.target_2_id,
target_3_id=data.target_3_id,
target_4_id=data.target_4_id,
target_5_id=data.target_5_id,
target_6_id=data.target_6_id,
target_7_id=data.target_7_id,
target_8_id=data.target_8_id,
paper_ref=data.paper_ref,
set_no=data.set_no,
save_flag=0
)
db.session.add(Tdetail_shots)
db.session.commit()
res=[]
ten=[]
gp_len=[]
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==data.target_1_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==data.target_1_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
tgp = db.session.query(Grouping.grouping_length_f).filter(Grouping.firer_id==data.target_1_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
for ele in tres:
for ele2 in ele:
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
ten.append(ele4)
for ele5 in tgp:
for ele6 in ele5:
gp_len.append(ele6)
da_1=db.session.query(Shooter.name).filter(Shooter.id==data.target_1_id).scalar()
cant_id=db.session.query(Shooter.cantonment_id).filter(Shooter.id==data.target_1_id).scalar()
cant=db.session.query(Cantonment.cantonment).filter(Cantonment.id==cant_id).scalar()
ra_1_id=db.session.query(Shooter.rank_id).filter(Shooter.id==data.target_1_id).scalar()
ra_1 = db.session.query(Rank.name).filter(Rank.id==ra_1_id).scalar()
session=db.session.query(TShooting.session_id).scalar()
paper=db.session.query(TShooting.paper_ref).scalar()
set_no=db.session.query(TShooting.set_no).scalar()
service_id_1 = db.session.query(Shooter.service_id).filter(Shooter.id==data.target_1_id).scalar()
return jsonify(
data1=da_1,
ra_1=ra_1,
detail=detail,
service_id_1=service_id_1,
session=session,
paper=paper,
set_no=set_no,
cant=cant,
res=res,
ten=ten,
gp_len=gp_len
)
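# generate_ref: returns 0 when a new target paper is requested, otherwise the
# paper reference currently stored in TPaper_ref.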
@app.route('/generate_ref/' ,methods=['GET', 'POST'])
def generate_ref():
g=None
if request.method == "POST":
data = request.get_json()
paper_ref =data['data']
if (paper_ref == 'New'):
g=0
else:
obj=TPaper_ref.query.scalar()
g= obj.paper_ref
return jsonify(gen=int(g))
@app.route('/create_detail_target_2/', methods=['GET', 'POST'])
def create_detail_target_2():
curdate=time.strftime("%Y-%m-%d")
firer_1 = [row.service_id for row in Shooter.query.all()]
detail_data=TShooting.query.scalar()
return render_template('pages/create_detail_target_2.html',
detail_data=detail_data,
firer_1=firer_1
)
@app.route('/save_target_2/', methods=['GET', 'POST'])
def save_target_2():
r=request.form['tag']
r_object=Shooter.query.filter(Shooter.service_id==r).scalar()
r_id=r_object.id
ses=Session_Detail.query.first()
ses.target_2_id=r_id
db.session.commit()
temp =TShooting.query.first()
temp.target_2_id=r_id
db.session.commit()
return redirect(url_for('individual_score_target_2'))
@app.route('/create_detail_target_1/', methods=['GET', 'POST'])
def create_detail_target_1():
curdate=time.strftime("%Y-%m-%d")
selection=Shooting_Session.query.filter(Shooting_Session.date==curdate).all()
firer_1 = [row.service_id for row in Shooter.query.all()]
return render_template('pages/create_detail_target_1.html',
data=selection,
firer_1=firer_1
)
@app.route('/create_session/', methods=['GET', 'POST'])
def create_session():
try:
data = Shooter.query.all()
rang= Range.query.all()
firearms = Firearms.query.all()
ammunation = Ammunation.query.all()
rang_name = request.form.get('comp_select_4')
fire_name = request.form.get('comp_select_5')
ammu_name = request.form.get('comp_select_6')
form=SessionForm()
if(rang_name is None):
range_id=999
fire_id=999
ammu_id=999
else:
range_id = db.session.query(Range.id).filter(Range.name==rang_name).scalar()
fire_id = db.session.query(Firearms.id).filter(Firearms.name==fire_name).scalar()
ammu_id = db.session.query(Ammunation.id).filter(Ammunation.name==ammu_name).scalar()
if form.validate_on_submit():
shooting=Shooting_Session(
date=form.date.data.strftime('%Y-%m-%d'),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
shooting_range_id=range_id,
firearms_id=fire_id,
ammunation_id=ammu_id,
target_distance = form.target_distance.data,
weather_notes = form.weather_notes.data,
comments = form.comments.data,
session_no=form.session_no.data,
occasion=form.occ.data
)
db.session.add(shooting)
db.session.commit()
return redirect(url_for('create_detail_target_1'))
except Exception as e:
        # NOTE: url_for() expects an endpoint name, not a template file; render the error page directly.
        return render_template('error5_505.html')
return render_template('forms/shooting_form.html', form=form, data =data ,rang=rang , firearmns=firearms, ammunation = ammunation)
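# monthly_report: maps the selected month name to a fixed start/end date in the
# current year and joins Grouping, Shooter, Rank and MPI rows for that window.
# Every branch below runs the same query; only the date suffixes differ.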
@app.route('/monthly_report/',methods=['GET','POST'])
def monthly_report():
year=None
month=None
date_start=None
try:
if request.method=='POST':
month=request.form.get('comp_select')
year = datetime.now().year
if (month == 'October'):
dt_start='-10-01'
dt_end ='-10-31'
str_date_start = str(year)+dt_start
date_start=datetime.strptime(str_date_start, "%Y-%m-%d")
str_date_end = str(year)+dt_end
date_end=datetime.strptime(str_date_end, "%Y-%m-%d")
dat1=db.session.query(Grouping.date,Shooter.service_id,Rank.name,Shooter.name.label('firer'),Shooter.unit,Shooter.brigade,Grouping.detail_no,Grouping.result,Grouping.grouping_length_f,MPI.tendency_text).filter(Grouping.date.between(date_start,date_end), Grouping.firer_id==Shooter.id,Shooter.rank_id==Rank.id and Grouping.date==MPI.date, Grouping.session_id==MPI.session_id,Grouping.firer_id==MPI.firer_id,Grouping.detail_no==MPI.detail_no,Grouping.target_no==MPI.target_no,Grouping.spell_no==MPI.spell_no,Grouping.paper_ref==MPI.paper_ref).all()
elif(month=='January'):
dt_start='-01-01'
dt_end ='-01-31'
str_date_start = str(year)+dt_start
date_start=datetime.strptime(str_date_start, "%Y-%m-%d")
str_date_end = str(year)+dt_end
date_end=datetime.strptime(str_date_end, "%Y-%m-%d")
dat1=db.session.query(Grouping.date,Shooter.service_id,Rank.name,Shooter.name.label('firer'),Shooter.unit,Shooter.brigade,Grouping.detail_no,Grouping.result,Grouping.grouping_length_f,MPI.tendency_text).filter(Grouping.date.between(date_start,date_end), Grouping.firer_id==Shooter.id,Shooter.rank_id==Rank.id and Grouping.date==MPI.date, Grouping.session_id==MPI.session_id,Grouping.firer_id==MPI.firer_id,Grouping.detail_no==MPI.detail_no,Grouping.target_no==MPI.target_no,Grouping.spell_no==MPI.spell_no,Grouping.paper_ref==MPI.paper_ref).all()
elif(month=='February'):
dt_start='-02-01'
dt_end ='-02-28'
str_date_start = str(year)+dt_start
date_start=datetime.strptime(str_date_start, "%Y-%m-%d")
str_date_end = str(year)+dt_end
date_end=datetime.strptime(str_date_end, "%Y-%m-%d")
dat1=db.session.query(Grouping.date,Shooter.service_id,Rank.name,Shooter.name.label('firer'),Shooter.unit,Shooter.brigade,Grouping.detail_no,Grouping.result,Grouping.grouping_length_f,MPI.tendency_text).filter(Grouping.date.between(date_start,date_end), Grouping.firer_id==Shooter.id,Shooter.rank_id==Rank.id and Grouping.date==MPI.date, Grouping.session_id==MPI.session_id,Grouping.firer_id==MPI.firer_id,Grouping.detail_no==MPI.detail_no,Grouping.target_no==MPI.target_no,Grouping.spell_no==MPI.spell_no,Grouping.paper_ref==MPI.paper_ref).all()
elif(month=='March'):
dt_start='-03-01'
dt_end ='-03-31'
str_date_start = str(year)+dt_start
date_start=datetime.strptime(str_date_start, "%Y-%m-%d")
str_date_end = str(year)+dt_end
date_end=datetime.strptime(str_date_end, "%Y-%m-%d")
dat1=db.session.query(Grouping.date,Shooter.service_id,Rank.name,Shooter.name.label('firer'),Shooter.unit,Shooter.brigade,Grouping.detail_no,Grouping.result,Grouping.grouping_length_f,MPI.tendency_text).filter(Grouping.date.between(date_start,date_end), Grouping.firer_id==Shooter.id,Shooter.rank_id==Rank.id and Grouping.date==MPI.date, Grouping.session_id==MPI.session_id,Grouping.firer_id==MPI.firer_id,Grouping.detail_no==MPI.detail_no,Grouping.target_no==MPI.target_no,Grouping.spell_no==MPI.spell_no,Grouping.paper_ref==MPI.paper_ref).all()
elif(month=='April'):
dt_start='-04-01'
dt_end ='-04-30'
str_date_start = str(year)+dt_start
date_start=datetime.strptime(str_date_start, "%Y-%m-%d")
str_date_end = str(year)+dt_end
date_end=datetime.strptime(str_date_end, "%Y-%m-%d")
dat1=db.session.query(Grouping.date,Shooter.service_id,Rank.name,Shooter.name.label('firer'),Shooter.unit,Shooter.brigade,Grouping.detail_no,Grouping.result,Grouping.grouping_length_f,MPI.tendency_text).filter(Grouping.date.between(date_start,date_end), Grouping.firer_id==Shooter.id,Shooter.rank_id==Rank.id and Grouping.date==MPI.date, Grouping.session_id==MPI.session_id,Grouping.firer_id==MPI.firer_id,Grouping.detail_no==MPI.detail_no,Grouping.target_no==MPI.target_no,Grouping.spell_no==MPI.spell_no,Grouping.paper_ref==MPI.paper_ref).all()
elif(month=='May'):
dt_start='-05-01'
dt_end ='-05-31'
str_date_start = str(year)+dt_start
date_start=datetime.strptime(str_date_start, "%Y-%m-%d")
str_date_end = str(year)+dt_end
date_end=datetime.strptime(str_date_end, "%Y-%m-%d")
dat1=db.session.query(Grouping.date,Shooter.service_id,Rank.name,Shooter.name.label('firer'),Shooter.unit,Shooter.brigade,Grouping.detail_no,Grouping.result,Grouping.grouping_length_f,MPI.tendency_text).filter(Grouping.date.between(date_start,date_end), Grouping.firer_id==Shooter.id,Shooter.rank_id==Rank.id and Grouping.date==MPI.date, Grouping.session_id==MPI.session_id,Grouping.firer_id==MPI.firer_id,Grouping.detail_no==MPI.detail_no,Grouping.target_no==MPI.target_no,Grouping.spell_no==MPI.spell_no,Grouping.paper_ref==MPI.paper_ref).all()
elif(month=='June'):
dt_start='-06-01'
dt_end ='-06-30'
str_date_start = str(year)+dt_start
date_start=datetime.strptime(str_date_start, "%Y-%m-%d")
str_date_end = str(year)+dt_end
date_end=datetime.strptime(str_date_end, "%Y-%m-%d")
dat1=db.session.query(Grouping.date,Shooter.service_id,Rank.name,Shooter.name.label('firer'),Shooter.unit,Shooter.brigade,Grouping.detail_no,Grouping.result,Grouping.grouping_length_f,MPI.tendency_text).filter(Grouping.date.between(date_start,date_end), Grouping.firer_id==Shooter.id,Shooter.rank_id==Rank.id and Grouping.date==MPI.date, Grouping.session_id==MPI.session_id,Grouping.firer_id==MPI.firer_id,Grouping.detail_no==MPI.detail_no,Grouping.target_no==MPI.target_no,Grouping.spell_no==MPI.spell_no,Grouping.paper_ref==MPI.paper_ref).all()
elif(month=='July'):
dt_start='-07-01'
dt_end ='-07-31'
str_date_start = str(year)+dt_start
date_start=datetime.strptime(str_date_start, "%Y-%m-%d")
str_date_end = str(year)+dt_end
date_end=datetime.strptime(str_date_end, "%Y-%m-%d")
dat1=db.session.query(Grouping.date,Shooter.service_id,Rank.name,Shooter.name.label('firer'),Shooter.unit,Shooter.brigade,Grouping.detail_no,Grouping.result,Grouping.grouping_length_f,MPI.tendency_text).filter(Grouping.date.between(date_start,date_end), Grouping.firer_id==Shooter.id,Shooter.rank_id==Rank.id and Grouping.date==MPI.date, Grouping.session_id==MPI.session_id,Grouping.firer_id==MPI.firer_id,Grouping.detail_no==MPI.detail_no,Grouping.target_no==MPI.target_no,Grouping.spell_no==MPI.spell_no,Grouping.paper_ref==MPI.paper_ref).all()
elif(month=='August'):
dt_start='-08-01'
dt_end ='-08-31'
str_date_start = str(year)+dt_start
date_start=datetime.strptime(str_date_start, "%Y-%m-%d")
str_date_end = str(year)+dt_end
date_end=datetime.strptime(str_date_end, "%Y-%m-%d")
dat1=db.session.query(Grouping.date,Shooter.service_id,Rank.name,Shooter.name.label('firer'),Shooter.unit,Shooter.brigade,Grouping.detail_no,Grouping.result,Grouping.grouping_length_f,MPI.tendency_text).filter(Grouping.date.between(date_start,date_end), Grouping.firer_id==Shooter.id,Shooter.rank_id==Rank.id and Grouping.date==MPI.date, Grouping.session_id==MPI.session_id,Grouping.firer_id==MPI.firer_id,Grouping.detail_no==MPI.detail_no,Grouping.target_no==MPI.target_no,Grouping.spell_no==MPI.spell_no,Grouping.paper_ref==MPI.paper_ref).all()
elif(month=='September'):
dt_start='-09-01'
dt_end ='-09-30'
str_date_start = str(year)+dt_start
date_start=datetime.strptime(str_date_start, "%Y-%m-%d")
str_date_end = str(year)+dt_end
date_end=datetime.strptime(str_date_end, "%Y-%m-%d")
dat1=db.session.query(Grouping.date,Shooter.service_id,Rank.name,Shooter.name.label('firer'),Shooter.unit,Shooter.brigade,Grouping.detail_no,Grouping.result,Grouping.grouping_length_f,MPI.tendency_text).filter(Grouping.date.between(date_start,date_end), Grouping.firer_id==Shooter.id,Shooter.rank_id==Rank.id and Grouping.date==MPI.date, Grouping.session_id==MPI.session_id,Grouping.firer_id==MPI.firer_id,Grouping.detail_no==MPI.detail_no,Grouping.target_no==MPI.target_no,Grouping.spell_no==MPI.spell_no,Grouping.paper_ref==MPI.paper_ref).all()
elif(month=='November'):
dt_start='-11-01'
dt_end ='-11-30'
str_date_start = str(year)+dt_start
date_start=datetime.strptime(str_date_start, "%Y-%m-%d")
str_date_end = str(year)+dt_end
date_end=datetime.strptime(str_date_end, "%Y-%m-%d")
dat1=db.session.query(Grouping.date,Shooter.service_id,Rank.name,Shooter.name.label('firer'),Shooter.unit,Shooter.brigade,Grouping.detail_no,Grouping.result,Grouping.grouping_length_f,MPI.tendency_text).filter(Grouping.date.between(date_start,date_end), Grouping.firer_id==Shooter.id,Shooter.rank_id==Rank.id and Grouping.date==MPI.date, Grouping.session_id==MPI.session_id,Grouping.firer_id==MPI.firer_id,Grouping.detail_no==MPI.detail_no,Grouping.target_no==MPI.target_no,Grouping.spell_no==MPI.spell_no,Grouping.paper_ref==MPI.paper_ref).all()
else:
dt_start='-12-01'
dt_end ='-12-31'
str_date_start = str(year)+dt_start
date_start=datetime.strptime(str_date_start, "%Y-%m-%d")
str_date_end = str(year)+dt_end
date_end=datetime.strptime(str_date_end, "%Y-%m-%d")
dat1=db.session.query(Grouping.date,Shooter.service_id,Rank.name,Shooter.name.label('firer'),Shooter.unit,Shooter.brigade,Grouping.detail_no,Grouping.result,Grouping.grouping_length_f,MPI.tendency_text).filter(Grouping.date.between(date_start,date_end), Grouping.firer_id==Shooter.id,Shooter.rank_id==Rank.id and Grouping.date==MPI.date, Grouping.session_id==MPI.session_id,Grouping.firer_id==MPI.firer_id,Grouping.detail_no==MPI.detail_no,Grouping.target_no==MPI.target_no,Grouping.spell_no==MPI.spell_no,Grouping.paper_ref==MPI.paper_ref).all()
return render_template('pages/monthly_report.html', dat1=dat1 ,month=month)
except Exception as e:
return render_template('errors/month_session.html')
return render_template('pages/monthly_report.html')
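# save_target_1: registers a single-firer detail (targets 2-8 default to the
# placeholder id 999), refreshes TPaper_ref and mirrors the row into the
# temporary TShooting table used by the live and image-processing views.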
@app.route('/save_target_1/', methods=['GET', 'POST'])
def save_target_1():
ref_1=None
try:
if request.method == 'POST':
detail_no = request.form['game_id_1']
r=request.form['tag']
r_object=Shooter.query.filter(Shooter.service_id==r).scalar()
r_id=r_object.id
r2_id=999
r3_id=999
r4_id=999
r5_id=999
r6_id=999
r7_id=999
r8_id=999
ref=request.form['business']
set_no = request.form.get('comp_select_6')
shots = request.form['tag_8']
sess=request.form.get('comp_select')
ref_1 = None
paper=db.session.query(TPaper_ref).scalar()
if(ref == ""):
ref_1=paper.paper_ref
else:
ref_1=ref
temp_shooting=db.session.query(TShooting).scalar()
if(temp_shooting is None):
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r2_id,
target_3_id=r3_id,
target_4_id=r4_id,
target_5_id=r5_id,
target_6_id=r6_id,
target_7_id=r7_id,
target_8_id=r8_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
db.session.query(TPaper_ref).delete()
db.session.commit()
ref_db = TPaper_ref(
paper_ref=ref_1,
detail_no=detail_no,
session_no=sess
)
db.session.add(ref_db)
db.session.commit()
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r2_id,
target_3_id=r3_id,
target_4_id=r4_id,
target_5_id=r5_id,
target_6_id=r6_id,
target_7_id=r7_id,
target_8_id=r8_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(Tdetail_shots)
db.session.commit()
else:
db.session.query(TShooting).delete()
db.session.commit()
db.session.query(TPaper_ref).delete()
db.session.commit()
ref_db = TPaper_ref(
paper_ref=ref_1,
detail_no=detail_no,
session_no=sess
)
db.session.add(ref_db)
db.session.commit()
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r2_id,
target_3_id=r3_id,
target_4_id=r4_id,
target_5_id=r5_id,
target_6_id=r6_id,
target_7_id=r7_id,
target_8_id=r8_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r2_id,
target_3_id=r3_id,
target_4_id=r4_id,
target_5_id=r5_id,
target_6_id=r6_id,
target_7_id=r7_id,
target_8_id=r8_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(Tdetail_shots)
db.session.commit()
except Exception as e:
return redirect(url_for('error_target_1'))
return redirect(url_for('individual_score_target_1'))
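# load (/FRAS/): main manual detail entry. Resolves up to eight service ids to
# Shooter rows (blank slots become 999), rejects duplicate firers and set_no > 5,
# then writes the detail to both Session_Detail and the temporary TShooting table.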
@app.route('/FRAS/', methods=['GET', 'POST'])
def load ():
try:
ref_1=None
if request.method == 'POST':
detail_no = request.form['game_id_1']
tmp_list = []
duplicate = False
r=request.form['tag']
if (r== ""):
r_id = 999
else:
r_object=Shooter.query.filter(Shooter.service_id==r).scalar()
r_id=r_object.id
r1=request.form['tag_1']
if(r1== ""):
r1_id=999
else:
r1_object=Shooter.query.filter(Shooter.service_id==r1).scalar()
r1_id=r1_object.id
r2=request.form['tag_2']
if (r2==""):
r2_id=999
else:
r2_object=Shooter.query.filter(Shooter.service_id==r2).scalar()
r2_id=r2_object.id
r3=request.form['tag_3']
if(r3==""):
r3_id=999
else:
r3_object=Shooter.query.filter(Shooter.service_id==r3).scalar()
r3_id=r3_object.id
r4=request.form['tag_4']
if(r4==""):
r4_id=999
else:
r4_object=Shooter.query.filter(Shooter.service_id==r4).scalar()
r4_id=r4_object.id
r5=request.form['tag_5']
if(r5==""):
r5_id=999
else:
r5_object=Shooter.query.filter(Shooter.service_id==r5).scalar()
r5_id=r5_object.id
r6=request.form['tag_6']
if(r6==""):
r6_id=999
else:
r6_object=Shooter.query.filter(Shooter.service_id==r6).scalar()
r6_id=r6_object.id
r7=request.form['tag_7']
if(r7== ""):
r7_id=999
else:
r7_object=Shooter.query.filter(Shooter.service_id==r7).scalar()
r7_id=r7_object.id
ref=request.form['business']
set_no = request.form.get('comp_select_6')
shots = request.form['tag_8']
sess=request.form.get('comp_select')
tmp_list.append(r_id)
tmp_list.append(r1_id)
tmp_list.append(r2_id)
tmp_list.append(r3_id)
tmp_list.append(r4_id)
tmp_list.append(r5_id)
tmp_list.append(r6_id)
tmp_list.append(r7_id)
if ref == None or ref =="":
ref_obj=TPaper_ref.query.scalar()
ref_1=ref_obj.paper_ref
else :
print("Inside ref _4 else")
ref_1=ref
print(ref_1)
print("Inside ref _4 else 1")
if(int(set_no)>5):
print("Inside ref _5 else")
return redirect(url_for('paper_duplicate_error'))
else:
print("Inside TPaper_ref")
db.session.query(TPaper_ref).delete()
print("Inside TPaper_ref")
db.session.commit()
ref_db = TPaper_ref(
paper_ref=ref_1,
detail_no=detail_no,
session_no=sess
)
db.session.add(ref_db)
db.session.commit()
print("Inside load 3")
for i in range(len(tmp_list)):
for j in range(len(tmp_list)):
if(tmp_list[i]== 999 and tmp_list[j]==999):
duplicate = False
elif(i!=j and tmp_list[i]==tmp_list[j]):
duplicate = True
print("temp1")
if(duplicate):
return redirect(url_for('duplicate_firer_error'))
else:
print("temp")
temp=db.session.query(TShooting.save_flag).scalar()
print(temp)
if(temp is None):
print("Inside the temp if")
print(sess)
print(detail_no)
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
print(Tdetail_shots)
print("Tdetail_shots")
db.session.add(Tdetail_shots)
db.session.commit()
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
else:
db.session.query(TShooting).filter(TShooting.id != 999).delete()
db.session.commit()
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(Tdetail_shots)
db.session.commit()
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
except Exception as e:
print(e)
return redirect(url_for('error_2'))
return redirect(url_for('image_process'))
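# load_1 (/FRAS_1/): same flow as load(), except the eight targets come from the
# TGroup row selected earlier (its group number is kept in the Flask session)
# rather than from individual form fields.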
@app.route('/FRAS_1/', methods=['GET', 'POST'])
def load_1 ():
ref_1=None
try:
if request.method == 'POST':
print("This is inside Post")
detail_no = request.form['game_id_1']
print("this is detail_no")
print(detail_no)
tmp_list = []
duplicate = False
gr=session.get('group',None)
data=TGroup.query.filter(TGroup.group_no==gr).scalar()
da_1=data.target_1_no
da_2=data.target_2_no
da_3=data.target_3_no
da_4=data.target_4_no
da_5=data.target_5_no
da_6=data.target_6_no
da_7=data.target_7_no
da_8=data.target_8_no
if(da_1==""):
r_id=999
else:
r=Shooter.query.filter(Shooter.service_id==da_1).scalar()
r_id=r.id
if(da_2==""):
r1_id=999
else:
r1=Shooter.query.filter(Shooter.service_id==da_2).scalar()
r1_id=r1.id
if(da_3==""):
r2_id=999
else:
r2=Shooter.query.filter(Shooter.service_id==da_3).scalar()
r2_id=r2.id
if(da_4==""):
r3_id=999
else:
r3=Shooter.query.filter(Shooter.service_id==da_4).scalar()
r3_id=r3.id
if(da_5==""):
r4_id=999
else:
r4=Shooter.query.filter(Shooter.service_id==da_5).scalar()
r4_id=r4.id
if(da_6==""):
r5_id=999
else:
r5=Shooter.query.filter(Shooter.service_id==da_6).scalar()
r5_id=r5.id
if(da_7==""):
r6_id=999
else:
r6=Shooter.query.filter(Shooter.service_id==da_7).scalar()
r6_id=r6.id
if(da_8==""):
r7_id=999
else:
r7=Shooter.query.filter(Shooter.service_id==da_8).scalar()
r7_id=r7.id
ref=request.form['business']
set_no = request.form.get('comp_select_6')
shots = request.form['tag_8']
sess=request.form.get('comp_select')
tmp_list.append(r_id)
tmp_list.append(r1_id)
tmp_list.append(r2_id)
tmp_list.append(r3_id)
tmp_list.append(r4_id)
tmp_list.append(r5_id)
tmp_list.append(r6_id)
tmp_list.append(r7_id)
print(tmp_list)
if ref == None or ref =="":
ref_obj=TPaper_ref.query.scalar()
ref_1=ref_obj.paper_ref
else :
ref_1=ref
check=TPaper_ref.query.scalar()
cses=check.session_no
det=check.detail_no
if(int(set_no)>5):
return redirect(url_for('paper_duplicate_error'))
else:
db.session.query(TPaper_ref).delete()
db.session.commit()
ref_db = TPaper_ref(
paper_ref=ref_1,
detail_no=detail_no,
session_no=sess
)
db.session.add(ref_db)
db.session.commit()
for i in range(len(tmp_list)):
for j in range(len(tmp_list)):
if(tmp_list[i]== 999 and tmp_list[j]==999):
duplicate = False
elif(i!=j and tmp_list[i]==tmp_list[j]):
duplicate = True
if(duplicate):
return redirect(url_for('duplicate_firer_error'))
else:
temp_shooting=db.session.query(TShooting).scalar()
if(temp_shooting is None):
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(Tdetail_shots)
db.session.commit()
else:
db.session.query(TShooting).filter(TShooting.id != 999).delete()
db.session.commit()
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(Tdetail_shots)
db.session.commit()
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
except Exception as e:
return redirect(url_for('error_102'))
return redirect(url_for('detail_view'))
@app.route('/FRAS_2/', methods=['GET', 'POST'])
def load_2 ():
ref_1=None
try:
if request.method == 'POST':
print("This is inside Post")
detail_no = request.form['game_id_1']
print("this is detail_no")
print(detail_no)
tmp_list = []
duplicate = False
gr=session.get('group',None)
data=TGroup.query.filter(TGroup.group_no==gr).scalar()
da_1=data.target_1_no
da_2=data.target_2_no
da_3=data.target_3_no
da_4=data.target_4_no
da_5=data.target_5_no
da_6=data.target_6_no
da_7=data.target_7_no
da_8=data.target_8_no
if(da_1==""):
r_id=999
else:
r=Shooter.query.filter(Shooter.service_id==da_1).scalar()
r_id=r.id
if(da_2==""):
r1_id=999
else:
r1=Shooter.query.filter(Shooter.service_id==da_2).scalar()
r1_id=r1.id
if(da_3==""):
r2_id=999
else:
r2=Shooter.query.filter(Shooter.service_id==da_3).scalar()
r2_id=r2.id
if(da_4==""):
r3_id=999
else:
r3=Shooter.query.filter(Shooter.service_id==da_4).scalar()
r3_id=r3.id
if(da_5==""):
r4_id=999
else:
r4=Shooter.query.filter(Shooter.service_id==da_5).scalar()
r4_id=r4.id
if(da_6==""):
r5_id=999
else:
r5=Shooter.query.filter(Shooter.service_id==da_6).scalar()
r5_id=r5.id
if(da_7==""):
r6_id=999
else:
r6=Shooter.query.filter(Shooter.service_id==da_7).scalar()
r6_id=r6.id
if(da_8==""):
r7_id=999
else:
r7=Shooter.query.filter(Shooter.service_id==da_8).scalar()
r7_id=r7.id
ref=request.form['business']
set_no = request.form.get('comp_select_6')
shots = request.form['tag_8']
sess=request.form.get('comp_select')
tmp_list.append(r_id)
tmp_list.append(r1_id)
tmp_list.append(r2_id)
tmp_list.append(r3_id)
tmp_list.append(r4_id)
tmp_list.append(r5_id)
tmp_list.append(r6_id)
tmp_list.append(r7_id)
print(tmp_list)
if ref == None or ref =="":
ref_obj=TPaper_ref.query.scalar()
ref_1=ref_obj.paper_ref
else :
ref_1=ref
check=TPaper_ref.query.scalar()
cses=check.session_no
det=check.detail_no
if(int(set_no)>5):
return redirect(url_for('paper_duplicate_error'))
else:
db.session.query(TPaper_ref).delete()
db.session.commit()
ref_db = TPaper_ref(
paper_ref=ref_1,
detail_no=detail_no,
session_no=sess
)
db.session.add(ref_db)
db.session.commit()
for i in range(len(tmp_list)):
for j in range(len(tmp_list)):
if(tmp_list[i]== 999 and tmp_list[j]==999):
duplicate = False
elif(i!=j and tmp_list[i]==tmp_list[j]):
duplicate = True
if(duplicate):
return redirect(url_for('duplicate_firer_error'))
else:
temp_shooting=db.session.query(TShooting).scalar()
if(temp_shooting is None):
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(Tdetail_shots)
db.session.commit()
else:
db.session.query(TShooting).filter(TShooting.id != 999).delete()
db.session.commit()
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(Tdetail_shots)
db.session.commit()
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
except Exception as e:
print(e)
return redirect(url_for('error'))
return redirect(url_for('image_process'))
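# detail_view / view_detail / view_detail_edit: listing, inspection and editing
# of saved Session_Detail rows; the edit view also refreshes TPaper_ref and the
# temporary TShooting table unless the current detail has already been saved.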
@app.route('/detail_view/', methods=['GET', 'POST'])
def detail_view():
detail = Session_Detail.query.all()
for details in detail:
details.target_1=Shooter.query.filter(Shooter.id==details.target_1_id).scalar()
details.target_2=Shooter.query.filter(Shooter.id==details.target_2_id).scalar()
details.target_3=Shooter.query.filter(Shooter.id==details.target_3_id).scalar()
details.target_4=Shooter.query.filter(Shooter.id==details.target_4_id).scalar()
details.target_5=Shooter.query.filter(Shooter.id==details.target_5_id).scalar()
details.target_6=Shooter.query.filter(Shooter.id==details.target_6_id).scalar()
details.target_7=Shooter.query.filter(Shooter.id==details.target_7_id).scalar()
details.target_8=Shooter.query.filter(Shooter.id==details.target_8_id).scalar()
return render_template('pages/detail_view.html',detail=detail)
@app.route('/detail_view/detail/<id>', methods=['GET', 'POST'])
def view_detail(id):
detail=Session_Detail.query.filter(Session_Detail.id == id)
for details in detail:
details.target_1=Shooter.query.filter(Shooter.id==details.target_1_id).scalar()
details.target_2=Shooter.query.filter(Shooter.id==details.target_2_id).scalar()
details.target_3=Shooter.query.filter(Shooter.id==details.target_3_id).scalar()
details.target_4=Shooter.query.filter(Shooter.id==details.target_4_id).scalar()
details.target_5=Shooter.query.filter(Shooter.id==details.target_5_id).scalar()
details.target_6=Shooter.query.filter(Shooter.id==details.target_6_id).scalar()
details.target_7=Shooter.query.filter(Shooter.id==details.target_7_id).scalar()
details.target_8=Shooter.query.filter(Shooter.id==details.target_8_id).scalar()
return render_template('pages/detail_view_id.html',data=detail)
@app.route('/detail_view/edit/<id>', methods=['GET', 'POST'])
def view_detail_edit(id):
try:
detail=Session_Detail.query.filter(Session_Detail.id == id).first()
form=DetailEditForm(obj=detail)
if form.validate_on_submit():
tmp_list = []
target_1=Shooter.query.filter(Shooter.service_id == form.target_1_service.data).scalar()
tmp_list.append(target_1.id)
target_2=Shooter.query.filter(Shooter.service_id == form.target_2_service.data).scalar()
tmp_list.append(target_2.id)
target_3=Shooter.query.filter(Shooter.service_id == form.target_3_service.data).scalar()
tmp_list.append(target_3.id)
target_4=Shooter.query.filter(Shooter.service_id == form.target_4_service.data).scalar()
tmp_list.append(target_4.id)
target_5=Shooter.query.filter(Shooter.service_id == form.target_5_service.data).scalar()
tmp_list.append(target_5.id)
target_6=Shooter.query.filter(Shooter.service_id == form.target_6_service.data).scalar()
tmp_list.append(target_6.id)
target_7=Shooter.query.filter(Shooter.service_id == form.target_7_service.data).scalar()
tmp_list.append(target_7.id)
target_8=Shooter.query.filter(Shooter.service_id == form.target_8_service.data).scalar()
tmp_list.append(target_8.id)
duplicate = False
for i in range(len(tmp_list)):
for j in range(len(tmp_list)):
if(tmp_list[i]== 999 and tmp_list[j]==999):
duplicate = False
elif(i!=j and tmp_list[i]==tmp_list[j]):
duplicate = True
if(duplicate):
return redirect(url_for('duplicate_firer_error'))
else:
detail.date=form.date.data
detail.session_id=form.session_id.data
detail.detail_no=form.detail_no.data
detail.paper_ref=form.paper_ref.data
detail.set_no=form.set_no.data
target_1_obj=Shooter.query.filter(Shooter.service_id == form.target_1_service.data).scalar()
detail.target_1_id=target_1_obj.id
target_2_obj=Shooter.query.filter(Shooter.service_id == form.target_2_service.data).scalar()
detail.target_2_id=target_2_obj.id
target_3_obj=Shooter.query.filter(Shooter.service_id == form.target_3_service.data).scalar()
detail.target_3_id=target_3_obj.id
target_4_obj=Shooter.query.filter(Shooter.service_id == form.target_4_service.data).scalar()
detail.target_4_id=target_4_obj.id
target_5_obj=Shooter.query.filter(Shooter.service_id == form.target_5_service.data).scalar()
detail.target_5_id=target_5_obj.id
target_6_obj=Shooter.query.filter(Shooter.service_id == form.target_6_service.data).scalar()
detail.target_6_id=target_6_obj.id
target_7_obj=Shooter.query.filter(Shooter.service_id == form.target_7_service.data).scalar()
detail.target_7_id=target_7_obj.id
target_8_obj=Shooter.query.filter(Shooter.service_id == form.target_8_service.data).scalar()
detail.target_8_id=target_8_obj.id
db.session.commit()
db.session.query(TPaper_ref).delete()
db.session.commit()
ref_edit = TPaper_ref(
paper_ref=form.paper_ref.data,
detail_no=form.detail_no.data,
session_no=form.session_id.data
)
db.session.add(ref_edit)
db.session.commit()
target_1_obj=Shooter.query.filter(Shooter.service_id == form.target_1_service.data).scalar()
target_2_obj=Shooter.query.filter(Shooter.service_id == form.target_2_service.data).scalar()
target_3_obj=Shooter.query.filter(Shooter.service_id == form.target_3_service.data).scalar()
target_4_obj=Shooter.query.filter(Shooter.service_id == form.target_4_service.data).scalar()
target_5_obj=Shooter.query.filter(Shooter.service_id == form.target_5_service.data).scalar()
target_6_obj=Shooter.query.filter(Shooter.service_id == form.target_6_service.data).scalar()
target_7_obj=Shooter.query.filter(Shooter.service_id == form.target_7_service.data).scalar()
target_8_obj=Shooter.query.filter(Shooter.service_id == form.target_8_service.data).scalar()
temp_shooting=db.session.query(TShooting).scalar()
if(temp_shooting.save_flag==1):
return redirect(url_for('data_save'))
else:
db.session.query(TShooting).filter(TShooting.id != 999).delete()
db.session.commit()
Tdetail_edit =TShooting(
date=form.date.data,
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=form.session_id.data,
detail_no=form.detail_no.data,
target_1_id=target_1_obj.id,
target_2_id=target_2_obj.id,
target_3_id=target_3_obj.id,
target_4_id=target_4_obj.id,
target_5_id=target_5_obj.id,
target_6_id=target_6_obj.id,
target_7_id=target_7_obj.id,
target_8_id=target_8_obj.id,
paper_ref=form.paper_ref.data,
set_no=form.set_no.data,
save_flag=0
)
db.session.add(Tdetail_edit)
db.session.commit()
return redirect(url_for('detail_view'))
form.date.data=detail.date
form.session_id.data=detail.session_id
form.detail_no.data=detail.detail_no
form.paper_ref.data=detail.paper_ref
form.set_no.data=detail.set_no
        name_1= Shooter.query.filter(Shooter.id==detail.target_1_id).scalar()
        form.target_1_service.data=name_1.service_id
        name_2= Shooter.query.filter(Shooter.id==detail.target_2_id).scalar()
        form.target_2_service.data=name_2.service_id
        name_3= Shooter.query.filter(Shooter.id==detail.target_3_id).scalar()
        form.target_3_service.data=name_3.service_id
        name_4= Shooter.query.filter(Shooter.id==detail.target_4_id).scalar()
        form.target_4_service.data=name_4.service_id
        name_5=Shooter.query.filter(Shooter.id==detail.target_5_id).scalar()
        form.target_5_service.data=name_5.service_id
        name_6=Shooter.query.filter(Shooter.id==detail.target_6_id).scalar()
        form.target_6_service.data=name_6.service_id
        name_7=Shooter.query.filter(Shooter.id==detail.target_7_id).scalar()
        form.target_7_service.data=name_7.service_id
        name_8=Shooter.query.filter(Shooter.id==detail.target_8_id).scalar()
        form.target_8_service.data=name_8.service_id
except Exception as e:
return render_template('errors/detail_view.html')
return render_template('pages/detail_view_edit.html' , detail=detail,form=form)
@app.route('/data_save', methods=['GET', 'POST'])
def data_save():
return render_template('pages/data_save.html')
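# target_registration / registration: single-shooter registration, either from
# the JSON payload posted by the registration page or via the WTForms flow;
# both resolve gender, rank and cantonment/division names to their ids first.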
@app.route('/target_registration/', methods=['GET', 'POST'])
def target_registration():
result=None
if request.method=="POST":
data1 = request.get_json()
print(data1)
cant=data1['cant']
div=data1['div']
rank=data1['rank']
gen=data1['gender']
dt=data1['date']
name=data1['name']
army_no=data1['service']
unit=data1['unit']
brigade=data1['brig']
gender_id=db.session.query(Gender.id).filter(Gender.name==gen).scalar()
rank_id=db.session.query(Rank.id).filter(Rank.name==rank).scalar()
cant_id=db.session.query(Cantonment.id).filter(Cantonment.cantonment==cant ,Cantonment.division==div).scalar()
print("cant_id")
print(cant_id)
shooter = Shooter(
name=name,
service_id = army_no,
registration_date = dt,
gender_id=gender_id,
cantonment_id = cant_id,
rank_id =rank_id,
unit=unit,
brigade=brigade
)
db.session.add(shooter)
db.session.commit()
result="Data Saved Sucessfully"
return jsonify(result=result)
@app.route('/shooter_registration/', methods=['GET', 'POST'])
def registration():
try:
cantonment=Cantonment.query.distinct(Cantonment.cantonment)
gender =Gender.query.all()
rank = Rank.query.all()
ran = request.form.get('comp_select4')
cant = request.form.get('comp_select')
gen = request.form.get('comp_select5')
brig = request.form.get('comp_select1')
form = RegistrationForm(request.form)
if(ran is None):
pass
else:
ran_object=Rank.query.filter(Rank.name==ran).scalar()
rank_id = ran_object.id
cant_object = Cantonment.query.filter(Cantonment.cantonment==cant,Cantonment.division==brig).scalar()
cant_id = cant_object.id
gen_obj=Gender.query.filter(Gender.name==gen).scalar()
gender_id = gen_obj.id
if form.validate_on_submit():
shooter = Shooter(
name=form.name.data,
service_id = form.service_id.data,
registration_date = form.dt.data.strftime('%Y-%m-%d'),
gender_id=gender_id,
cantonment_id = cant_id,
rank_id =rank_id,
unit=form.unit.data,
brigade=form.brig.data
)
db.session.add(shooter)
db.session.commit()
new_form = RegistrationForm(request.form)
return redirect(url_for('firer_details'))
except Exception as e:
return redirect(url_for('error_4'))
return render_template('forms/registration.html',
cantonment = cantonment ,
form=form ,
rank = rank,
gender=gender)
@app.route('/get_brigade/')
def get_brigade():
cant = request.args.get('customer')
    da = Cantonment.query.filter(Cantonment.cantonment==cant).distinct(Cantonment.division)
data = [{"name": x.division} for x in da]
return jsonify(data)
@app.route('/firer_details/', methods=['GET', 'POST'])
def firer_details():
firer = Shooter.query.all()
for firers in firer:
firers.cantonment_name= Cantonment.query.filter(Cantonment.id==firers.cantonment_id).scalar()
firers.division = Cantonment.query.filter(Cantonment.id==firers.cantonment_id).scalar()
firers.rank = Rank.query.filter(Rank.id==firers.rank_id).scalar()
firers.gender_name = Gender.query.filter(Gender.id==firers.gender_id).scalar()
return render_template('pages/firer_details.html' , firer = firer)
@app.route('/bulk_registration_group')
def bulk_registration_group():
form=BulkRegistrationForm(request.form)
return render_template('pages/bulk_registration_group.html',form=form)
@app.route('/bulk_registration')
def bulk_registration():
cantonment=db.session.query(Cantonment).distinct(Cantonment.cantonment)
form=RegistrationForm(request.form)
return render_template('pages/bulk_registration.html',cantonment=cantonment,form=form)
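# upload: bulk shooter registration from a CSV file. Columns after the header
# row are: name, rank, gender, service id; unit, brigade and cantonment/division
# come from the surrounding form fields.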
@app.route('/upload', methods=['POST'])
def upload():
try:
f = request.files['data_file']
cant = request.form.get('comp_select')
div = request.form.get('comp_select1')
form=RegistrationForm(request.form)
unit = request.form['game_id_1']
brig = request.form['game_id_2']
cant_id = db.session.query(Cantonment.id).filter(Cantonment.cantonment==cant,
Cantonment.division==div
).scalar()
if form.is_submitted():
stream = StringIO(f.stream.read().decode("UTF8"))
csv_input = csv.reader(stream)
lis =list(csv_input)
for i in range(len(lis)):
if (i==0):
pass
else:
shooters = Shooter(
name = lis[i][0],
service_id=lis[i][3],
registration_date=datetime.now(),
gender_id=db.session.query(Gender.id).filter(Gender.name==lis[i][2]).scalar(),
cantonment_id = cant_id,
rank_id = db.session.query(Rank.id).filter(Rank.name==lis[i][1]).scalar(),
unit=unit,
brigade=brig
)
db.session.add(shooters)
db.session.commit()
except Exception as e:
return redirect(url_for('error_3'))
return redirect(url_for('firer_details'))
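# uploadgroup: bulk group upload from a CSV file (group number followed by eight
# target service ids per row). Stale groups dated before today cause the TGroup
# table to be cleared before the new rows are inserted.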
@app.route('/uploadgroup', methods=['POST'])
def uploadgroup():
try:
f = request.files['data_file']
form=BulkRegistrationForm(request.form)
if form.is_submitted():
curdate_p=(date.today())- timedelta(1)
if(db.session.query(db.exists().where(TGroup.date <= curdate_p)).scalar()):
db.session.query(TGroup).delete()
db.session.commit()
stream = StringIO(f.stream.read().decode("UTF8"))
csv_input = csv.reader(stream)
lis =list(csv_input)
for i in range(len(lis)):
if (i==0):
pass
else:
group = TGroup(
date=datetime.now(),
group_no=lis[i][0],
target_1_no=lis[i][1],
target_2_no=lis[i][2],
target_3_no=lis[i][3],
target_4_no=lis[i][4],
target_5_no=lis[i][5],
target_6_no=lis[i][6],
target_7_no=lis[i][7],
target_8_no=lis[i][8]
)
db.session.add(group)
db.session.commit()
else:
stream = StringIO(f.stream.read().decode("UTF8"))
csv_input = csv.reader(stream)
lis =list(csv_input)
for i in range(len(lis)):
if (i==0):
pass
else:
group = TGroup(
date=datetime.now(),
group_no=lis[i][0],
target_1_no=lis[i][1],
target_2_no=lis[i][2],
target_3_no=lis[i][3],
target_4_no=lis[i][4],
target_5_no=lis[i][5],
target_6_no=lis[i][6],
target_7_no=lis[i][7],
target_8_no=lis[i][8]
)
db.session.add(group)
db.session.commit()
except Exception as e:
return redirect(url_for('error_duplicate'))
return redirect(url_for('group_view'))
@app.route('/new_group')
def new_group():
firer = [row.service_id for row in Shooter.query.all()]
return render_template('pages/new_group.html',firer_1=firer)
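# individual_group: creates a single TGroup row from the new-group form, reusing
# the duplicate-firer check from detail entry; groups left over from the previous
# day are purged first.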
@app.route('/individual_group/', methods=['GET', 'POST'])
def individual_group():
try:
curdate_p=(date.today())- timedelta(1)
#check=mysession.query(TGroup).filter(date==curdate_p).all()
if request.method=="POST":
grp = request.form['game_id_1']
tmp_list = []
duplicate = False
r=request.form['tag']
if (r== ""):
r_id = 999
else:
r_object=Shooter.query.filter(Shooter.service_id==r).scalar()
r_id=r_object.id
r1=request.form['tag_1']
if(r1== ""):
r1_id=999
else:
r1_object=Shooter.query.filter(Shooter.service_id==r1).scalar()
r1_id=r1_object.id
r2=request.form['tag_2']
if (r2==""):
r2_id=999
else:
r2_object=Shooter.query.filter(Shooter.service_id==r2).scalar()
r2_id=r2_object.id
r3=request.form['tag_3']
if(r3==""):
r3_id=999
else:
r3_object=Shooter.query.filter(Shooter.service_id==r3).scalar()
r3_id=r3_object.id
r4=request.form['tag_4']
if(r4==""):
r4_id=999
else:
r4_object=Shooter.query.filter(Shooter.service_id==r4).scalar()
r4_id=r4_object.id
r5=request.form['tag_5']
if(r5==""):
r5_id=999
else:
r5_object=Shooter.query.filter(Shooter.service_id==r5).scalar()
r5_id=r5_object.id
r6=request.form['tag_6']
if(r6==""):
r6_id=999
else:
r6_object=Shooter.query.filter(Shooter.service_id==r6).scalar()
r6_id=r6_object.id
r7=request.form['tag_7']
if(r7== ""):
r7_id=999
else:
r7_object=Shooter.query.filter(Shooter.service_id==r7).scalar()
r7_id=r7_object.id
tmp_list.append(r_id)
tmp_list.append(r1_id)
tmp_list.append(r2_id)
tmp_list.append(r3_id)
tmp_list.append(r4_id)
tmp_list.append(r5_id)
tmp_list.append(r6_id)
tmp_list.append(r7_id)
for i in range(len(tmp_list)):
for j in range(len(tmp_list)):
if(tmp_list[i]== 999 and tmp_list[j]==999):
duplicate = False
elif(i!=j and tmp_list[i]==tmp_list[j]):
duplicate = True
if(db.session.query(db.exists().where(TGroup.date == curdate_p)).scalar()):
db.session.query(TGroup).delete()
db.session.commit()
if(duplicate):
return redirect(url_for('duplicate_firer_error'))
else:
gr=TGroup(
date=datetime.now(),
group_no=grp,
target_1_no=r,
target_2_no=r1,
target_3_no=r2,
target_4_no=r3,
target_5_no=r4,
target_6_no=r5,
target_7_no=r6,
target_8_no=r7
)
db.session.add(gr)
db.session.commit()
else:
if(duplicate):
return redirect(url_for('duplicate_firer_error'))
else:
gr=TGroup(
date=datetime.now(),
group_no=grp,
target_1_no=r,
target_2_no=r1,
target_3_no=r2,
target_4_no=r3,
target_5_no=r4,
target_6_no=r5,
target_7_no=r6,
target_8_no=r7
)
db.session.add(gr)
db.session.commit()
except Exception as e:
return render_template('errors/group_view_error.html')
return redirect(url_for('group_view'))
@app.route('/group_view/', methods=['GET', 'POST'])
def group_view():
detail = TGroup.query.all()
return render_template('pages/group_detail_view.html',detail=detail)
@app.route('/group_view/detail/<id>', methods=['GET', 'POST'])
def group_detail_view(id):
view = TGroup.query.filter(TGroup.group_no == id)
return render_template('pages/group_detail_view_id.html' , data = view)
@app.route('/group_details/edit/<id>', methods=['GET', 'POST'])
def group_detail_edit(id):
firer = TGroup.query.filter(TGroup.group_no == id).first()
form=GroupEditForm(obj=firer)
if form.validate_on_submit():
firer.date=form.date.data
firer.target_1_no=form.target_1_army.data
firer.target_2_no=form.target_2_army.data
firer.target_3_no=form.target_3_army.data
firer.target_4_no=form.target_4_army.data
firer.target_5_no=form.target_5_army.data
firer.target_6_no=form.target_6_army.data
firer.target_7_no=form.target_7_army.data
firer.target_8_no=form.target_8_army.data
firer.group_no=form.group_no.data
db.session.commit()
return redirect(url_for('group_view'))
form.group_no.data=firer.group_no
form.target_1_army.data=firer.target_1_no
form.target_2_army.data=firer.target_2_no
form.target_3_army.data=firer.target_3_no
form.target_4_army.data=firer.target_4_no
form.target_5_army.data=firer.target_5_no
form.target_6_army.data=firer.target_6_no
form.target_7_army.data=firer.target_7_no
form.target_8_army.data=firer.target_8_no
return render_template('pages/group_edit.html' , firer = firer , form=form)
@app.route('/firer_details/detail/<id>', methods=['GET', 'POST'])
def firer_detail_view(id):
firer = Shooter.query.filter(Shooter.service_id == id)
for firers in firer:
firers.cantonment_name= Cantonment.query.filter(Cantonment.id==firers.cantonment_id).scalar()
firers.division = Cantonment.query.filter(Cantonment.id==firers.cantonment_id).scalar()
firers.rank = Rank.query.filter(Rank.id==firers.rank_id).scalar()
firers.gender_name = Gender.query.filter(Gender.id==firers.gender_id).scalar()
return render_template('pages/firer_detail_view.html' , data = firer)
@app.route('/firer_details/edit/<id>', methods=['GET', 'POST'])
def firer_detail_edit(id):
firer = Shooter.query.filter(Shooter.service_id == id).first()
form=RegistrationEditForm(obj=firer)
try:
if form.validate_on_submit():
firer.name = form.name.data
firer.service_id=form.service_id.data
firer.registration_date=form.date.data
gender_obj=Gender.query.filter(Gender.name==form.gender.data).scalar()
firer.gender_id=gender_obj.id
cantonment_obj=Cantonment.query.filter(Cantonment.cantonment==form.cantonment.data ,Cantonment.division==form.div.data).scalar()
firer.cantonment_id=cantonment_obj.id
            rank_obj=Rank.query.filter(Rank.name==form.rank.data).distinct(Rank.id).scalar()
firer.rank_id=rank_obj.id
firer.unit=form.unit.data
firer.brigade=form.brigade.data
db.session.commit()
return redirect(url_for('firer_details'))
form.name.data=firer.name
form.service_id.data=firer.service_id
form.date.data=firer.registration_date
gender_name=Gender.query.filter(Gender.id==firer.gender_id).scalar()
form.gender.data=gender_name.name
cantonment_name=Cantonment.query.filter(Cantonment.id==firer.cantonment_id).scalar()
form.cantonment.data=cantonment_name.cantonment
form.div.data=cantonment_name.division
unit_data=Shooter.query.filter(Shooter.service_id==firer.service_id).scalar()
form.unit.data=unit_data.unit
form.brigade.data=unit_data.brigade
rank_name=Rank.query.filter(Rank.id==firer.rank_id).distinct(Rank.name).scalar()
form.rank.data=rank_name.name
except Exception as e:
return redirect(url_for('error_7'))
return render_template('pages/firer_detail_edit.html' , firer = firer , form=form)
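# live: resolves the eight firer ids currently held in TShooting to their names,
# service ids and ranks for the live scoreboard page.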
@app.route('/live/')
def live():
T1_name = mysession.query(Shooter.name).filter(Shooter.id==TShooting.target_1_id).scalar()
T1_service = mysession.query(Shooter.service_id).filter(Shooter.id==TShooting.target_1_id).scalar()
T1_r_id = mysession.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_1_id).scalar()
T1_rank = mysession.query(Rank.name).filter(Rank.id==T1_r_id).scalar()
T2_name = mysession.query(Shooter.name).filter(Shooter.id==TShooting.target_2_id).scalar()
T2_service = mysession.query(Shooter.service_id).filter(Shooter.id==TShooting.target_2_id).scalar()
T2_r_id = mysession.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_2_id).scalar()
T2_rank = mysession.query(Rank.name).filter(Rank.id==T2_r_id).scalar()
T3_name = mysession.query(Shooter.name).filter(Shooter.id==TShooting.target_3_id).scalar()
T3_service = mysession.query(Shooter.service_id).filter(Shooter.id==TShooting.target_3_id).scalar()
T3_r_id = mysession.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_3_id).scalar()
T3_rank = mysession.query(Rank.name).filter(Rank.id==T3_r_id).scalar()
T4_name = mysession.query(Shooter.name).filter(Shooter.id==TShooting.target_4_id).scalar()
T4_service = mysession.query(Shooter.service_id).filter(Shooter.id==TShooting.target_4_id).scalar()
T4_r_id = mysession.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_4_id).scalar()
T4_rank = mysession.query(Rank.name).filter(Rank.id==T4_r_id).scalar()
T5_name = mysession.query(Shooter.name).filter(Shooter.id==TShooting.target_5_id).scalar()
T5_service = mysession.query(Shooter.service_id).filter(Shooter.id==TShooting.target_5_id).scalar()
T5_r_id = mysession.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_5_id).scalar()
T5_rank = mysession.query(Rank.name).filter(Rank.id==T5_r_id).scalar()
T6_name = mysession.query(Shooter.name).filter(Shooter.id==TShooting.target_6_id).scalar()
T6_service = mysession.query(Shooter.service_id).filter(Shooter.id==TShooting.target_6_id).scalar()
T6_r_id = mysession.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_6_id).scalar()
T6_rank = mysession.query(Rank.name).filter(Rank.id==T6_r_id).scalar()
T7_name = mysession.query(Shooter.name).filter(Shooter.id==TShooting.target_7_id).scalar()
T7_service = mysession.query(Shooter.service_id).filter(Shooter.id==TShooting.target_7_id).scalar()
T7_r_id = mysession.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_7_id).scalar()
T7_rank = mysession.query(Rank.name).filter(Rank.id==T7_r_id).scalar()
T8_name = mysession.query(Shooter.name).filter(Shooter.id==TShooting.target_8_id).scalar()
T8_service = mysession.query(Shooter.service_id).filter(Shooter.id==TShooting.target_8_id).scalar()
T8_r_id = mysession.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_8_id).scalar()
T8_rank = mysession.query(Rank.name).filter(Rank.id==T8_r_id).scalar()
return render_template('pages/live.html' ,
T1_name=T1_name,
T1_service=T1_service,
T2_name=T2_name,
T2_service=T2_service,
T3_name=T3_name,
T3_service=T3_service,
T4_name=T4_name,
T4_service=T4_service,
T5_name=T5_name,
T5_service=T5_service,
T6_name=T6_name,
T6_service=T6_service,
T7_name=T7_name,
T7_service=T7_service,
T8_name=T8_name,
T8_service=T8_service,
T1_rank=T1_rank,
T2_rank=T2_rank,
T3_rank=T3_rank,
T4_rank=T4_rank,
T5_rank=T5_rank,
T6_rank=T6_rank,
T7_rank=T7_rank,
T8_rank=T8_rank
)
@app.route('/cam_detail_2/', methods=['GET', 'POST'])
def cam_detail_2():
return render_template('pages/cam_detail_1.html')
@app.route('/cam_detail_4/', methods=['GET', 'POST'])
def cam_detail_4():
return render_template('pages/cam_detail_2.html')
@app.route('/cam_detail_1/', methods=['GET', 'POST'])
def cam_detail_1():
return render_template('pages/cam_detail_3.html')
@app.route('/cam_detail_3/', methods=['GET', 'POST'])
def cam_detail_3():
return render_template('pages/cam_detail_4.html')
@app.route('/cam_detail_6/', methods=['GET', 'POST'])
def cam_detail_6():
return render_template('pages/cam_detail_5.html')
@app.route('/cam_detail_8/', methods=['GET', 'POST'])
def cam_detail_8():
return render_template('pages/cam_detail_6.html')
@app.route('/cam_detail_7/', methods=['GET', 'POST'])
def cam_detail_7():
return render_template('pages/cam_detail_7.html')
@app.route('/cam_detail_5/', methods=['GET', 'POST'])
def cam_detail_5():
return render_template('pages/cam_detail_8.html')
@app.route('/session_setup/', methods=['GET', 'POST'])
def session_setup():
try:
data = Shooter.query.all()
rang= Range.query.all()
firearms = Firearms.query.all()
ammunation = Ammunation.query.all()
rang_name = request.form.get('comp_select_4')
fire_name = request.form.get('comp_select_5')
ammu_name = request.form.get('comp_select_6')
form=SessionForm()
if(rang_name is None):
range_id=999
fire_id=999
ammu_id=999
else:
range_id = db.session.query(Range.id).filter(Range.name==rang_name).scalar()
fire_id = db.session.query(Firearms.id).filter(Firearms.name==fire_name).scalar()
ammu_id = db.session.query(Ammunation.id).filter(Ammunation.name==ammu_name).scalar()
if form.validate_on_submit():
shooting=Shooting_Session(
date=form.date.data.strftime('%Y-%m-%d'),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
shooting_range_id=range_id,
firearms_id=fire_id,
ammunation_id=ammu_id,
target_distance = form.target_distance.data,
weather_notes = form.weather_notes.data,
comments = form.comments.data,
session_no=form.session_no.data,
occasion=form.occ.data
)
db.session.add(shooting)
db.session.commit()
return redirect(url_for('session_config'))
except Exception as e:
        # NOTE: url_for() expects an endpoint name, not a template file; render the error page directly.
        return render_template('error5_505.html')
return render_template('forms/shooting_form.html', form=form, data =data ,rang=rang , firearmns=firearms, ammunation = ammunation)
@app.route('/configuration/', methods=['GET', 'POST'])
def session_config():
config = Shooting_Session.query.all()
for con in config:
con.range_name = Range.query.filter(Range.id==con.shooting_range_id).scalar()
con.firerarms_name = Firearms.query.filter(Firearms.id==con.firearms_id).scalar()
con.ammunation_name = Ammunation.query.filter(Ammunation.id==con.ammunation_id).scalar()
return render_template('pages/shooting_configuration_detail.html',con=config)
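# image_process: builds the per-target header (name, service id, rank) for the
# image-processing page from the temporary TShooting row, falling back to "NA"
# for empty lanes (id 999) or when no detail is currently loaded.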
@app.route('/image_process/')
def image_process():
dt=time.strftime("%Y-%m-%d")
detail_data=db.session.query(Session_Detail).filter(Session_Detail.date==dt,Session_Detail.save_flag==0).all()
data =TShooting.query.scalar()
if(data is None):
T1_name ="NA"
T1_service ="NA"
T1_rank="NA"
T2_name ="NA"
T2_service ="NA"
T2_rank="NA"
T3_name ="NA"
T3_service ="NA"
T3_rank="NA"
T4_name ="NA"
T4_service ="NA"
T4_rank="NA"
T5_name ="NA"
T5_service ="NA"
T5_rank="NA"
T6_name ="NA"
T6_service ="NA"
T6_rank="NA"
T7_name ="NA"
T7_service ="NA"
T7_rank="NA"
T8_name ="NA"
T8_service ="NA"
T8_rank="NA"
elif(data.save_flag == 1 ):
db.session.query(TShooting).delete()
db.session.commit()
T1_name ="NA"
T1_service ="NA"
T1_rank="NA"
T2_name ="NA"
T2_service ="NA"
T2_rank="NA"
T3_name ="NA"
T3_service ="NA"
T3_rank="NA"
T4_name ="NA"
T4_service ="NA"
T4_rank="NA"
T5_name ="NA"
T5_service ="NA"
T5_rank="NA"
T6_name ="NA"
T6_service ="NA"
T6_rank="NA"
T7_name ="NA"
T7_service ="NA"
T7_rank="NA"
T8_name ="NA"
T8_service ="NA"
T8_rank="NA"
else:
T1=Shooter.query.filter(Shooter.id==TShooting.target_1_id).scalar()
if(T1 is None):
T1_name ="NA"
T1_service ="NA"
T1_rank="NA"
else:
T1_name = T1.name
T1_service = T1.service_id
T1_r_id = T1.rank_id
T1_rank_id = Rank.query.filter(Rank.id==T1_r_id).scalar()
T1_rank=T1_rank_id.name
T2=Shooter.query.filter(Shooter.id==TShooting.target_2_id).scalar()
if(T2 is None):
T2_name ="NA"
T2_service ="NA"
T2_rank="NA"
else:
T2_name = T2.name
T2_service = T2.service_id
T2_r_id = T2.rank_id
T2_rank_id = Rank.query.filter(Rank.id==T2_r_id).scalar()
T2_rank=T2_rank_id.name
T3=Shooter.query.filter(Shooter.id==TShooting.target_3_id,TShooting.target_3_id!=999).scalar()
if(T3 is None):
T3_name ="NA"
T3_service ="NA"
T3_rank="NA"
else:
T3_name = T3.name
T3_service = T3.service_id
T3_r_id = T3.rank_id
T3_rank_id = Rank.query.filter(Rank.id==T3_r_id).scalar()
T3_rank=T3_rank_id.name
T4=Shooter.query.filter(Shooter.id==TShooting.target_4_id,TShooting.target_4_id!=999).scalar()
if(T4 is None):
T4_name ="NA"
T4_service ="NA"
T4_rank="NA"
else:
T4_name = T4.name
T4_service = T4.service_id
T4_r_id = T4.rank_id
T4_rank_id = Rank.query.filter(Rank.id==T4_r_id).scalar()
T4_rank=T4_rank_id.name
T5=Shooter.query.filter(Shooter.id==TShooting.target_5_id).scalar()
if(T5 is None):
T5_name ="NA"
T5_service ="NA"
T5_rank="NA"
else:
T5_name = T5.name
T5_service = T5.service_id
T5_r_id = T5.rank_id
T5_rank_id = Rank.query.filter(Rank.id==T5_r_id).scalar()
T5_rank=T5_rank_id.name
T6=Shooter.query.filter(Shooter.id==TShooting.target_6_id).scalar()
if(T6 is None):
T6_name ="NA"
T6_service ="NA"
T6_rank="NA"
else:
T6_name = T6.name
T6_service = T6.service_id
T6_r_id = T6.rank_id
T6_rank_id = Rank.query.filter(Rank.id==T6_r_id).scalar()
T6_rank=T6_rank_id.name
T7=Shooter.query.filter(Shooter.id==TShooting.target_7_id).scalar()
if(T7 is None):
T7_name ="NA"
T7_service ="NA"
T7_rank="NA"
else:
T7_name = T7.name
T7_service = T7.service_id
T7_r_id = T7.rank_id
T7_rank_id = Rank.query.filter(Rank.id==T7_r_id).scalar()
T7_rank=T7_rank_id.name
T8=Shooter.query.filter(Shooter.id==TShooting.target_8_id).scalar()
if(T8 is None):
T8_name ="NA"
T8_service ="NA"
T8_rank="NA"
else:
T8_name = T8.name
T8_service = T8.service_id
T8_r_id = T8.rank_id
T8_rank_id = Rank.query.filter(Rank.id==T8_r_id).scalar()
T8_rank=T8_rank_id.name
return render_template('pages/image_process.html' ,
T1_name=T1_name,
detail_data=detail_data,
T1_service=T1_service,
T2_name=T2_name,
T2_service=T2_service,
T3_name=T3_name,
T3_service=T3_service,
T4_name=T4_name,
T4_service=T4_service,
T5_name=T5_name,
T5_service=T5_service,
T6_name=T6_name,
T6_service=T6_service,
T7_name=T7_name,
T7_service=T7_service,
T8_name=T8_name,
T8_service=T8_service,
T1_rank=T1_rank,
T2_rank=T2_rank,
T3_rank=T3_rank,
T4_rank=T4_rank,
T5_rank=T5_rank,
T6_rank=T6_rank,
T7_rank=T7_rank,
T8_rank=T8_rank
)
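# Static image-edit pages, one per target lane.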
@app.route('/image_edit_1/', methods=['GET', 'POST'])
def image_edit_1():
return render_template('pages/image_edit_1.html')
@app.route('/image_edit_2/', methods=['GET', 'POST'])
def image_edit_2():
return render_template('pages/image_edit_2.html')
@app.route('/image_edit_3/', methods=['GET', 'POST'])
def image_edit_3():
return render_template('pages/image_edit_3.html')
@app.route('/image_edit_4/', methods=['GET', 'POST'])
def image_edit_4():
return render_template('pages/image_edit_4.html')
@app.route('/image_edit_5/', methods=['GET', 'POST'])
def image_edit_5():
return render_template('pages/image_edit_5.html')
@app.route('/image_edit_6/', methods=['GET', 'POST'])
def image_edit_6():
return render_template('pages/image_edit_6.html')
@app.route('/image_edit_7/', methods=['GET', 'POST'])
def image_edit_7():
return render_template('pages/image_edit_7.html')
@app.route('/image_edit_8/', methods=['GET', 'POST'])
def image_edit_8():
return render_template('pages/image_edit_8.html')
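# Read-only view of a single shooting session configuration.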
@app.route('/configuration/detail/<id>', methods=['GET', 'POST'])
def session_config_detail(id):
config = Shooting_Session.query.filter(Shooting_Session.id == id)
for con in config:
con.range_name = Range.query.filter(Range.id==con.shooting_range_id).scalar()
con.firerarms_name = Firearms.query.filter(Firearms.id==con.firearms_id).scalar()
con.ammunation_name = Ammunation.query.filter(Ammunation.id==con.ammunation_id).scalar()
return render_template('pages/shooting_configuration_detail_view.html',con=config)
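# Edit an existing shooting session configuration and save the changes.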
@app.route('/configuration/edit/<id>', methods=['GET', 'POST'])
def shooting_config_edit(id):
edit = Shooting_Session.query.get_or_404(id)
form = SessionEditForm(obj=edit)
if form.validate_on_submit():
edit.session_no = form.session_no.data
edit.date = form.date.data
edit.occasion=form.occ.data
edit.target_distance = form.target_distance.data
ammunation_id=Ammunation.query.filter(Ammunation.name==form.ammunation_name.data).scalar()
edit.ammunation_id=ammunation_id.id
firearms_id=Firearms.query.filter(Firearms.name==form.firerarms_name.data).scalar()
edit.firearms_id=firearms_id.id
range_id=Range.query.filter(Range.name==form.range_name.data).scalar()
edit.shooting_range_id=range_id.id
edit.weather_notes=form.weather_notes.data
edit.comments=form.comments.data
db.session.commit()
return redirect(url_for('session_config'))
form.session_no.data=edit.session_no
form.date.data=edit.date
form.occ.data=edit.occasion
ammunation_name=Ammunation.query.filter(Ammunation.id==edit.ammunation_id).scalar()
form.ammunation_name.data=ammunation_name.name
firerarms_name=Firearms.query.filter(Firearms.id==edit.firearms_id).scalar()
form.firerarms_name.data=firerarms_name.name
range_name=Range.query.filter(Range.id==edit.shooting_range_id).scalar()
form.range_name.data=range_name.name
form.weather_notes.data=edit.weather_notes
form.comments.data=edit.comments
return render_template('pages/shooting_configuration_edit.html',form=form,edit=edit)
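# Dashboard of the firers currently assigned to targets 1-8 in the live shooting detail.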
@app.route('/detail_dashboard/')
def detail_dashboard():
tshoot=db.session.query(TShooting).scalar()
if(tshoot is None):
T1_name = "NA"
T1_service="NA"
T1_rank ="NA"
T2_name = "NA"
T2_service="NA"
T2_rank ="NA"
T3_name = "NA"
T3_service="NA"
T3_rank ="NA"
T4_name = "NA"
T4_service="NA"
T4_rank ="NA"
T5_name = "NA"
T5_service="NA"
T5_rank ="NA"
T6_name = "NA"
T6_service="NA"
T6_rank ="NA"
T7_name = "NA"
T7_service="NA"
T7_rank ="NA"
T8_name = "NA"
T8_service="NA"
T8_rank ="NA"
else:
T1=Shooter.query.filter(Shooter.id==TShooting.target_1_id).scalar()
T1_name = T1.name
T1_service = T1.service_id
T1_r_id = T1.rank_id
T1_rank_id = Rank.query.filter(Rank.id==T1_r_id).scalar()
T1_rank=T1_rank_id.name
T2=Shooter.query.filter(Shooter.id==TShooting.target_2_id).scalar()
T2_name = T2.name
T2_service = T2.service_id
T2_r_id = T2.rank_id
T2_rank_id = Rank.query.filter(Rank.id==T2_r_id).scalar()
T2_rank=T2_rank_id.name
T3=Shooter.query.filter(Shooter.id==TShooting.target_3_id).scalar()
T3_name = T3.name
T3_service = T3.service_id
T3_r_id = T3.rank_id
T3_rank_id = Rank.query.filter(Rank.id==T3_r_id).scalar()
T3_rank=T3_rank_id.name
T4=Shooter.query.filter(Shooter.id==TShooting.target_4_id).scalar()
T4_name = T4.name
T4_service = T4.service_id
T4_r_id = T4.rank_id
T4_rank_id = Rank.query.filter(Rank.id==T4_r_id).scalar()
T4_rank=T4_rank_id.name
T5=Shooter.query.filter(Shooter.id==TShooting.target_5_id).scalar()
T5_name = T5.name
T5_service = T5.service_id
T5_r_id = T5.rank_id
T5_rank_id = Rank.query.filter(Rank.id==T5_r_id).scalar()
T5_rank=T5_rank_id.name
T6=Shooter.query.filter(Shooter.id==TShooting.target_6_id).scalar()
T6_name = T6.name
T6_service = T6.service_id
T6_r_id = T6.rank_id
T6_rank_id = Rank.query.filter(Rank.id==T6_r_id).scalar()
T6_rank=T6_rank_id.name
T7=Shooter.query.filter(Shooter.id==TShooting.target_7_id).scalar()
T7_name = T7.name
T7_service = T7.service_id
T7_r_id = T7.rank_id
T7_rank_id = Rank.query.filter(Rank.id==T7_r_id).scalar()
T7_rank=T7_rank_id.name
T8=Shooter.query.filter(Shooter.id==TShooting.target_8_id).scalar()
T8_name = T8.name
T8_service = T8.service_id
T8_r_id = T8.rank_id
T8_rank_id = Rank.query.filter(Rank.id==T8_r_id).scalar()
T8_rank=T8_rank_id.name
return render_template('pages/detail_dashboard.html' ,
T1_name=T1_name,
T1_service=T1_service,
T2_name=T2_name,
T2_service=T2_service,
T3_name=T3_name,
T3_service=T3_service,
T4_name=T4_name,
T4_service=T4_service,
T5_name=T5_name,
T5_service=T5_service,
T6_name=T6_name,
T6_service=T6_service,
T7_name=T7_name,
T7_service=T7_service,
T8_name=T8_name,
T8_service=T8_service,
T1_rank=T1_rank,
T2_rank=T2_rank,
T3_rank=T3_rank,
T4_rank=T4_rank,
T5_rank=T5_rank,
T6_rank=T6_rank,
T7_rank=T7_rank,
T8_rank=T8_rank
)
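# Ad-hoc lookup (AJAX): for a given army number, return the firer's profile, recent result/tendency/grouping history and today's set assignments.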
@app.route('/adhoc_detail_1/', methods=['GET', 'POST'])
def adhoc_detail_1():
name_1=None
army=None
rank=None
cant=None
set_1_name=None
set_1_army=None
set_2_name=None
set_2_army=None
set_3_name=None
set_3_army=None
set_4_name=None
set_4_army=None
res=[]
ten=[]
gp_len=[]
if request.method == "POST":
data1 = request.get_json()
army=data1['usr']
curdate=time.strftime("%Y-%m-%d")
name_1=db.session.query(Shooter.name).filter(Shooter.service_id==army).scalar()
target_1_id=db.session.query(Shooter.id).filter(Shooter.service_id==army).scalar()
rank_id=db.session.query(Shooter.rank_id).filter(Shooter.service_id==army).scalar()
cant_id=db.session.query(Shooter.cantonment_id).filter(Shooter.service_id==army).scalar()
rank=db.session.query(Rank.name).filter(Rank.id==rank_id).scalar()
cant=db.session.query(Cantonment.cantonment).filter(Cantonment.id==cant_id).scalar()
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==target_1_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==target_1_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
tgp = db.session.query(Grouping.grouping_length_f).filter(Grouping.firer_id==target_1_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
for ele in tres:
for ele2 in ele:
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
ten.append(ele4)
for ele5 in tgp:
for ele6 in ele5:
gp_len.append(ele6)
set_1_id = db.session.query(Firer_Details.firer_id).filter(Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==1
).distinct().scalar()
set_1_name=db.session.query(Shooter.name).filter(Shooter.id==set_1_id).scalar()
set_1_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_1_id).scalar()
set_2_id = db.session.query(Firer_Details.firer_id).filter(Firer_Details.date==curdate,
Firer_Details.target_no==2,
Firer_Details.set_no==2
).distinct().scalar()
set_2_name=db.session.query(Shooter.name).filter(Shooter.id==set_2_id).scalar()
set_2_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_2_id).scalar()
set_3_id = db.session.query(Firer_Details.firer_id).filter(Firer_Details.date==curdate,
Firer_Details.target_no==3,
Firer_Details.set_no==3
).distinct().scalar()
set_3_name=db.session.query(Shooter.name).filter(Shooter.id==set_3_id).scalar()
set_3_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_3_id).scalar()
set_4_id = db.session.query(Firer_Details.firer_id).filter(Firer_Details.date==curdate,
Firer_Details.target_no==4,
Firer_Details.set_no==4
).distinct().scalar()
set_4_name=db.session.query(Shooter.name).filter(Shooter.id==set_4_id).scalar()
set_4_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_4_id).scalar()
return jsonify(name_1=name_1,army=army,rank=rank,cant=cant,
set_1_name=set_1_name,
set_2_name=set_2_name,
set_3_name=set_3_name,
set_4_name=set_4_name,
set_1_army=set_1_army,
set_2_army=set_2_army,
set_3_army=set_3_army,
set_4_army=set_4_army,
gp_len=gp_len,
res=res,
ten=ten
)
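# Target 1 scoring page: clears session state, resets target 1 set data once set 5 is reached, and renders the prediction view with today's sessions and lookup lists.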
@app.route('/individual_score/target_1', methods=['GET', 'POST'])
def individual_score_target_1():
session.clear()
data=TShooting.query.scalar()
firing_set_arr=[]
cantonment=Cantonment.query.distinct(Cantonment.cantonment)
curdate=time.strftime("%Y-%m-%d")
selection=Shooting_Session.query.filter(Shooting_Session.date>=curdate).order_by(Shooting_Session.datetimestamp.desc()).all()
gender =Gender.query.all()
rank_s = Rank.query.all()
firing_set=db.session.query(Firer_Details.set_no).filter(Firer_Details.target_no==1).distinct().all()
for ele in firing_set:
for ele2 in ele:
firing_set_arr.append(ele2)
if(len(firing_set_arr)<1):
pass
else:
i=len(firing_set_arr)-1
if(firing_set_arr[i]==5):
db.session.query(Firer_Details).filter(Firer_Details.target_no==1).delete()
db.session.commit()
else:
pass
dt=time.strftime("%Y-%m-%d")
curdatetime=datetime.now()
firer_1 = [row.service_id for row in Shooter.query.all()]
detail_data=db.session.query(Session_Detail).filter(Session_Detail.date==dt,Session_Detail.save_flag==0).all()
name = "NA"
detail_no ="NA"
rank ="NA"
target_no = 1
service_id ="NA"
ten = []
res = []
selection=Shooting_Session.query.filter(Shooting_Session.date>=dt).order_by(Shooting_Session.datetimestamp.desc()).all()
firearms = Firearms.query.all()
rang= Range.query.all()
ammunation = Ammunation.query.all()
return render_template('pages/prediction_target_1.html',
curdatetime=curdatetime,
name = name,
firer_1=firer_1,
rank=rank,
detail_data=detail_data,
detail_no=detail_no,
target_no=target_no,
service_id=service_id,
firearms=firearms,
ammunation=ammunation,
data=selection,
rang=rang,
res=res,
date=dt,
ten=ten,
cantonment=cantonment,
gender=gender,
rank_s=rank_s)
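# Save a shooting session posted as JSON from the target 1 page.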
@app.route('/session_target_1/', methods=['GET', 'POST'])
def session_target_1():
if request.method == "POST":
data1 = request.get_json()
session=data1["session"]
ran=data1["range"]
arms=data1["arms"]
distance=data1["dis"]
occ=data1["occ"]
ammu=data1["ammu"]
weather=data1["weather"]
comment=data1["comment"]
range_id=db.session.query(Range.id).filter(Range.name==ran).scalar()
arms_id=db.session.query(Firearms.id).filter(Firearms.name==arms).scalar()
ammu_id=db.session.query(Ammunation.id).filter(Ammunation.name==ammu).scalar()
shooting=Shooting_Session(
date=time.strftime("%Y-%m-%d"),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
shooting_range_id=range_id,
firearms_id=arms_id,
ammunation_id=ammu_id,
target_distance=distance,
weather_notes =weather,
comments =comment,
session_no=session,
occasion=occ
)
db.session.add(shooting)
db.session.commit()
result="This is Successfully Saved"
return jsonify(result=result ,session=session)
@app.route('/target_1_populate/', methods=['GET', 'POST'])
def target_1_populate():
if request.method == 'POST':
session_id=db.session.query(TShooting.session_id).scalar()
return jsonify(session_id=session_id)
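# Load a detail posted as JSON: resolve the eight firers by service id, check for duplicate firers, and store the live detail (TShooting) plus its permanent record (Session_Detail) when the lanes are valid.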
@app.route('/load_detail_1/', methods=['GET', 'POST'])
def load_detail_1():
result_1="Done"
if request.method == 'POST':
curdate=time.strftime("%Y-%m-%d")
r8=None
data=request.get_json()
tmp_list = []
duplicate = False
detail =data["detail"]
sess=data["session"]
paper=data["paper"]
shot=data["shot"]
set=data["set"]
if(data["r1"]==""):
r1_id=999
else:
r1=data["r1"]
r1_id=db.session.query(Shooter.id).filter(Shooter.service_id==r1).scalar()
if(data["r2"]==""):
r2_id=999
else:
r2=data["r2"]
r2_id=db.session.query(Shooter.id).filter(Shooter.service_id==r2).scalar()
if(data["r3"]==""):
r3_id=999
else:
r3=data["r3"]
r3_id=db.session.query(Shooter.id).filter(Shooter.service_id==r3).scalar()
if(data["r4"]==""):
r4_id=999
else:
r4=data["r4"]
r4_id=db.session.query(Shooter.id).filter(Shooter.service_id==r4).scalar()
if(data["r5"]==""):
r5_id=999
else:
r5=data["r5"]
r5_id=db.session.query(Shooter.id).filter(Shooter.service_id==r5).scalar()
if(data["r6"]==""):
r6_id=999
else:
r6=data["r6"]
r6_id=db.session.query(Shooter.id).filter(Shooter.service_id==r6).scalar()
if(data["r7"]==""):
r7_id=999
else:
r7=data["r7"]
r7_id=db.session.query(Shooter.id).filter(Shooter.service_id==r7).scalar()
if(data["r8"]==""):
r8_id=999
else:
r8=data["r8"]
r8_id=db.session.query(Shooter.id).filter(Shooter.service_id==r8).scalar()
tmp_list.append(r1_id)
tmp_list.append(r2_id)
tmp_list.append(r3_id)
tmp_list.append(r4_id)
tmp_list.append(r5_id)
tmp_list.append(r6_id)
tmp_list.append(r7_id)
tmp_list.append(r8_id)
db.session.query(TPaper_ref).delete()
db.session.commit()
ref_db = TPaper_ref(
date=time.strftime("%Y-%m-%d"),
paper_ref=paper,
detail_no=detail,
session_no=sess
)
db.session.add(ref_db)
db.session.commit()
# Flag a duplicate only when a real firer id repeats; the 999 placeholder (empty lane) may
# repeat, and the flag must not be reset by later non-matching pairs.
for i in range(len(tmp_list)):
for j in range(len(tmp_list)):
if(i!=j and tmp_list[i]==tmp_list[j]):
if(tmp_list[i]== 999 and tmp_list[j]==999):
pass
else:
duplicate = True
if(duplicate):
print("inside dup")
error="dup"
else:
db.session.query(TShooting).delete()
db.session.commit()
tshoot=TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail,
target_1_id=r1_id,
target_2_id=r2_id,
target_3_id=r3_id,
target_4_id=r4_id,
target_5_id=r5_id,
target_6_id=r6_id,
target_7_id=r7_id,
target_8_id=r8_id,
paper_ref=paper,
set_no=set,
save_flag=0
)
db.session.add(tshoot)
db.session.commit()
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail,
target_1_id=r1_id,
target_2_id=r2_id,
target_3_id=r3_id,
target_4_id=r4_id,
target_5_id=r5_id,
target_6_id=r6_id,
target_7_id=r7_id,
target_8_id=r8_id,
paper_ref=paper,
set_no=set,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
error="ok"
firer_name,cant,rank,service_id,res,tenden,gp_len,set_4_name,set_4_army,set_4_session_no,set_4_detail_no,set_3_name,set_3_army,set_3_session_no,set_3_detail_no,set_2_name,set_2_army,set_2_session_no,set_2_detail_no,set_1_name,set_1_army,set_1_session_no,set_1_detail_no,current_firer_name,current_army_no,current_session_no,current_detail_no=get_information(r1_id,sess,paper)
result="The Detail is Saved Successfully"
return jsonify(result=result,data1=firer_name,ra_1=rank,detail=detail,
service_id_1=service_id,
session=sess,
paper=paper,
set_no=set,
cant=cant,
gp_len=gp_len,
res=res,
ten=tenden,
set_4_name=set_4_name,
set_3_name=set_3_name,
set_2_name=set_2_name,
set_1_name=set_1_name,
current_firer_name=current_firer_name,
set_4_army=set_4_army,
set_3_army=set_3_army,
set_2_army=set_2_army,
set_1_army=set_1_army,
current_army_no=current_army_no,
set_4_session_no=set_4_session_no,
set_3_session_no=set_3_session_no,
set_2_session_no=set_2_session_no,
set_1_session_no=set_1_session_no,
current_session_no=current_session_no,
set_4_detail_no=set_4_detail_no,
set_3_detail_no=set_3_detail_no,
set_2_detail_no=set_2_detail_no,
set_1_detail_no=set_1_detail_no,
current_detail_no=current_detail_no
)
return jsonify(result_1=result_1)
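# Helper: collect a firer's profile, last five results/tendencies/grouping lengths and today's set 1-4 assignments for target 1.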
def get_information(target_1_id,sess,paper_ref):
res=[]
ten=[]
gp_len=[]
curdate=time.strftime("%Y-%m-%d")
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==target_1_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==target_1_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
tgp = db.session.query(Grouping.grouping_length_f).filter(Grouping.firer_id==target_1_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
for ele in tres:
for ele2 in ele:
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
ten.append(ele4)
for ele5 in tgp:
for ele6 in ele5:
gp_len.append(int(ele6))
da_1=db.session.query(Shooter.name).filter(Shooter.id==target_1_id).scalar()
cant_id=db.session.query(Shooter.cantonment_id).filter(Shooter.id==target_1_id).scalar()
cant=db.session.query(Cantonment.cantonment).filter(Cantonment.id==cant_id).scalar()
ra_1_id=db.session.query(Shooter.rank_id).filter(Shooter.id==target_1_id).scalar()
ra_1 = db.session.query(Rank.name).filter(Rank.id==ra_1_id).scalar()
service_id_1 = db.session.query(Shooter.service_id).filter(Shooter.id==target_1_id).scalar()
set_1_id = db.session.query(Firer_Details.firer_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==1
).distinct().scalar()
set_1_session_no=db.session.query(Firer_Details.session_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==1
).distinct().scalar()
set_1_detail_no=db.session.query(Firer_Details.detail_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==1
).distinct().scalar()
set_1_name=db.session.query(Shooter.name).filter(
Shooter.id==set_1_id
).scalar()
set_1_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_1_id).scalar()
set_2_id = db.session.query(Firer_Details.firer_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==2
).distinct().scalar()
set_2_session_no=db.session.query(Firer_Details.session_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==2
).distinct().scalar()
set_2_detail_no=db.session.query(Firer_Details.detail_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==2
).distinct().scalar()
set_2_name=db.session.query(Shooter.name).filter(
Shooter.id==set_2_id
).scalar()
set_2_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_2_id).scalar()
set_3_id = db.session.query(Firer_Details.firer_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==3
).distinct().scalar()
set_3_session_no=db.session.query(Firer_Details.session_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==3
).distinct().scalar()
set_3_detail_no=db.session.query(Firer_Details.detail_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==3
).distinct().scalar()
set_3_name=db.session.query(Shooter.name).filter(
Shooter.id==set_3_id
).scalar()
set_3_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_3_id).scalar()
set_4_id = db.session.query(Firer_Details.firer_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==4
).distinct().scalar()
set_4_session_no=db.session.query(Firer_Details.session_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==4
).distinct().scalar()
set_4_detail_no=db.session.query(Firer_Details.detail_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==4
).distinct().scalar()
set_4_name=db.session.query(Shooter.name).filter(
Shooter.id==set_4_id
).scalar()
set_4_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_4_id).scalar()
current_firer_name = db.session.query(Shooter.name).filter(Shooter.id==target_1_id).scalar()
current_army_no = db.session.query(Shooter.service_id).filter(Shooter.id==target_1_id).scalar()
current_session_no=db.session.query(TShooting.session_id).filter(TShooting.target_1_id==target_1_id).scalar()
current_detail_no=db.session.query(TShooting.detail_no).filter(TShooting.target_1_id==target_1_id).scalar()
return(da_1,cant,ra_1,service_id_1,res,ten,gp_len,
set_4_name,set_4_army,set_4_session_no,set_4_detail_no,
set_3_name,set_3_army,set_3_session_no,set_3_detail_no,
set_2_name,set_2_army,set_2_session_no,set_2_detail_no,
set_1_name,set_1_army,set_1_session_no,set_1_detail_no,
current_firer_name,current_army_no,current_session_no,current_detail_no
)
@app.route('/individual_score/target_2', methods=['GET', 'POST'])
def individual_score_target_2():
firer_id =db.session.query(TShooting.target_2_id).scalar()
detail_no =db.session.query(TShooting.detail_no).scalar()
session_no =db.session.query(TShooting.session_id).scalar()
target_no = 2
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==firer_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
res=[]
ten=[]
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==firer_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
print(tres)
for ele in tres:
for ele2 in ele:
print(type(ele2))
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
print(type(ele4))
ten.append(ele4)
service_id = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
rank_id=db.session.query(Shooter.rank_id).filter(Shooter.id==firer_id).scalar()
rank=db.session.query(Rank.name).filter(Rank.id==rank_id).scalar()
name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
firer_id,sess,o,p,u,q,t1_x,t1_y,xmpi,ympi,f,gp,Tfirt_x,Tfirt_y,fin_x_1,fin_y_1,result_1,fir_tendency_1=prediction_calculation_2()
if request.method == 'POST':
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
print("paper_ref")
print(paper_ref)
return render_template('pages/prediction_target_2.html',
name = name,
detail_no=detail_no,
session_no=session_no,
target_no=target_no,
service_id=service_id,
rank=rank,
res=res,
ten=ten)
@app.route('/individual_score/target_3', methods=['GET', 'POST'])
def individual_score_target_3():
firer_id =db.session.query(TShooting.target_3_id).scalar()
detail_no =db.session.query(TShooting.detail_no).scalar()
session_no =db.session.query(TShooting.session_id).scalar()
target_no = 3
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==firer_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
res=[]
ten=[]
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==firer_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
print(tres)
for ele in tres:
for ele2 in ele:
print(type(ele2))
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
print(type(ele4))
ten.append(ele4)
service_id = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
rank_id=db.session.query(Shooter.rank_id).filter(Shooter.id==firer_id).scalar()
rank=db.session.query(Rank.name).filter(Rank.id==rank_id).scalar()
name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
return render_template('pages/prediction_target_3.html',
name = name,
detail_no=detail_no,
session_no=session_no,
target_no=target_no,
service_id=service_id,
rank=rank,
res=res,
ten=ten)
@app.route('/individual_score/target_4', methods=['GET', 'POST'])
def individual_score_target_4():
firer_id =db.session.query(TShooting.target_4_id).scalar()
detail_no =db.session.query(TShooting.detail_no).scalar()
session_no =db.session.query(TShooting.session_id).scalar()
target_no = 4
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==firer_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
res=[]
ten=[]
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==firer_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
print(tres)
for ele in tres:
for ele2 in ele:
print(type(ele2))
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
print(type(ele4))
ten.append(ele4)
service_id = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
rank_id=db.session.query(Shooter.rank_id).filter(Shooter.id==firer_id).scalar()
rank=db.session.query(Rank.name).filter(Rank.id==rank_id).scalar()
name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
return render_template('pages/prediction_target_4.html',
name = name,
detail_no=detail_no,
session_no=session_no,
target_no=target_no,
service_id=service_id,
rank=rank,
res=res,
ten=ten)
@app.route('/individual_score/target_5', methods=['GET', 'POST'])
def individual_score_target_5():
firer_id =db.session.query(TShooting.target_5_id).scalar()
detail_no =db.session.query(TShooting.detail_no).scalar()
session_no =db.session.query(TShooting.session_id).scalar()
target_no = 5
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==firer_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
res=[]
ten=[]
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==firer_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
print(tres)
for ele in tres:
for ele2 in ele:
print(type(ele2))
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
print(type(ele4))
ten.append(ele4)
service_id = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
rank_id=db.session.query(Shooter.rank_id).filter(Shooter.id==firer_id).scalar()
rank=db.session.query(Rank.name).filter(Rank.id==rank_id).scalar()
name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
return render_template('pages/prediction_target_5.html',
name = name,
detail_no=detail_no,
session_no=session_no,
target_no=target_no,
service_id=service_id,
rank=rank,
res=res,
ten=ten)
@app.route('/individual_score/target_6', methods=['GET', 'POST'])
def individual_score_target_6():
firer_id =db.session.query(TShooting.target_6_id).scalar()
detail_no =db.session.query(TShooting.detail_no).scalar()
session_no =db.session.query(TShooting.session_id).scalar()
target_no = 6
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==firer_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
res=[]
ten=[]
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==firer_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
print(tres)
for ele in tres:
for ele2 in ele:
print(type(ele2))
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
print(type(ele4))
ten.append(ele4)
service_id = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
rank_id=db.session.query(Shooter.rank_id).filter(Shooter.id==firer_id).scalar()
rank=db.session.query(Rank.name).filter(Rank.id==rank_id).scalar()
name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
return render_template('pages/prediction_target_6.html',
name = name,
detail_no=detail_no,
session_no=session_no,
target_no=target_no,
service_id=service_id,
rank=rank,
res=res,
ten=ten)
@app.route('/individual_score/target_7', methods=['GET', 'POST'])
def individual_score_target_7():
firer_id =db.session.query(TShooting.target_7_id).scalar()
detail_no =db.session.query(TShooting.detail_no).scalar()
session_no =db.session.query(TShooting.session_id).scalar()
target_no = 7
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==firer_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
res=[]
ten=[]
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==firer_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
print(tres)
for ele in tres:
for ele2 in ele:
print(type(ele2))
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
print(type(ele4))
ten.append(ele4)
service_id = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
rank_id=db.session.query(Shooter.rank_id).filter(Shooter.id==firer_id).scalar()
rank=db.session.query(Rank.name).filter(Rank.id==rank_id).scalar()
name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
return render_template('pages/prediction_target_7.html',
name = name,
detail_no=detail_no,
session_no=session_no,
target_no=target_no,
service_id=service_id,
rank=rank,
res=res,
ten=ten)
@app.route('/individual_score/target_8', methods=['GET', 'POST'])
def individual_score_target_8():
firer_id =db.session.query(TShooting.target_8_id).scalar()
detail_no =db.session.query(TShooting.detail_no).scalar()
session_no =db.session.query(TShooting.session_id).scalar()
target_no = 8
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==firer_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
res=[]
ten=[]
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==firer_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
print(tres)
for ele in tres:
for ele2 in ele:
print(type(ele2))
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
print(type(ele4))
ten.append(ele4)
service_id = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
rank_id=db.session.query(Shooter.rank_id).filter(Shooter.id==firer_id).scalar()
rank=db.session.query(Rank.name).filter(Rank.id==rank_id).scalar()
name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
return render_template('pages/prediction_target_8.html',
name = name,
detail_no=detail_no,
session_no=session_no,
target_no=target_no,
service_id=service_id,
rank=rank,
res=res,
ten=ten)
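# Prediction endpoint for target 1: runs the shot calculations, gathers today's set 1-4 records for the target and returns the plotting data as JSON.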
@app.route('/prediction_target_1/', methods=['GET', 'POST'])
def prediction_target_1():
t1_x=0
t1_y=0
xmpi_j=0
ympi_j=0
gp=0
Tfirt_x_j=0
Tfirt_y_j=0
fin_x_1=0
fin_y_1=0
xmpi_inch = 0
ympi_inch = 0
result_1=None
fir_tendency=None
set_1_name = None
set_1_army =None
set_1_session_no = None
set_1_detail_no=None
set_1_id =None
set_2_name = None
set_2_army =None
set_2_session_no = None
set_2_detail_no=None
set_2_id =None
set_3_name = None
set_3_army =None
set_3_session_no = None
set_3_detail_no=None
set_3_id =None
set_4_name = None
set_4_army =None
set_4_session_no = None
set_4_detail_no=None
set_4_id =None
fir_tendency_1=None
firer_id=None
current_army_no=None
current_firer_name=None
current_session_no=None
session_detail_no=None
current_detail_no=None
set_2_x=None
set_2_y=None
set_3_x=None
set_3_y=None
set_4_x=None
set_4_y=None
paper_ref=None
sess=None
res=None
set_2_x_arr=[]
set_2_y_arr=[]
set_3_x_arr=[]
set_3_y_arr=[]
set_4_x_arr=[]
set_4_y_arr=[]
fin_x_arr_1=[]
fin_y_arr_1=[]
curdate=time.strftime("%Y-%m-%d")
if request.method == 'POST':
firer_id,sess,detail,p,u,q,t1_x,t1_y,xmpi,ympi,f,gp,Tfirt_x,Tfirt_y,fin_x_1,fin_y_1,result_1,fir_tendency_1=prediction_calculation_1()
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
set_2_x=db.session.query(Firer_Details).filter(Firer_Details.date==curdate , Firer_Details.target_no==1 ,Firer_Details.set_no==2 , Firer_Details.session_id==sess).all()
set_2_y=db.session.query(Firer_Details).filter(Firer_Details.date==curdate , Firer_Details.target_no==1 , Firer_Details.set_no==2 , Firer_Details.session_id==sess).all()
for x_2 in set_2_x:
set_2_x_arr.append(int(x_2.final_x))
for y_2 in set_2_y:
set_2_y_arr.append(int(y_2.final_y))
set_3_x=db.session.query(Firer_Details).filter(Firer_Details.date==curdate , Firer_Details.target_no==1 , Firer_Details.set_no==3 , Firer_Details.session_id==sess).all()
set_3_y=db.session.query(Firer_Details).filter(Firer_Details.date==curdate , Firer_Details.target_no==1 , Firer_Details.set_no==3 , Firer_Details.session_id==sess).all()
for x_3 in set_3_x:
set_3_x_arr.append(int(x_3.final_x))
for y_3 in set_3_y:
set_3_y_arr.append(int(y_3.final_y))
print(set_3_x_arr)
set_4_x=db.session.query(Firer_Details).filter(Firer_Details.date==curdate , Firer_Details.target_no==1 , Firer_Details.set_no==4 , Firer_Details.session_id==sess).all()
set_4_y=db.session.query(Firer_Details).filter(Firer_Details.date==curdate , Firer_Details.target_no==1 , Firer_Details.set_no==4 , Firer_Details.session_id==sess).all()
for x_4 in set_4_x:
set_4_x_arr.append(int(x_4.final_x))
for y_4 in set_4_y:
set_4_y_arr.append(int(y_4.final_y))
set_1_id = db.session.query(Firer_Details.firer_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==1
).distinct().scalar()
set_1_session_no=db.session.query(Firer_Details.session_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==1
).distinct().scalar()
set_1_detail_no=db.session.query(Firer_Details.detail_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==1
).distinct().scalar()
set_1_name=db.session.query(Shooter.name).filter(
Shooter.id==set_1_id
).scalar()
set_1_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_1_id).scalar()
set_2_id = db.session.query(Firer_Details.firer_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==2
).distinct().scalar()
set_2_session_no=db.session.query(Firer_Details.session_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==2
).distinct().scalar()
set_2_detail_no=db.session.query(Firer_Details.detail_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==2
).distinct().scalar()
print("set_2_detail_no")
print(set_2_detail_no)
set_2_name=db.session.query(Shooter.name).filter(
Shooter.id==set_2_id
).scalar()
set_2_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_2_id).scalar()
set_3_id = db.session.query(Firer_Details.firer_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==3
).distinct().scalar()
set_3_session_no=db.session.query(Firer_Details.session_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==3
).distinct().scalar()
set_3_detail_no=db.session.query(Firer_Details.detail_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==3
).distinct().scalar()
set_3_name=db.session.query(Shooter.name).filter(
Shooter.id==set_3_id
).scalar()
set_3_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_3_id).scalar()
set_4_id = db.session.query(Firer_Details.firer_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==4
).distinct().scalar()
set_4_session_no=db.session.query(Firer_Details.session_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==4
).distinct().scalar()
set_4_detail_no=db.session.query(Firer_Details.detail_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==4
).distinct().scalar()
set_4_name=db.session.query(Shooter.name).filter(
Shooter.id==set_4_id
).scalar()
set_4_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_4_id).scalar()
current_firer_name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
current_army_no = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
current_session_no=db.session.query(TShooting.session_id).filter(TShooting.target_1_id==firer_id).scalar()
current_detail_no=db.session.query(TShooting.detail_no).filter(TShooting.target_1_id==firer_id).scalar()
xmpi_inch = pixeltoinch(xmpi)
ympi_inch = pixeltoinch(ympi)
xmpi_j =pd.Series(xmpi_inch).to_json(orient='values')
ympi_j =pd.Series(ympi_inch).to_json(orient='values')
Tfirt_x_j =pd.Series(Tfirt_x).to_json(orient='values')
Tfirt_y_j =pd.Series(Tfirt_y).to_json(orient='values')
for x_1 in fin_x_1:
fin_x_arr_1.append(int(x_1.final_x))
for y_1 in fin_y_1 :
fin_y_arr_1.append(int(y_1.final_y))
return jsonify(x1=t1_x ,
y1=t1_y ,
xmpi1=Tfirt_x_j ,
ympi1=Tfirt_y_j,
gp=gp,
txf1=Tfirt_x_j,
tyf1=Tfirt_y_j,
fx1=fin_x_arr_1,
fy1=fin_y_arr_1,
result_1=result_1,
fir_tendency_1=fir_tendency_1,
set_1_name=set_1_name,
current_firer_name=current_firer_name,
set_1_army=set_1_army,
current_army_no=current_army_no,
set_1_session_no=set_1_session_no,
current_session_no=current_session_no,
set_1_detail_no=set_1_detail_no,
current_detail_no=current_detail_no,
set_2_x=set_2_x_arr,
set_2_y=set_2_y_arr,
set_2_name=set_2_name,
set_2_army=set_2_army,
set_2_detail_no=set_2_detail_no,
set_2_session_no=set_2_session_no,
set_3_x=set_3_x_arr,
set_3_y=set_3_y_arr,
set_3_name=set_3_name,
set_3_army=set_3_army,
set_3_session_no=set_3_session_no,
set_3_detail_no=set_3_detail_no,
set_4_x=set_4_x_arr,
set_4_y=set_4_y_arr,
set_4_name=set_4_name,
set_4_army=set_4_army,
set_4_session_no=set_4_session_no,
set_4_detail_no=set_4_detail_no
)
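# Prediction endpoint for target 2: same flow as target 1, but reads the set history from T_Firer_Details filtered by paper reference.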
@app.route('/prediction_target_2/', methods=['GET', 'POST'])
def prediction_target_2():
t1_x=0
t1_y=0
xmpi_j=0
ympi_j=0
gp=0
Tfirt_x_j=0
Tfirt_y_j=0
fin_x_1=0
fin_y_1=0
xmpi_inch = 0
ympi_inch = 0
result_1=None
fir_tendency=None
set_1_name = None
set_1_army =None
set_1_session_no = None
set_1_detail_no=None
set_1_id =None
set_2_name = None
set_2_army =None
set_2_session_no = None
set_2_detail_no=None
set_2_id =None
set_3_name = None
set_3_army =None
set_3_session_no = None
set_3_detail_no=None
set_3_id =None
set_4_name = None
set_4_army =None
set_4_session_no = None
set_4_detail_no=None
set_4_id =None
fir_tendency_1=None
firer_id=None
current_army_no=None
current_firer_name=None
current_session_no=None
session_detail_no=None
current_detail_no=None
set_2_x=None
set_2_y=None
set_3_x=None
set_3_y=None
set_4_x=None
set_4_y=None
paper_ref=None
sess=None
res=None
set_2_x_arr=[]
set_2_y_arr=[]
set_3_x_arr=[]
set_3_y_arr=[]
set_4_x_arr=[]
set_4_y_arr=[]
fin_x_arr_1=[]
fin_y_arr_1=[]
curdate=time.strftime("%Y-%m-%d")
if request.method == 'POST':
firer_id,sess,o,p,u,q,t1_x,t1_y,xmpi,ympi,f,gp,Tfirt_x,Tfirt_y,fin_x_1,fin_y_1,result_1,fir_tendency_1=prediction_calculation_2()
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
set_2_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==2 ,T_Firer_Details.set_no==2 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_2_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==2 , T_Firer_Details.set_no==2 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_2 in set_2_x:
set_2_x_arr.append(int(x_2.final_x))
for y_2 in set_2_y:
set_2_y_arr.append(int(y_2.final_y))
set_3_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==2 , T_Firer_Details.set_no==3 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_3_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==2 , T_Firer_Details.set_no==3 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_3 in set_3_x:
set_3_x_arr.append(int(x_3.final_x))
for y_3 in set_3_y:
set_3_y_arr.append(int(y_3.final_y))
set_4_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==2 , T_Firer_Details.set_no==4 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_4_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==2 , T_Firer_Details.set_no==4 ,T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_4 in set_4_x:
set_4_x_arr.append(int(x_4.final_x))
for y_4 in set_4_y:
set_4_y_arr.append(int(y_4.final_y))
set_1_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_name=db.session.query(Shooter.name).filter(
Shooter.id==set_1_id
).scalar()
set_1_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_1_id).scalar()
set_2_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_name=db.session.query(Shooter.name).filter(
Shooter.id==set_2_id
).scalar()
set_2_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_2_id).scalar()
set_3_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_name=db.session.query(Shooter.name).filter(
Shooter.id==set_3_id
).scalar()
set_3_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_3_id).scalar()
set_4_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_name=db.session.query(Shooter.name).filter(
Shooter.id==set_4_id
).scalar()
set_4_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_4_id).scalar()
current_firer_name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
current_army_no = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
current_session_no=db.session.query(TShooting.session_id).filter(TShooting.target_1_id==firer_id).scalar()
current_detail_no=db.session.query(TShooting.detail_no).filter(TShooting.target_1_id==firer_id).scalar()
xmpi_inch = pixeltoinch(xmpi)
ympi_inch = pixeltoinch(ympi)
xmpi_j =pd.Series(xmpi_inch).to_json(orient='values')
ympi_j =pd.Series(ympi_inch).to_json(orient='values')
Tfirt_x_j =pd.Series(Tfirt_x).to_json(orient='values')
Tfirt_y_j =pd.Series(Tfirt_y).to_json(orient='values')
fin_x_arr_1=[]
fin_y_arr_1=[]
for x_1 in fin_x_1:
fin_x_arr_1.append(int(x_1.final_x))
for y_1 in fin_y_1 :
fin_y_arr_1.append(int(y_1.final_y))
return jsonify(x1=t1_x ,
y1=t1_y ,
xmpi1=Tfirt_x_j ,
ympi1=Tfirt_y_j,
gp=gp,
txf1=Tfirt_x_j,
tyf1=Tfirt_y_j,
fx1=fin_x_arr_1,
fy1=fin_y_arr_1,
result_1=result_1,
fir_tendency_1=fir_tendency_1,
set_1_name=set_1_name,
current_firer_name=current_firer_name,
set_1_army=set_1_army,
current_army_no=current_army_no,
set_1_session_no=set_1_session_no,
current_session_no=current_session_no,
set_1_detail_no=set_1_detail_no,
current_detail_no=current_detail_no,
set_2_x=set_2_x_arr,
set_2_y=set_2_y_arr,
set_2_name=set_2_name,
set_2_army=set_2_army,
set_2_detail_no=set_2_detail_no,
set_2_session_no=set_2_session_no,
set_3_x=set_3_x_arr,
set_3_y=set_3_y_arr,
set_3_name=set_3_name,
set_3_army=set_3_army,
set_3_session_no=set_3_session_no,
set_3_detail_no=set_3_detail_no,
set_4_x=set_4_x_arr,
set_4_y=set_4_y_arr,
set_4_name=set_4_name,
set_4_army=set_4_army,
set_4_session_no=set_4_session_no,
set_4_detail_no=set_4_detail_no
)
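# Prediction endpoint for target 3: same flow as target 2, for the third target lane.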
@app.route('/prediction_target_3/', methods=['GET', 'POST'])
def prediction_target_3():
t1_x=0
t1_y=0
xmpi_j=0
ympi_j=0
gp=0
Tfirt_x_j=0
Tfirt_y_j=0
fin_x_1=0
fin_y_1=0
xmpi_inch = 0
ympi_inch = 0
result_1=None
fir_tendency=None
set_1_name = None
set_1_army =None
set_1_session_no = None
set_1_detail_no=None
set_1_id =None
set_2_name = None
set_2_army =None
set_2_session_no = None
set_2_detail_no=None
set_2_id =None
set_3_name = None
set_3_army =None
set_3_session_no = None
set_3_detail_no=None
set_3_id =None
set_4_name = None
set_4_army =None
set_4_session_no = None
set_4_detail_no=None
set_4_id =None
fir_tendency_1=None
firer_id=None
current_army_no=None
current_firer_name=None
current_session_no=None
session_detail_no=None
current_detail_no=None
set_2_x=None
set_2_y=None
set_3_x=None
set_3_y=None
set_4_x=None
set_4_y=None
paper_ref=None
sess=None
res=None
set_2_x_arr=[]
set_2_y_arr=[]
set_3_x_arr=[]
set_3_y_arr=[]
set_4_x_arr=[]
set_4_y_arr=[]
fin_x_arr_1=[]
fin_y_arr_1=[]
curdate=time.strftime("%Y-%m-%d")
if request.method == 'POST':
firer_id,sess,o,p,u,q,t1_x,t1_y,xmpi,ympi,f,gp,Tfirt_x,Tfirt_y,fin_x_1,fin_y_1,result_1,fir_tendency_1=prediction_calculation_3()
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
set_2_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==3 ,T_Firer_Details.set_no==2 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_2_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==3 , T_Firer_Details.set_no==2 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_2 in set_2_x:
set_2_x_arr.append(int(x_2.final_x))
for y_2 in set_2_y:
set_2_y_arr.append(int(y_2.final_y))
set_3_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==3 , T_Firer_Details.set_no==3 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_3_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==3 , T_Firer_Details.set_no==3 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_3 in set_3_x:
set_3_x_arr.append(int(x_3.final_x))
for y_3 in set_3_y:
set_3_y_arr.append(int(y_3.final_y))
set_4_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==3 , T_Firer_Details.set_no==4 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_4_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==3 , T_Firer_Details.set_no==4 ,T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_4 in set_4_x:
set_4_x_arr.append(int(x_4.final_x))
for y_4 in set_4_y:
set_4_y_arr.append(int(y_4.final_y))
set_1_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_name=db.session.query(Shooter.name).filter(
Shooter.id==set_1_id
).scalar()
set_1_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_1_id).scalar()
set_2_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_name=db.session.query(Shooter.name).filter(
Shooter.id==set_2_id
).scalar()
set_2_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_2_id).scalar()
set_3_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_name=db.session.query(Shooter.name).filter(
Shooter.id==set_3_id
).scalar()
set_3_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_3_id).scalar()
set_4_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_name=db.session.query(Shooter.name).filter(
Shooter.id==set_4_id
).scalar()
set_4_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_4_id).scalar()
current_firer_name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
current_army_no = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
current_session_no=db.session.query(TShooting.session_id).filter(TShooting.target_1_id==firer_id).scalar()
current_detail_no=db.session.query(TShooting.detail_no).filter(TShooting.target_1_id==firer_id).scalar()
xmpi_inch = pixeltoinch(xmpi)
ympi_inch = pixeltoinch(ympi)
xmpi_j =pd.Series(xmpi_inch).to_json(orient='values')
import base64
import io
import textwrap
import dash
import dash_core_components as dcc
import dash_html_components as html
import gunicorn
import plotly.graph_objs as go
from dash.dependencies import Input, Output, State
import flask
import pandas as pd
import urllib.parse
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
import numpy as np
import math
import scipy.stats
import dash_table
from dash_table.Format import Format, Scheme
from colour import Color
import dash_bootstrap_components as dbc
# from waitress import serve
external_stylesheets = [dbc.themes.BOOTSTRAP, 'https://codepen.io/chriddyp/pen/bWLwgP.css',
"https://codepen.io/sutharson/pen/dyYzEGZ.css",
"https://fonts.googleapis.com/css2?family=Raleway&display=swap",
"https://codepen.io/chriddyp/pen/brPBPO.css"]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
# "external_url": "https://codepen.io/chriddyp/pen/brPBPO.css"
# https://raw.githubusercontent.com/aaml-analytics/pca-explorer/master/LoadingStatusStyleSheet.css
styles = {
'pre': {
'border': 'thin lightgrey solid',
'overflowX': 'scroll'
}
}
tabs_styles = {'height': '40px', 'font-family': 'Raleway', 'fontSize': 14}
tab_style = {
'borderBottom': '1px solid #d6d6d6',
'padding': '6px',
'Weight': 'bold'
}
tab_selected_style = {
'borderTop': '3px solid #333333',
'borderBottom': '1px solid #d6d6d6 ',
'backgroundColor': '#f6f6f6',
'color': '#333333',
# 'fontColor': '#004a4a',
'fontWeight': 'bold',
'padding': '6px'
}
# APP ABOUT DESCRIPTION
MOF_tool_about = textwrap.wrap(' These tools aim to provide a reproducible and consistent data visualisation platform '
'where experimental and computational researchers can use big data and statistical '
'analysis to find the best materials for specific applications. Principal Component '
'Analysis (PCA) is a dimension reduction technique that can be used to reduce a large '
'set of observable variables to a smaller set of latent variables that still contain '
'most of the information in the large set (feature extraction). This is done by '
'transforming a number of (possibly) correlated variables into some number of orthogonal '
'(uncorrelated) variables called principal components to find the directions of maximal '
'variance. PCA can be used to ease data visualisation by having fewer dimensions to plot '
'or be used as a pre-processing step before using another Machine Learning (ML)'
' algorithm for regression '
'and classification tasks. PCA can be used to improve an ML algorithm performance, '
'reduce overfitting and reduce noise in data.',
width=50)
Scree_plot_about = textwrap.wrap(' The Principal Component Analysis Visualisation Tools runs PCA for the user and '
'populates a Scree plot. This plot allows the user to determine if PCA is suitable '
'for '
                                 'their dataset and whether they can accept an X% drop in explained variance in '
                                 'exchange for fewer dimensions.', width=50)
Feature_correlation_filter = textwrap.wrap("Feature correlation heatmaps provide users with feature analysis and "
"feature principal component analysis. This tool will allow users to see the"
" correlation between variables and the"
" covariances/correlations between original variables and the "
"principal components (loadings)."
, width=50)
plots_analysis = textwrap.wrap('Users can keep all variables as features or drop certain variables to produce a '
'Biplot, cos2 plot and contribution plot. The score plot is used to look for clusters, '
'trends, and outliers in the first two principal components. The loading plot is used to'
' visually interpret the first two principal components. The biplot overlays the score '
'plot and the loading plot on the same graph. The squared cosine (cos2) plot shows '
                               'the importance of a component for a given variable, i.e. it measures '
                               'how much of that variable is represented in a component. The contribution plot contains the '
'contributions (%) of the variables to the principal components', width=50, )
data_table_download = textwrap.wrap("The user's inputs from the 'Plots' tab will provide the output of the data tables."
" The user can download the scores, eigenvalues, explained variance, "
"cumulative explained variance, loadings, "
"cos2 and contributions from the populated data tables. "
"Note: Wait for user inputs to be"
" computed (faded tab app will return to the original colour) before downloading the"
" data tables. ", width=50)
MOF_GH = textwrap.wrap(" to explore AAML's sample data and read more on"
" AAML's Principal Component Analysis Visualisation Tool Manual, FAQ's & Troubleshooting"
" on GitHub... ", width=50)
####################
# APP LAYOUT #
####################
fig = go.Figure()
fig1 = go.Figure()
app.layout = html.Div([
html.Div([
html.Img(
src='https://raw.githubusercontent.com/aaml-analytics/mof-explorer/master/UOC.png',
height='35', width='140', style={'display': 'inline-block', 'padding-left': '1%'}),
html.Img(src='https://raw.githubusercontent.com/aaml-analytics/mof-explorer/master/A2ML-logo.png',
height='50', width='125', style={'float': 'right', 'display': 'inline-block', 'padding-right': '2%'}),
html.H1("Principal Component Analysis Visualisation Tools",
style={'display': 'inline-block', 'padding-left': '11%', 'text-align': 'center', 'fontSize': 36,
'color': 'white', 'font-family': 'Raleway'}),
html.H1("...", style={'fontColor': '#3c3c3c', 'fontSize': 6})
], style={'backgroundColor': '#333333'}),
html.Div([html.A('Refresh', href='/')], style={}),
html.Div([
html.H2("Upload Data", style={'fontSize': 24, 'font-family': 'Raleway', 'color': '#333333'}, ),
html.H3("Upload .txt, .csv or .xls files to starting exploring data...", style={'fontSize': 16,
'font-family': 'Raleway'}),
dcc.Store(id='csv-data', storage_type='session', data=None),
html.Div([dcc.Upload(
id='data-table-upload',
children=html.Div([html.Button('Upload File')],
style={'height': "60px", 'borderWidth': '1px',
'borderRadius': '5px',
'textAlign': 'center',
}),
multiple=False
),
html.Div(id='output-data-upload'),
]), ], style={'display': 'inline-block', 'padding-left': '1%', }),
html.Div([dcc.Tabs([
dcc.Tab(label='About', style=tab_style, selected_style=tab_selected_style,
children=[html.Div([html.H2(" What are AAML's Principal Component Analysis Visualisation Tools?",
style={'fontSize': 18, 'font-family': 'Raleway', 'font-weight': 'bold'
}),
html.Div([' '.join(MOF_tool_about)]
, style={'font-family': 'Raleway'}),
html.H2(["Scree Plot"],
style={'fontSize': 18,
'font-family': 'Raleway', 'font-weight': 'bold'}),
html.Div([' '.join(Scree_plot_about)], style={'font-family': 'Raleway'}),
html.H2(["Feature Correlation"], style={'fontSize': 18,
'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(Feature_correlation_filter)], style={'font-family': 'Raleway', }),
html.H2(["Plots"],
style={'fontSize': 18, 'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(plots_analysis)], style={'font-family': 'Raleway'}),
html.H2(["Data tables"],
style={'fontSize': 18, 'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(data_table_download)], style={'font-family': 'Raleway'}),
# ADD LINK
html.Div([html.Plaintext(
[' Click ', html.A('here ',
href='https://github.com/aaml-analytics/pca-explorer')],
style={'display': 'inline-block',
'fontSize': 14, 'font-family': 'Raleway'}),
html.Div([' '.join(MOF_GH)], style={'display': 'inline-block',
'fontSize': 14,
'font-family': 'Raleway'}),
html.Img(
src='https://raw.githubusercontent.com/aaml-analytics/mof'
'-explorer/master/github.png',
height='40', width='40',
style={'display': 'inline-block', 'float': "right"
})
]
, style={'display': 'inline-block'})
], style={'backgroundColor': '#ffffff', 'padding-left': '1%'}
)]),
dcc.Tab(label='Scree Plot', style=tab_style, selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='PC-Eigen-plot')
],
style={'display': 'inline-block',
'width': '49%'}),
html.Div([dcc.Graph(id='PC-Var-plot')
], style={'display': 'inline-block', 'float': 'right',
'width': '49%'}),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:", dcc.RadioItems(
id='outlier-value',
options=[{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '49%', 'padding-left': '1%'}),
html.Div([html.Label(["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-scree',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.Label(["You should attempt to use at least..."
, html.Div(id='var-output-container-filter')])
], style={'padding-left': '1%'}
),
html.Div([
html.Label(["As a rule of thumb for the Scree Plot"
" Eigenvalues, the point where the slope of the curve "
"is clearly "
"leveling off (the elbow), indicates the number of "
"components that "
"should be retained as significant."])
], style={'padding-left': '1%'}),
]),
dcc.Tab(label='Feature correlation', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([html.Div([dcc.Graph(id='PC-feature-heatmap')
], style={'width': '47%',
'display': 'inline-block',
'float': 'right'}),
html.Div([dcc.Graph(id='feature-heatmap')
], style={'width': '51%',
'display': 'inline-block',
'float': 'left'}),
html.Div([html.Label(["Loading colour bar range:"
, html.Div(
id='color-range-container')])
], style={
'fontSize': 12,
'float': 'right',
'width': '100%',
'padding-left': '85%'}
),
html.Div(
[html.Label(
["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='PC-feature-outlier-value',
options=[{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.Label(
["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-heatmap',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '39%', }),
html.Div([html.Label(["Select color scale:",
dcc.RadioItems(
id='colorscale',
options=[{'label': i, 'value': i}
for i in
['Viridis', 'Plasma']],
value='Plasma'
)]),
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("There are usually two ways multicollinearity, "
"which is when there are a number of variables "
"that are highly correlated, is dealt with:"),
html.P("1) Use PCA to obtain a set of orthogonal ("
"not correlated) variables to analyse."),
html.P("2) Use correlation of determination (R²) to "
"determine which variables are highly "
"correlated and use only 1 in analysis. "
"Cut off for highly correlated variables "
"is ~0.7."),
html.P(
"In any case, it depends on the machine learning algorithm you may apply later. For correlation robust algorithms,"
" such as Random Forest, correlation of features will not be a concern. For non-correlation robust algorithms such as Linear Discriminant Analysis, "
"all high correlation variables should be removed.")
], style={'padding-left': '1%'}
),
html.Div([
html.Label(["Note: Data has been standardised (scale)"])
], style={'padding-left': '1%'})
])
]),
dcc.Tab(label='Plots', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([
html.Div([html.P("Selecting Features")], style={'padding-left': '1%',
'font-weight': 'bold'}),
html.Div([
html.P("Input here affects all plots, datatables and downloadable data output"),
html.Label([
"Would you like to analyse all variables or choose custom variables to "
"analyse:",
dcc.RadioItems(
id='all-custom-choice',
options=[{'label': 'All',
'value': 'All'},
{'label': 'Custom',
'value': 'Custom'}],
value='All'
)])
], style={'padding-left': '1%'}),
html.Div([
html.P("For custom variables input variables you would not like as features in your PCA:"),
html.Label(
[
"Note: Only input numerical variables (non-numerical variables have already "
"been removed from your dataframe)",
dcc.Dropdown(id='feature-input',
multi=True,
)])
], style={'padding': 10, 'padding-left': '1%'}),
]), dcc.Tabs(id='sub-tabs1', style=tabs_styles,
children=[
dcc.Tab(label='Biplot (Scores + loadings)', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='biplot', figure=fig)
], style={'height': '100%', 'width': '75%',
'padding-left': '20%'},
),
html.Div(
[html.Label(
["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-biplot',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-biplot',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '39%', }),
html.Div([
html.Label([
"Graph Update to show either loadings (Loading Plot) or "
"scores and loadings (Biplot):",
dcc.RadioItems(
id='customvar-graph-update',
options=[{'label': 'Biplot',
'value': 'Biplot'},
{'label': 'Loadings',
'value': 'Loadings'}],
value='Biplot')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix. PCA is an unsupervised machine learning technique - it only "
"looks at the input features and does not take "
"into account the output or the target"
" (response) variable.")],
style={'padding-left': '1%'}),
html.Div([
html.P("For variables you have dropped..."),
html.Label([
"Would you like to introduce a first target variable"
" into your data visualisation?"
" (Graph type must be Biplot): "
"",
dcc.RadioItems(
id='radio-target-item',
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Select first target variable for color scale of scores: ",
dcc.Dropdown(
id='color-scale-scores',
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Would you like to introduce a second target variable"
" into your data visualisation??"
" (Graph type must be Biplot):",
dcc.RadioItems(
id='radio-target-item-second',
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Select second target variable for size scale of scores:",
dcc.Dropdown(
id='size-scale-scores',
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([html.Label(["Size range:"
, html.Div(
id='size-second-target-container')])
], style={'display': 'inline-block',
'float': 'right',
'padding-right': '5%'}
),
html.Div([
html.Br(),
html.P(
"A loading plot shows how "
"strongly each characteristic (variable)"
" influences a principal component. The angles between the vectors"
" tell us how characteristics correlate with one another: "),
html.P("1) When two vectors are close, forming a small angle, the two "
"variables they represent are positively correlated. "),
html.P(
"2) If they meet each other at 90°, they are not likely to be correlated. "),
html.P(
"3) When they diverge and form a large angle (close to 180°), they are negative correlated."),
html.P(
"The Score Plot involves the projection of the data onto the PCs in two dimensions."
"The plot contains the original data but in the rotated (PC) coordinate system"),
html.P(
"A biplot merges a score plot and loading plot together.")
], style={'padding-left': '1%'}
),
]),
dcc.Tab(label='Cos2', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='cos2-plot', figure=fig)
], style={'width': '65%',
'padding-left': '25%'},
),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-cos2',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'padding-left': '1%',
'width': '49%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-cos2',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("The squared cosine shows the importance of a "
"component for a given observation i.e. "
"measures "
" how much a variable is represented in a "
"component")
], style={'padding-left': '1%'}),
]),
dcc.Tab(label='Contribution', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='contrib-plot', figure=fig)
], style={'width': '65%',
'padding-left': '25%'},
),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-contrib',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
], style={'padding-left': '1%'})
], style={'display': 'inline-block',
'width': '49%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-contrib',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("The contribution plot contains the "
"contributions (in percentage) of the "
"variables to the principal components")
], style={'padding-left': '1%'}),
])
])
]),
dcc.Tab(label='Data tables', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([
html.Div([
html.Label(
["Note: Input in 'Plots' tab will provide output of data tables and the"
" downloadable PCA data"])
], style={'font-weight': 'bold', 'padding-left': '1%'}),
html.Div([html.A(
'Download PCA Data (scores for each principal component)',
id='download-link',
href="",
target="_blank"
)], style={'padding-left': '1%'}),
html.Div([html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(id="eigenA-outlier",
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])], style={'padding-left': '1%',
'display': 'inline-block', 'width': '49%'}),
html.Div([html.Label(["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-data-table',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.Div([
html.Label(["Correlation between Features"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-correlation',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-correlation-container'),
]),
html.Div([html.A(
'Download Feature Correlation data',
id='download-link-correlation',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Eigen Analysis of the correlation matrix"]),
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-eigenA',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-eigenA-container'),
]),
html.Div([html.A(
'Download Eigen Analysis data',
id='download-link-eigenA',
href="",
download='Eigen_Analysis_data.csv',
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Loadings (Feature and PC correlation) from PCA"]),
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-loadings',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-loadings-container'),
]),
html.Div([html.A(
'Download Loadings data',
id='download-link-loadings',
download='Loadings_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Cos2 from PCA"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-cos2',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-cos2-container'),
]),
html.Div([html.A(
'Download Cos2 data',
id='download-link-cos2',
download='Cos2_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Contributions from PCA"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-contrib',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-contrib-container'),
]),
html.Div([html.A(
'Download Contributions data',
id='download-link-contrib',
download='Contributions_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
])])
])
], style={'font-family': 'Raleway'})])
# READ FILE
def parse_contents(contents, filename):
content_type, content_string = contents.split(',')
decoded = base64.b64decode(content_string)
try:
        if 'csv' in filename:
            # Assume that the user uploaded a CSV file
            df = pd.read_csv(io.StringIO(decoded.decode('utf-8')))
            df = df.fillna(0)
        elif 'xls' in filename:
            # Assume that the user uploaded an excel file
            df = pd.read_excel(io.BytesIO(decoded))
            df = df.fillna(0)
        elif 'txt' in filename or 'tsv' in filename:
            # Assume that the user uploaded a whitespace-delimited text file
            df = pd.read_csv(io.StringIO(decoded.decode('utf-8')), delimiter=r'\s+')
            df = df.fillna(0)
except Exception as e:
print(e)
return html.Div([
'There was an error processing this file.'
])
return df
@app.callback(Output('csv-data', 'data'),
[Input('data-table-upload', 'contents')],
[State('data-table-upload', 'filename')])
def parse_uploaded_file(contents, filename):
if not filename:
return dash.no_update
df = parse_contents(contents, filename)
    df = df.fillna(0)
return df.to_json(date_format='iso', orient='split')
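# Illustrative sketch only (not called by the app): the uploaded dataframe travels between callbacks
# through the 'csv-data' dcc.Store as JSON with orient='split', and each callback reads it back with
# pd.read_json. The example dataframe below is made up purely to show the round-trip.
def _store_roundtrip_example():
    example = pd.DataFrame({'Name': ['a', 'b'], 'x': [1.0, 2.0], 'y': [3.0, 4.0]})
    stored = example.to_json(date_format='iso', orient='split')   # what the Store holds
    restored = pd.read_json(stored, orient='split')               # what each callback reads back
    return restored.equals(example)                               # should be True: index, columns and values survive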
@app.callback(Output('PC-Var-plot', 'figure'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')],
)
def update_variance_scree_plot(outlier, matrix_type, data):
traces = []
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == 'Correlation':
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
data = Var_dff
elif outlier == 'Yes' and matrix_type == 'Correlation':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
data = Var_dff_outlier
elif outlier == 'No' and matrix_type == 'Covariance':
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
data = Var_dff_covar
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
data = Var_dff_outlier_covar
traces.append(go.Scatter(x=data['Principal Component'], y=data['Cumulative Proportion of Explained Variance'],
mode='lines', line=dict(color='Red')))
return {'data': traces,
'layout': go.Layout(title='<b>Cumulative Scree Plot Proportion of Explained Variance</b>',
titlefont=dict(family='Helvetica', size=16),
xaxis={'title': 'Principal Component',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True
}, yaxis={'title': 'Cumulative Explained Variance',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True,
'range': [0, 100]},
hovermode='closest', font=dict(family="Helvetica"), template="simple_white")
}
@app.callback(
Output('var-output-container-filter', 'children'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')],
)
def update_output(outlier, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == 'Correlation':
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
PC_num = [float(i + 1) for i in range(len(features))]
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
PC_interp = np.interp(70, Var_dff['Cumulative Proportion of Explained Variance'], PC_num)
PC_interp_int = math.ceil(PC_interp)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int)
elif outlier == 'Yes' and matrix_type == "Correlation":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier = outlier_dff.loc[:, ].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
dfff_outlier = finalDf_outlier
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
loading_dff_outlier = loading_df_outlier.T
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier['Cumulative Proportion of Explained Variance'],
PC_num_outlier)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_outlier)
elif outlier == 'No' and matrix_type == 'Covariance':
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
PC_num_covar = [float(i + 1) for i in range(len(features_covar))]
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
PC_interp_covar = np.interp(70, Var_dff_covar['Cumulative Proportion of Explained Variance'], PC_num_covar)
PC_interp_int_covar = math.ceil(PC_interp_covar)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_covar)
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
PC_num_outlier_covar = [float(i + 1) for i in range(len(features_outlier_covar))]
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier_covar['Cumulative Proportion of Explained Variance'],
PC_num_outlier_covar)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_outlier)
@app.callback(Output('PC-Eigen-plot', 'figure'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')]
)
def update_eigenvalue_scree_plot(outlier, matrix_type, data):
traces = []
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == "Correlation":
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
y = dff.loc[:, ].values
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
PC_num = [float(i + 1) for i in range(len(features))]
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
PC_interp = np.interp(70, Var_dff['Cumulative Proportion of Explained Variance'], PC_num)
PC_interp_int = math.ceil(PC_interp)
eigenvalues = pca.explained_variance_
Eigen_df = pd.DataFrame(data=eigenvalues, columns=['Eigenvalues'])
Eigen_dff = pd.concat([PC_df, Eigen_df], axis=1)
data = Eigen_dff
elif outlier == 'Yes' and matrix_type == "Correlation":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier = outlier_dff.loc[:, ].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
dfff_outlier = finalDf_outlier
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
loading_dff_outlier = loading_df_outlier.T
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier['Cumulative Proportion of Explained Variance'],
PC_num_outlier)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
eigenvalues_outlier = pca_outlier.explained_variance_
Eigen_df_outlier = pd.DataFrame(data=eigenvalues_outlier, columns=['Eigenvalues'])
Eigen_dff_outlier = pd.concat([PC_df_outlier, Eigen_df_outlier], axis=1)
data = Eigen_dff_outlier
elif outlier == 'No' and matrix_type == "Covariance":
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
PC_num_covar = [float(i + 1) for i in range(len(features_covar))]
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
PC_interp_covar = np.interp(70, Var_dff_covar['Cumulative Proportion of Explained Variance'], PC_num_covar)
PC_interp_int_covar = math.ceil(PC_interp_covar)
eigenvalues_covar = pca_covar.explained_variance_
Eigen_df_covar = pd.DataFrame(data=eigenvalues_covar, columns=['Eigenvalues'])
Eigen_dff_covar = pd.concat([PC_df_covar, Eigen_df_covar], axis=1)
data = Eigen_dff_covar
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
PC_num_outlier_covar = [float(i + 1) for i in range(len(features_outlier_covar))]
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier_covar['Cumulative Proportion of Explained Variance'],
PC_num_outlier_covar)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
eigenvalues_outlier_covar = pca_outlier_covar.explained_variance_
Eigen_df_outlier_covar = pd.DataFrame(data=eigenvalues_outlier_covar, columns=['Eigenvalues'])
Eigen_dff_outlier_covar = pd.concat([PC_df_outlier_covar, Eigen_df_outlier_covar], axis=1)
data = Eigen_dff_outlier_covar
traces.append(go.Scatter(x=data['Principal Component'], y=data['Eigenvalues'], mode='lines'))
return {'data': traces,
'layout': go.Layout(title='<b>Scree Plot Eigenvalues</b>', xaxis={'title': 'Principal Component',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True},
titlefont=dict(family='Helvetica', size=16),
yaxis={'title': 'Eigenvalues', 'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True}, hovermode='closest',
font=dict(family="Helvetica"), template="simple_white", )
}
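# Illustrative sketch only: the eigenvalues plotted above come from pca.explained_variance_, which
# equals the eigenvalues of the sample covariance matrix of the (standardised) data. The helper below
# is for intuition/verification and is not used by the app.
def eigenvalues_from_covariance(dff):
    """Return eigenvalues of the sample covariance matrix of standardised data, largest first."""
    x = StandardScaler().fit_transform(dff.values)
    eigvals = np.linalg.eigvalsh(np.cov(x, rowvar=False))   # np.cov uses the n-1 denominator, like sklearn's PCA
    return np.sort(eigvals)[::-1]                           # matches pca.explained_variance_ up to numerical precision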
def round_up(n, decimals=0):
multiplier = 10 ** decimals
return math.ceil(n * multiplier) / multiplier
def round_down(n, decimals=0):
multiplier = 10 ** decimals
return math.floor(n * multiplier) / multiplier
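# Example behaviour of the rounding helpers used for the loading colour-bar range
# (values chosen purely for illustration):
#     round_up(0.1234, 2)   -> 0.13
#     round_down(0.1299, 2) -> 0.12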
@app.callback([Output('PC-feature-heatmap', 'figure'),
Output('color-range-container', 'children')],
[
Input('PC-feature-outlier-value', 'value'),
Input('colorscale', 'value'),
Input("matrix-type-heatmap", "value"),
Input('csv-data', 'data')]
)
def update_pc_feature_heatmap(outlier, colorscale, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
traces = []
# INCLUDING OUTLIERS
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
y = dff.loc[:, ].values
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
    # combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
    # explained variance of the two principal components
    # print(pca.explained_variance_ratio_)
    # Explained variance tells us how much information (variance) can be attributed to each of the principal components
    # loading of each feature in principal components
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
# OUTLIERS REMOVED
z_scores_hm = scipy.stats.zscore(dff)
abs_z_scores_hm = np.abs(z_scores_hm)
filtered_entries_hm = (abs_z_scores_hm < 3).all(axis=1)
outlier_dff_hm = dff[filtered_entries_hm]
features1_outlier_hm = outlier_dff_hm.columns
features_outlier2 = list(features1_outlier_hm)
outlier_names1_hm = df[filtered_entries_hm]
outlier_names_hm = outlier_names1_hm.iloc[:, 0]
x_outlier_hm = outlier_dff_hm.loc[:, features_outlier2].values
# Separating out the target (if any)
# Standardizing the features
x_outlier_hm = StandardScaler().fit_transform(x_outlier_hm)
pca_outlier_hm = PCA(n_components=len(features_outlier2))
principalComponents_outlier_hm = pca_outlier_hm.fit_transform(x_outlier_hm)
principalDf_outlier_hm = pd.DataFrame(data=principalComponents_outlier_hm
, columns=['PC' + str(i + 1) for i in range(len(features_outlier2))])
    # combining principal components and target
finalDf_outlier_hm = pd.concat([outlier_names_hm, principalDf_outlier_hm], axis=1)
dfff_outlier_hm = finalDf_outlier_hm
# calculating loading
loading_outlier_hm = pca_outlier_hm.components_.T * np.sqrt(pca_outlier_hm.explained_variance_)
loading_df_outlier_hm = pd.DataFrame(data=loading_outlier_hm[0:, 0:], index=features_outlier2,
columns=['PC' + str(i + 1) for i in range(loading_outlier_hm.shape[1])])
loading_dff_outlier_hm = loading_df_outlier_hm.T
# COVAR MATRIX
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
loading_dff_covar = loading_df_covar.T
# COVAR MATRIX OUTLIERS REMOVED
if outlier == 'No' and matrix_type == "Correlation":
data = loading_dff
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_dff_outlier_hm
elif outlier == 'No' and matrix_type == "Covariance":
data = loading_dff_covar
elif outlier == "Yes" and matrix_type == "Covariance":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
loading_dff_outlier_covar = loading_df_outlier_covar.T
data = loading_dff_outlier_covar
    size_range = [round_up(data.values.min(), 2), round_down(data.values.max(), 2)]
traces.append(go.Heatmap(
z=data, x=features_outlier2, y=['PC' + str(i + 1) for i in range(loading_outlier_hm.shape[1])],
colorscale="Viridis" if colorscale == 'Viridis' else "Plasma",
        # z values: the loading of each feature on each principal component
colorbar={"title": "Loading",
# 'tickvals': [round_up(data.values.min(), 2),
# round_up((data.values.min() + (data.values.max() + data.values.min())/2)/2, 2),
# round_down((data.values.max() + data.values.min())/2,2),
# round_down((data.values.max() + (data.values.max() + data.values.min())/2)/2, 2),
# round_down(data.values.max(),2), ]
}
))
return {'data': traces,
'layout': go.Layout(title=dict(text='<b>PC and Feature Correlation Analysis</b>'),
xaxis=dict(title_text='Features', title_standoff=50),
titlefont=dict(family='Helvetica', size=16),
hovermode='closest', margin={'b': 110, 't': 50, 'l': 75},
font=dict(family="Helvetica", size=11),
annotations=[
dict(x=-0.16, y=0.5, showarrow=False, text="Principal Components",
xref='paper', yref='paper', textangle=-90,
font=dict(size=12))]
),
}, '{}'.format(size_range)
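# Several callbacks repeat the same outlier filter: keep only the rows whose features all have a
# |z-score| below 3. A minimal sketch of that pattern as a standalone helper (illustrative only;
# the callbacks above inline this logic rather than sharing a function):
def remove_outliers(df, dff):
    """Return (row labels, numeric rows) with any row containing a |z-score| >= 3 removed."""
    z_scores = scipy.stats.zscore(dff)
    filtered_entries = (np.abs(z_scores) < 3).all(axis=1)   # True for rows with no outlying feature
    return df[filtered_entries].iloc[:, 0], dff[filtered_entries]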
@app.callback(Output('feature-heatmap', 'figure'),
[
Input('PC-feature-outlier-value', 'value'),
Input('colorscale', 'value'),
Input('csv-data', 'data')])
def update_feature_correlation_heatmap(outlier, colorscale, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
traces = []
if outlier == 'No':
features1 = dff.columns
features = list(features1)
# correlation coefficient and coefficient of determination
correlation_dff = dff.corr(method='pearson', )
r2_dff = correlation_dff * correlation_dff
data = r2_dff
feat = features
elif outlier == 'Yes':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
# correlation coefficient and coefficient of determination
correlation_dff_outlier = outlier_dff.corr(method='pearson', )
r2_dff_outlier = correlation_dff_outlier * correlation_dff_outlier
data = r2_dff_outlier
feat = features_outlier
traces.append(go.Heatmap(
z=data, x=feat, y=feat, colorscale="Viridis" if colorscale == 'Viridis' else "Plasma",
        # z values: the coefficient of determination (R²) between each pair of features
colorbar={"title": "R²", 'tickvals': [0, 0.2, 0.4, 0.6, 0.8, 1]}))
return {'data': traces,
'layout': go.Layout(title=dict(text='<b>Feature Correlation Analysis</b>', y=0.97, x=0.6),
xaxis={},
titlefont=dict(family='Helvetica', size=16),
yaxis={},
hovermode='closest', margin={'b': 110, 't': 50, 'l': 180, 'r': 50},
font=dict(family="Helvetica", size=11)),
}
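# The 'Feature correlation' tab text suggests keeping only one variable from each highly correlated
# pair (correlation coefficient cut-off ~0.7). A minimal sketch of how such pairs could be listed
# (illustrative only; the app leaves the choice of which variable to drop to the user):
def highly_correlated_pairs(dff, cutoff=0.7):
    """Return [(feature_a, feature_b, r), ...] for feature pairs with |Pearson r| >= cutoff."""
    corr = dff.corr(method='pearson')
    pairs = []
    for i, a in enumerate(corr.columns):
        for b in corr.columns[i + 1:]:
            r = corr.loc[a, b]
            if abs(r) >= cutoff:
                pairs.append((a, b, r))
    return pairs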
@app.callback(Output('feature-input', 'options'),
[Input('all-custom-choice', 'value'),
Input('csv-data', 'data')])
def activate_input(all_custom, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
options = []
elif all_custom == 'Custom':
options = [{'label': i, 'value': i} for i in dff.columns]
return options
@app.callback(Output('color-scale-scores', 'options'),
[Input('feature-input', 'value'),
Input('radio-target-item', 'value'),
Input('outlier-value-biplot', 'value'),
Input('customvar-graph-update', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')], )
def populate_color_dropdown(input, target, outlier, graph_type, matrix_type, data):
if not data:
return dash.no_update
if input is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
dff_target = dff[input]
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
if target == 'Yes' and outlier == 'Yes' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'Yes' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'No' or graph_type == 'Loadings':
options = []
return options
@app.callback(Output('size-scale-scores', 'options'),
[Input('feature-input', 'value'),
Input('radio-target-item-second', 'value'),
Input('outlier-value-biplot', 'value'),
Input('customvar-graph-update', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')])
def populate_size_dropdown(input, target, outlier, graph_type, matrix_type, data):
if not data:
return dash.no_update
if input is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
dff_target = dff[input]
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
if target == 'Yes' and outlier == 'Yes' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'Yes' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'No' or graph_type == 'Loadings':
options = []
return options
# resume covar matrix...
@app.callback(Output('biplot', 'figure'),
[
Input('outlier-value-biplot', 'value'),
Input('feature-input', 'value'),
Input('customvar-graph-update', 'value'),
Input('color-scale-scores', 'value'),
Input('radio-target-item', 'value'),
Input('size-scale-scores', 'value'),
Input('radio-target-item-second', 'value'),
Input('all-custom-choice', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')
]
)
def update_graph_custom(outlier, input, graph_update, color, target, size, target2, all_custom, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
features1 = dff.columns
features = list(features1)
if all_custom == 'All':
# x_scale = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale)
# OUTLIER DATA
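        # rows are kept only when every feature's absolute z-score is below 3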
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# ORIGINAL DATA WITH OUTLIERS
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
# x_scale = rescale(x_scale, new_min=0, new_max=1)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
dfff_scale = finalDf_scale.fillna(0)
Var_scale = pca_scale.explained_variance_ratio_
# calculating loading vector plot
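        # loadings = eigenvectors scaled by sqrt(eigenvalues); for standardized
        # (correlation-matrix) PCA these correspond to the correlations between the
        # original features and the principal components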
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
loading_scale_df = pd.DataFrame(data=loading_scale[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_df = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff = pd.concat([loading_scale_df, line_group_scale_df], axis=1)
a = (len(features), 2)
zero_scale = np.zeros(a)
zero_scale_df = pd.DataFrame(data=zero_scale, columns=["PC1", "PC2"])
zero_scale_dff = pd.concat([zero_scale_df, line_group_scale_df], axis=1)
loading_scale_line_graph = pd.concat([loading_scale_dff, zero_scale_dff], axis=0)
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
# x_outlier_scale = MinMaxScaler().fit_transform(x_outlier_scale)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_outlier_scale = rescale(x_outlier_scale, new_min=0, new_max=1)
# uses covariance matrix
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
dfff_outlier_scale = finalDf_outlier_scale.fillna(0)
# calculating loading
Var_outlier_scale = pca_outlier_scale.explained_variance_ratio_
# calculating loading vector plot
loading_outlier_scale = pca_outlier_scale.components_.T * np.sqrt(pca_outlier_scale.explained_variance_)
loading_outlier_scale_df = pd.DataFrame(data=loading_outlier_scale[:, 0:2],
columns=["PC1", "PC2"])
line_group_df = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff = pd.concat([loading_outlier_scale_df, line_group_df], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale = np.zeros(a)
zero_outlier_scale_df = pd.DataFrame(data=zero_outlier_scale, columns=["PC1", "PC2"])
zero_outlier_scale_dff = pd.concat([zero_outlier_scale_df, line_group_df], axis=1)
loading_outlier_scale_line_graph = pd.concat([loading_outlier_scale_dff, zero_outlier_scale_dff], axis=0)
# COVARIANCE MATRIX
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
dfff_scale_covar = finalDf_scale_covar.fillna(0)
Var_scale_covar = pca_scale_covar.explained_variance_ratio_
loading_scale_covar = pca_scale_covar.components_.T * np.sqrt(pca_scale_covar.explained_variance_)
loading_scale_df_covar = pd.DataFrame(data=loading_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_df_covar = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff_covar = pd.concat([loading_scale_df_covar, line_group_scale_df_covar], axis=1)
a = (len(features), 2)
zero_scale_covar = np.zeros(a)
zero_scale_df_covar = pd.DataFrame(data=zero_scale_covar, columns=["PC1", "PC2"])
zero_scale_dff_covar = pd.concat([zero_scale_df_covar, line_group_scale_df_covar], axis=1)
loading_scale_line_graph_covar = pd.concat([loading_scale_dff_covar, zero_scale_dff_covar], axis=0)
# COVARIANCE MATRIX OUTLIERS
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
finalDf_outlier_scale_covar = pd.concat([outlier_names, principalDf_outlier_scale_covar], axis=1)
dfff_outlier_scale_covar = finalDf_outlier_scale_covar.fillna(0)
Var_outlier_scale_covar = pca_outlier_scale_covar.explained_variance_ratio_
# calculating loading vector plot
loading_outlier_scale_covar = pca_outlier_scale_covar.components_.T * np.sqrt(
pca_outlier_scale_covar.explained_variance_)
loading_outlier_scale_df_covar = pd.DataFrame(data=loading_outlier_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_df_covar = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff_covar = pd.concat([loading_outlier_scale_df_covar, line_group_df_covar], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale_covar = np.zeros(a)
zero_outlier_scale_df_covar = pd.DataFrame(data=zero_outlier_scale_covar, columns=["PC1", "PC2"])
zero_outlier_scale_dff_covar = pd.concat([zero_outlier_scale_df_covar, line_group_df_covar], axis=1)
loading_outlier_scale_line_graph_covar = pd.concat(
[loading_outlier_scale_dff_covar, zero_outlier_scale_dff_covar], axis=0)
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_outlier_scale_covar
trace2_all = go.Scatter(x=dat['PC1'], y=dat['PC2'], mode='markers',
text=dat[dat.columns[0]],
hovertemplate=
'<b>%{text}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
marker=dict(opacity=0.7, showscale=False, size=12,
line=dict(width=0.5, color='DarkSlateGrey'),
),
)
####################################################################################################
# INCLUDE THIS
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_line_graph
variance = Var_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_outlier_scale_line_graph
variance = Var_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_line_graph_covar
variance = Var_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_outlier_scale_line_graph_covar
variance = Var_outlier_scale_covar
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf_all = data[data['line_group'] == i]
trace1_all = go.Scatter(x=dataf_all['PC1'], y=dataf_all['PC2'], line=dict(color="#4f4f4f"),
name=i,
# text=i,
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
mode='lines+text',
textposition='bottom right', textfont=dict(size=12)
)
lists[counter] = trace1_all
counter = counter + 1
####################################################################################################
if graph_update == 'Biplot':
lists.insert(0, trace2_all)
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif graph_update == 'Loadings':
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif all_custom == 'Custom':
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# OUTLIER DATA INPUT
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
# x_scale_input = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_scale_input = rescale(x_scale_input, new_min=0, new_max=1)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
Var_scale_input = pca_scale_input.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input = pca_scale_input.components_.T * np.sqrt(pca_scale_input.explained_variance_)
loading_scale_input_df = pd.DataFrame(data=loading_scale_input[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_df = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff = pd.concat([loading_scale_input_df, line_group_scale_input_df],
axis=1)
a = (len(features_input), 2)
zero_scale_input = np.zeros(a)
zero_scale_input_df = pd.DataFrame(data=zero_scale_input, columns=["PC1", "PC2"])
zero_scale_input_dff = pd.concat([zero_scale_input_df, line_group_scale_input_df], axis=1)
loading_scale_input_line_graph = pd.concat([loading_scale_input_dff, zero_scale_input_dff],
axis=0)
# INPUT DATA WITH REMOVING OUTLIERS
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
# x_scale_input_outlier = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input_outlier)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_scale_input_outlier = rescale(x_scale_input_outlier, new_min=0, new_max=1)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
Var_scale_input_outlier = pca_scale_input_outlier.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier = pca_scale_input_outlier.components_.T * np.sqrt(
pca_scale_input_outlier.explained_variance_)
loading_scale_input_outlier_df = pd.DataFrame(data=loading_scale_input_outlier[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_outlier_df = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff = pd.concat([loading_scale_input_outlier_df, line_group_scale_input_outlier_df],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier = np.zeros(a)
zero_scale_input_outlier_df = pd.DataFrame(data=zero_scale_input_outlier, columns=["PC1", "PC2"])
zero_scale_input_outlier_dff = pd.concat([zero_scale_input_outlier_df, line_group_scale_input_outlier_df],
axis=1)
loading_scale_input_outlier_line_graph = pd.concat(
[loading_scale_input_outlier_dff, zero_scale_input_outlier_dff],
axis=0)
# COVARIANCE MATRIX
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target], axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
Var_scale_input_covar = pca_scale_input_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_covar = pca_scale_input_covar.components_.T * np.sqrt(
pca_scale_input_covar.explained_variance_)
loading_scale_input_df_covar = pd.DataFrame(data=loading_scale_input_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_df_covar = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff_covar = pd.concat([loading_scale_input_df_covar, line_group_scale_input_df_covar],
axis=1)
a = (len(features_input), 2)
zero_scale_input_covar = np.zeros(a)
zero_scale_input_df_covar = pd.DataFrame(data=zero_scale_input_covar, columns=["PC1", "PC2"])
zero_scale_input_dff_covar = pd.concat([zero_scale_input_df_covar, line_group_scale_input_df_covar], axis=1)
loading_scale_input_line_graph_covar = pd.concat([loading_scale_input_dff_covar, zero_scale_input_dff_covar],
axis=0)
# COVARIANCE MATRIX OUTLIERS
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier_covar = dff_input_outlier.loc[:, ].values
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
Var_scale_input_outlier_covar = pca_scale_input_outlier_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier_covar = pca_scale_input_outlier_covar.components_.T * np.sqrt(
pca_scale_input_outlier_covar.explained_variance_)
loading_scale_input_outlier_df_covar = pd.DataFrame(data=loading_scale_input_outlier_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_outlier_df_covar = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff_covar = pd.concat([loading_scale_input_outlier_df_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier_covar = np.zeros(a)
zero_scale_input_outlier_df_covar = pd.DataFrame(data=zero_scale_input_outlier_covar, columns=["PC1", "PC2"])
zero_scale_input_outlier_dff_covar = pd.concat([zero_scale_input_outlier_df_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
loading_scale_input_outlier_line_graph_covar = pd.concat(
[loading_scale_input_outlier_dff_covar, zero_scale_input_outlier_dff_covar],
axis=0)
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale_input
variance = Var_scale_input
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_scale_input_outlier
variance = Var_scale_input_outlier
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_input_covar
variance = Var_scale_input_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_scale_input_outlier_covar
variance = Var_scale_input_outlier_covar
trace2 = go.Scatter(x=dat['PC1'], y=dat['PC2'], mode='markers',
marker_color=dat[color] if target == 'Yes' else None,
marker_size=dat[size] if target2 == 'Yes' else 12,
text=dat[dat.columns[0]],
hovertemplate=
'<b>%{text}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
marker=dict(opacity=0.7, colorscale='Plasma',
sizeref=max(dat[size]) / (15 ** 2) if target2 == 'Yes' else None,
sizemode='area',
showscale=True if target == 'Yes' else False,
line=dict(width=0.5, color='DarkSlateGrey'),
colorbar=dict(title=dict(text=color if target == 'Yes' else None,
font=dict(family='Helvetica'),
side='right'), ypad=0),
),
)
####################################################################################################
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_input_line_graph
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_scale_input_outlier_line_graph
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_input_line_graph_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_scale_input_outlier_line_graph_covar
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf = data[data['line_group'] == i]
trace1 = go.Scatter(x=dataf['PC1'], y=dataf['PC2'],
line=dict(color="#666666" if target == 'Yes' else '#4f4f4f'), name=i,
# text=i,
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
mode='lines+text', textposition='bottom right', textfont=dict(size=12),
)
lists[counter] = trace1
counter = counter + 1
####################################################################################################
if graph_update == 'Biplot':
lists.insert(0, trace2)
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif graph_update == 'Loadings':
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
@app.callback(
Output('size-second-target-container', 'children'),
[Input('size-scale-scores', 'value'),
Input('outlier-value-biplot', 'value'),
Input('csv-data', 'data')
]
)
def update_output(size, outlier, data):
if not data:
return dash.no_update
if size is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
z_scores_dff_size = scipy.stats.zscore(dff)
abs_z_scores_dff_size = np.abs(z_scores_dff_size)
filtered_entries_dff_size = (abs_z_scores_dff_size < 3).all(axis=1)
dff_target_outlier_size = dff[filtered_entries_dff_size]
if outlier == 'Yes':
size_range = [round(dff_target_outlier_size[size].min(), 2), round(dff_target_outlier_size[size].max(), 2)]
elif outlier == 'No':
size_range = [round(dff[size].min(), 2), round(dff[size].max(), 2)]
return '{}'.format(size_range)
@app.callback(Output('cos2-plot', 'figure'),
[
Input('outlier-value-cos2', 'value'),
Input('feature-input', 'value'),
Input('all-custom-choice', 'value'),
Input("matrix-type-cos2", "value"),
Input('csv-data', 'data')
])
def update_cos2_plot(outlier, input, all_custom, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
# x_scale = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale)
features1 = dff.columns
features = list(features1)
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# ORIGINAL DATA WITH OUTLIERS
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
Var_scale = pca_scale.explained_variance_ratio_
# calculating loading vector plot
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
loading_scale_df = pd.DataFrame(data=loading_scale[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_df['cos2'] = (loading_scale_df["PC1"] ** 2) + (loading_scale_df["PC2"] ** 2)
line_group_scale_df = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff = pd.concat([loading_scale_df, line_group_scale_df], axis=1)
a = (len(features), 2)
zero_scale = np.zeros(a)
zero_scale_df = pd.DataFrame(data=zero_scale, columns=["PC1", "PC2"])
        zero_scale_df_color = pd.DataFrame(data=loading_scale_df.iloc[:, 2], columns=['cos2'])
# *****************************************************************************
# © Copyright IBM Corp. 2018. All Rights Reserved.
#
# This program and the accompanying materials
# are made available under the terms of the Apache V2.0 license
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
# *****************************************************************************
"""
The Built In Functions module contains preinstalled functions
"""
import datetime as dt
import logging
import re
import time
import warnings
from collections import OrderedDict
import numpy as np
import pandas as pd
from sqlalchemy import String
from .base import (BaseTransformer, BaseEvent, BaseSCDLookup, BaseSCDLookupWithDefault, BaseMetadataProvider,
BasePreload, BaseDatabaseLookup, BaseDataSource, BaseDBActivityMerge, BaseSimpleAggregator)
from .loader import _generate_metadata
from .ui import (UISingle, UIMultiItem, UIFunctionOutSingle, UISingleItem, UIFunctionOutMulti, UIMulti, UIExpression,
UIText, UIParameters)
from .util import adjust_probabilities, reset_df_index, asList
from ibm_watson_machine_learning import APIClient
logger = logging.getLogger(__name__)
PACKAGE_URL = 'git+https://github.com/ibm-watson-iot/functions.git@'
_IS_PREINSTALLED = True
class ActivityDuration(BaseDBActivityMerge):
"""
Merge data from multiple tables containing activities. An activity table
must have a deviceid, activity_code, start_date and end_date. The
function returns an activity duration for each selected activity code.
"""
_is_instance_level_logged = False
def __init__(self, table_name, activity_codes, activity_duration=None, additional_items=None,
additional_output_names=None):
super().__init__(input_activities=activity_codes, activity_duration=activity_duration,
additional_items=additional_items, additional_output_names=additional_output_names)
self.table_name = table_name
self.activity_codes = activity_codes
self.activities_metadata[table_name] = activity_codes
self.activities_custom_query_metadata = {}
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingle(name='table_name', datatype=str, description='Source table name', ))
inputs.append(UIMulti(name='activity_codes', datatype=str, description='Comma separated list of activity codes',
output_item='activity_duration', is_output_datatype_derived=False, output_datatype=float))
inputs.append(UIMulti(name='additional_items', datatype=str, required=False,
description='Comma separated list of additional column names to retrieve',
output_item='additional_output_names', is_output_datatype_derived=True,
output_datatype=None))
outputs = []
return (inputs, outputs)
class AggregateWithExpression(BaseSimpleAggregator):
"""
Create aggregation using expression. The calculation is evaluated for
each data_item selected. The data item will be made available as a
Pandas Series. Refer to the Pandas series using the local variable named
"x". The expression must return a scalar value.
Example:
x.max() - x.min()
"""
def __init__(self, source=None, expression=None, name=None):
super().__init__()
logger.info('AggregateWithExpression _init')
self.source = source
self.expression = expression
self.name = name
@classmethod
def build_ui(cls):
inputs = []
inputs.append(UIMultiItem(name='source', datatype=None,
description=('Choose the data items that you would like to aggregate'),
output_item='name', is_output_datatype_derived=True))
inputs.append(UIExpression(name='expression', description='Paste in or type an AS expression'))
return (inputs, [])
def execute(self, x):
logger.info('Execute AggregateWithExpression')
logger.debug('Source ' + str(self.source) + 'Expression ' + str(self.expression) + 'Name ' + str(self.name))
y = eval(self.expression)
self.log_df_info(y, 'AggregateWithExpression evaluation')
return y
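# Illustrative sketch (not part of the original module): the aggregation expression is
# evaluated with the selected column exposed as the pandas Series "x", so a range
# aggregation could be registered roughly as
#   AggregateWithExpression(source=['temperature'], expression='x.max() - x.min()',
#                           name='temperature_range')
# The item name 'temperature' is an assumption; the registration flow depends on the
# surrounding pipeline.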
class AlertExpression(BaseEvent):
"""
    Create alerts that are triggered when the expression evaluates to True
"""
def __init__(self, expression, alert_name, **kwargs):
self.expression = expression
self.alert_name = alert_name
super().__init__()
def _calc(self, df):
"""
unused
"""
return df
def execute(self, df):
c = self._entity_type.get_attributes_dict()
df = df.copy()
if '${' in self.expression:
expr = re.sub(r"\$\{(\w+)\}", r"df['\1']", self.expression)
msg = 'Expression converted to %s. ' % expr
else:
expr = self.expression
msg = 'Expression (%s). ' % expr
self.trace_append(msg)
df[self.alert_name] = np.where(eval(expr), True, None)
return df
def get_input_items(self):
items = self.get_expression_items(self.expression)
return items
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UIExpression(name='expression',
description="Define alert expression using pandas systax. Example: df['inlet_temperature']>50"))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle(name='alert_name', datatype=bool, description='Output of alert function'))
return (inputs, outputs)
class AlertExpressionWithFilter(BaseEvent):
"""
    Create alerts that are triggered when the expression evaluates to True
"""
def __init__(self, expression, dimension_name, dimension_value, alert_name, **kwargs):
self.dimension_name = dimension_name
self.dimension_value = dimension_value
self.expression = expression
self.pulse_trigger = False
self.alert_name = alert_name
self.alert_end = None
logger.info(
'AlertExpressionWithFilter dim: ' + str(dimension_name) + ' exp: ' + str(expression) + ' alert: ' + str(
alert_name))
super().__init__()
# evaluate alerts by entity
def _calc(self, df):
c = self._entity_type.get_attributes_dict()
df = df.copy()
logger.info('AlertExpressionWithFilter exp: ' + self.expression + ' input: ' + str(df.columns))
expr = self.expression
if '${' in expr:
expr = re.sub(r"\$\{(\w+)\}", r"df['\1']", expr)
msg = 'Expression converted to %s. ' % expr
else:
msg = 'Expression (%s). ' % expr
self.trace_append(msg)
expr = str(expr)
logger.info('AlertExpressionWithFilter - after regexp: ' + expr)
try:
evl = eval(expr)
n1 = np.where(evl, 1, 0)
if self.dimension_name is None or self.dimension_value is None or len(self.dimension_name) == 0 or len(
self.dimension_value) == 0:
n2 = n1
np_res = n1
else:
n2 = np.where(df[self.dimension_name] == self.dimension_value, 1, 0)
np_res = np.multiply(n1, n2)
# get time index
ts_ind = df.index.get_level_values(self._entity_type._timestamp)
if self.pulse_trigger:
# walk through all subsequences starting with the longest
# and replace all True with True, False, False, ...
for i in range(np_res.size, 2, -1):
for j in range(0, i - 1):
if np.all(np_res[j:i]):
np_res[j + 1:i] = np.zeros(i - j - 1, dtype=int)
np_res[j] = i - j # keep track of sequence length
if self.alert_end is not None:
alert_end = np.zeros(np_res.size)
for i in range(np_res.size):
if np_res[i] > 0:
alert_end[i] = ts_ind[i]
else:
if self.alert_end is not None:
df[self.alert_end] = df.index[0]
logger.info('AlertExpressionWithFilter shapes ' + str(n1.shape) + ' ' + str(n2.shape) + ' ' + str(
np_res.shape) + ' results\n - ' + str(n1) + '\n - ' + str(n2) + '\n - ' + str(np_res))
df[self.alert_name] = np_res
except Exception as e:
logger.info('AlertExpressionWithFilter eval for ' + expr + ' failed with ' + str(e))
df[self.alert_name] = None
pass
return df
def execute(self, df):
"""
unused
"""
return super().execute(df)
def get_input_items(self):
items = set(self.dimension_name)
items = items | self.get_expression_items(self.expression)
return items
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingleItem(name='dimension_name', datatype=str))
inputs.append(UISingle(name='dimension_value', datatype=str, description='Dimension Filter Value'))
        inputs.append(UIExpression(name='expression', description="Define alert expression using pandas syntax. \
Example: df['inlet_temperature']>50. ${pressure} will be substituted \
with df['pressure'] before evaluation, ${} with df[<dimension_name>]"))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle(name='alert_name', datatype=bool, description='Output of alert function'))
return (inputs, outputs)
class AlertExpressionWithFilterExt(AlertExpressionWithFilter):
"""
    Create alerts that are triggered when the expression evaluates to True
"""
def __init__(self, expression, dimension_name, dimension_value, pulse_trigger, alert_name, alert_end, **kwargs):
super().__init__(expression, dimension_name, dimension_value, alert_name, **kwargs)
if pulse_trigger is None:
self.pulse_trigger = True
if alert_end is not None:
self.alert_end = alert_end
logger.info('AlertExpressionWithFilterExt dim: ' + str(dimension_name) + ' exp: ' + str(
expression) + ' alert: ' + str(alert_name) + ' pulsed: ' + str(pulse_trigger))
def _calc(self, df):
"""
unused
"""
return df
def execute(self, df):
df = super().execute(df)
logger.info('AlertExpressionWithFilterExt generated columns: ' + str(df.columns))
return df
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingleItem(name='dimension_name', datatype=str))
inputs.append(UISingle(name='dimension_value', datatype=str, description='Dimension Filter Value'))
        inputs.append(UIExpression(name='expression', description="Define alert expression using pandas syntax. \
Example: df['inlet_temperature']>50. ${pressure} will be substituted \
with df['pressure'] before evaluation, ${} with df[<dimension_name>]"))
inputs.append(
UISingle(name='pulse_trigger', description="If true only generate alerts on crossing the threshold",
datatype=bool))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle(name='alert_name', datatype=bool, description='Output of alert function'))
outputs.append(
UIFunctionOutSingle(name='alert_end', datatype=dt.datetime, description='End of pulse triggered alert'))
return (inputs, outputs)
class AlertOutOfRange(BaseEvent):
"""
    Fire alert when a metric exceeds an upper threshold or drops below a lower threshold. Specify at least one threshold.
"""
def __init__(self, input_item, lower_threshold=None, upper_threshold=None, output_alert_upper=None,
output_alert_lower=None, **kwargs):
self.input_item = input_item
if lower_threshold is not None:
lower_threshold = float(lower_threshold)
self.lower_threshold = lower_threshold
if upper_threshold is not None:
upper_threshold = float(upper_threshold)
self.upper_threshold = upper_threshold
if output_alert_lower is None:
self.output_alert_lower = 'output_alert_lower'
else:
self.output_alert_lower = output_alert_lower
if output_alert_upper is None:
self.output_alert_upper = 'output_alert_upper'
else:
self.output_alert_upper = output_alert_upper
super().__init__()
def _calc(self, df):
"""
unused
"""
def execute(self, df):
# c = self._entity_type.get_attributes_dict()
df = df.copy()
df[self.output_alert_upper] = False
df[self.output_alert_lower] = False
if self.lower_threshold is not None:
df[self.output_alert_lower] = np.where(df[self.input_item] <= self.lower_threshold, True, None)
if self.upper_threshold is not None:
df[self.output_alert_upper] = np.where(df[self.input_item] >= self.upper_threshold, True, None)
return df
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingleItem(name='input_item', datatype=None, description='Item to alert on'))
inputs.append(UISingle(name='lower_threshold', datatype=float,
description='Alert when item value is lower than this value', required=False, ))
inputs.append(UISingle(name='upper_threshold', datatype=float,
description='Alert when item value is higher than this value', required=False, ))
# define arguments that behave as function outputs
outputs = []
outputs.append(
UIFunctionOutSingle(name='output_alert_lower', datatype=bool, description='Output of alert function'))
outputs.append(
UIFunctionOutSingle(name='output_alert_upper', datatype=bool, description='Output of alert function'))
return (inputs, outputs)
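# Illustrative sketch (assumed data item names, not from the original source): a band
# alert on an inlet temperature item could be configured roughly as
#   AlertOutOfRange(input_item='inlet_temperature', lower_threshold=10,
#                   upper_threshold=50, output_alert_lower='too_cold',
#                   output_alert_upper='too_hot')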
class AlertHighValue(BaseEvent):
"""
    Fire alert when a metric exceeds an upper threshold.
"""
def __init__(self, input_item, upper_threshold=None, alert_name=None, **kwargs):
self.input_item = input_item
self.upper_threshold = float(upper_threshold)
if alert_name is None:
self.alert_name = 'alert_name'
else:
self.alert_name = alert_name
super().__init__()
def _calc(self, df):
"""
unused
"""
def execute(self, df):
df = df.copy()
df[self.alert_name] = np.where(df[self.input_item] >= self.upper_threshold, True, None)
return df
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingleItem(name='input_item', datatype=None, description='Item to alert on'))
inputs.append(UISingle(name='upper_threshold', datatype=float,
description='Alert when item value is higher than this value'))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle(name='alert_name', datatype=bool, description='Output of alert function'))
return (inputs, outputs)
def _getMetadata(self, df=None, new_df=None, inputs=None, outputs=None, constants=None):
return self.build_ui()
class AlertLowValue(BaseEvent):
"""
    Fire alert when a metric goes below a lower threshold.
"""
def __init__(self, input_item, lower_threshold=None, alert_name=None, **kwargs):
self.input_item = input_item
self.lower_threshold = float(lower_threshold)
if alert_name is None:
self.alert_name = 'alert_name'
else:
self.alert_name = alert_name
super().__init__()
def _calc(self, df):
"""
unused
"""
return df
def execute(self, df):
# c = self._entity_type.get_attributes_dict()
df = df.copy()
df[self.alert_name] = np.where(df[self.input_item] <= self.lower_threshold, True, None)
return df
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingleItem(name='input_item', datatype=None, description='Item to alert on'))
inputs.append(UISingle(name='lower_threshold', datatype=float,
description='Alert when item value is lower than this value'))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle(name='alert_name', datatype=bool, description='Output of alert function'))
return (inputs, outputs)
class AutoTest(BaseTransformer):
"""
Test the results of pipeline execution against a known test dataset.
The test will compare calculated values with values in the test dataset.
    Discrepancies will be written to a test output file.
Note: This function is experimental
"""
def __init__(self, test_datset_name, columns_to_test, result_col=None):
super().__init__()
self.test_datset_name = test_datset_name
self.columns_to_test = columns_to_test
if result_col is None:
self.result_col = 'test_result'
else:
self.result_col = result_col
def execute(self, df):
db = self.get_db()
# bucket = self.get_bucket_name()
file = db.model_store.retrieve_model(self.test_datset_name)
logger.debug('AutoTest executed - result in ' + str(file))
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = OrderedDict()
inputs['test_datset_name'] = UISingle(name='test_datset_name', datatype=str,
description=('Name of cos object containing'
' test data. Object is a pickled '
' dataframe. Object must be placed '
' in the bos_runtime_bucket'))
inputs['columns_to_test'] = UIMultiItem(name='input_items', datatype=None,
description=('Choose the data items that'
' you would like to compare'))
outputs = OrderedDict()
return (inputs, outputs)
class Coalesce(BaseTransformer):
"""
Return first non-null value from a list of data items.
"""
def __init__(self, data_items, output_item=None):
super().__init__()
self.data_items = data_items
if output_item is None:
self.output_item = 'output_item'
else:
self.output_item = output_item
def execute(self, df):
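        # back-fill across columns so that the first non-null value of each row lands in
        # the first column, then keep that column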
df[self.output_item] = df[self.data_items].bfill(axis=1).iloc[:, 0]
return df
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UIMultiItem('data_items'))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle('output_item', datatype=float))
return (inputs, outputs)
class CoalesceDimension(BaseTransformer):
"""
Return first non-null value from a list of data items.
"""
def __init__(self, data_items, output_item=None):
super().__init__()
self.data_items = data_items
if output_item is None:
self.output_item = 'output_item'
else:
self.output_item = output_item
def execute(self, df):
df[self.output_item] = df[self.data_items].bfill(axis=1).iloc[:, 0]
return df
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UIMultiItem('data_items'))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle('output_item', datatype=str, tags=['DIMENSION']))
return (inputs, outputs)
class ConditionalItems(BaseTransformer):
"""
Return null unless a condition is met.
eg. if df["sensor_is_valid"]==True then deliver the value of df["temperature"] else deliver Null
"""
def __init__(self, conditional_expression, conditional_items, output_items=None):
super().__init__()
self.conditional_expression = self.parse_expression(conditional_expression)
self.conditional_items = conditional_items
if output_items is None:
output_items = ['conditional_%s' % x for x in conditional_items]
self.output_items = output_items
def execute(self, df):
c = self._entity_type.get_attributes_dict()
df = df.copy()
result = eval(self.conditional_expression)
for i, o in enumerate(self.conditional_items):
df[self.output_items[i]] = np.where(result, df[o], None)
return df
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UIExpression(name='conditional_expression',
description="expression that returns a True/False value, eg. if df['sensor_is_valid']==True"))
inputs.append(UIMultiItem(name='conditional_items', datatype=None,
description='Data items that have conditional values, e.g. temp and pressure'))
# define arguments that behave as function outputs
outputs = []
outputs.append(
UIFunctionOutMulti(name='output_items', cardinality_from='conditional_items', is_datatype_derived=False,
description='Function output items'))
return (inputs, outputs)
def get_input_items(self):
items = self.get_expression_items(self.conditional_expression)
return items
class DateDifference(BaseTransformer):
"""
    Calculate the difference between two date data items in days, i.e. date_2 - date_1
"""
def __init__(self, date_1, date_2, num_days=None):
super().__init__()
self.date_1 = date_1
self.date_2 = date_2
if num_days is None:
self.num_days = 'num_days'
else:
self.num_days = num_days
def execute(self, df):
if self.date_1 is None or self.date_1 == self._entity_type._timestamp:
ds_1 = self.get_timestamp_series(df)
ds_1 = pd.to_datetime(ds_1)
else:
ds_1 = df[self.date_1]
if self.date_2 is None or self.date_2 == self._entity_type._timestamp:
ds_2 = self.get_timestamp_series(df)
ds_2 = pd.to_datetime(ds_2)
else:
ds_2 = df[self.date_2]
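        # express the timedelta between the two dates as fractional days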
df[self.num_days] = (ds_2 - ds_1).dt.total_seconds() / (60 * 60 * 24)
return df
@classmethod
def build_ui(cls):
"""
Registration metadata
"""
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingleItem(name='date_1', datatype=dt.datetime, required=False,
description=('Date data item. Use timestamp'
' if no date specified')))
inputs.append(UISingleItem(name='date_2', datatype=dt.datetime, required=False,
description=('Date data item. Use timestamp'
' if no date specified')))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle(name='num_days', datatype=float, description='Number of days'))
return (inputs, outputs)
class DateDifferenceConstant(BaseTransformer):
"""
Calculate the difference between a data item and a constant_date,
    i.e. constant_date - date_1
"""
def __init__(self, date_1, date_constant, num_days=None):
super().__init__()
self.date_1 = date_1
self.date_constant = date_constant
if num_days is None:
self.num_days = 'num_days'
else:
self.num_days = num_days
def execute(self, df):
if self.date_1 is None or self.date_1 == self._entity_type._timestamp:
ds_1 = self.get_timestamp_series(df)
ds_1 = pd.to_datetime(ds_1)
else:
ds_1 = df[self.date_1]
c = self._entity_type.get_attributes_dict()
constant_value = c[self.date_constant]
ds_2 = pd.Series(data=constant_value, index=df.index)
ds_2 = pd.to_datetime(ds_2)
df[self.num_days] = (ds_2 - ds_1).dt.total_seconds() / (60 * 60 * 24)
return df
@classmethod
def build_ui(cls):
"""
Registration metadata
"""
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingleItem(name='date_1', datatype=dt.datetime, required=False,
description=('Date data item. Use timestamp'
' if no date specified')))
inputs.append(UISingle(name='date_constant', datatype=str, description='Name of datetime constant'))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle(name='num_days', datatype=float, description='Number of days'))
return (inputs, outputs)
class DatabaseLookup(BaseDatabaseLookup):
"""
Lookup columns from a database table. The lookup is time invariant. Lookup key column names
must match data items names. Example: Lookup EmployeeCount and Country from a Company lookup
table that is keyed on country_code.
"""
# create the table and populate it using the data dict
_auto_create_lookup_table = False
def __init__(self, lookup_table_name, lookup_keys, lookup_items, parse_dates=None, output_items=None):
super().__init__(lookup_table_name=lookup_table_name, lookup_keys=lookup_keys, lookup_items=lookup_items,
parse_dates=parse_dates, output_items=output_items)
@classmethod
def build_ui(cls):
"""
Registration metadata
"""
# define arguments that behave as function inputs
inputs = []
inputs.append(
UISingle(name='lookup_table_name', datatype=str, description='Table name to perform lookup against')),
inputs.append(UIMulti(name='lookup_keys', datatype=str, description='Data items to use as a key to the lookup'))
inputs.append(UIMulti(name='lookup_items', datatype=str, description='columns to return from the lookup')),
inputs.append(UIMulti(name='parse_dates', datatype=str, description='columns that should be converted to dates',
required=False))
# define arguments that behave as function outputs
outputs = []
outputs.append(
UIFunctionOutMulti(name='output_items', cardinality_from='lookup_items', is_datatype_derived=False,
description='Function output items', tags=['DIMENSION']))
return (inputs, outputs)
def get_item_values(self, arg, db):
        raise NotImplementedError('No item values available for generic database lookup function. \
Implement a specific one for each table to define item values. ')
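# Illustrative sketch (table and column names are assumptions): mirroring the docstring
# example, a company lookup keyed on a country_code data item could look roughly like
#   DatabaseLookup(lookup_table_name='company', lookup_keys=['country_code'],
#                  lookup_items=['employee_count', 'country'],
#                  output_items=['employee_count', 'country'])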
class DeleteInputData(BasePreload):
"""
Delete data from time series input table for entity type
"""
def __init__(self, dummy_items, older_than_days, output_item=None):
super().__init__(dummy_items=dummy_items)
self.older_than_days = older_than_days
if output_item is None:
self.output_item = 'output_item'
else:
self.output_item = output_item
def execute(self, df=None, start_ts=None, end_ts=None, entities=None):
entity_type = self.get_entity_type()
self.get_db().delete_data(table_name=entity_type.name, schema=entity_type._db_schema,
timestamp=entity_type._timestamp, older_than_days=self.older_than_days)
msg = 'Deleted data for %s' % (self._entity_type.name)
logger.debug(msg)
return True
@classmethod
def build_ui(cls):
"""
Registration metadata
"""
# define arguments that behave as function inputs
inputs = []
inputs.append(UIMultiItem(name='dummy_items', datatype=None, description='Dummy data items'))
inputs.append(
UISingle(name='older_than_days', datatype=float, description='Delete data older than this many days'))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle(name='output_item', datatype=bool,
description='Returns a status flag of True when executed'))
return (inputs, outputs)
class DropNull(BaseMetadataProvider):
"""
Drop any row that has all null metrics
"""
def __init__(self, exclude_items, drop_all_null_rows=True, output_item=None):
if output_item is None:
output_item = 'drop_nulls'
kw = {'_custom_exclude_col_from_auto_drop_nulls': exclude_items, '_drop_all_null_rows': drop_all_null_rows}
super().__init__(dummy_items=exclude_items, output_item=output_item, **kw)
self.exclude_items = exclude_items
self.drop_all_null_rows = drop_all_null_rows
@classmethod
def build_ui(cls):
"""
Registration metadata
"""
# define arguments that behave as function inputs
inputs = []
inputs.append(UIMultiItem(name='exclude_items', datatype=None,
description='Ignore non-null values in these columns when dropping rows'))
inputs.append(
UISingle(name='drop_all_null_rows', datatype=bool, description='Enable or disable drop of all null rows'))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle(name='output_item', datatype=bool,
description='Returns a status flag of True when executed'))
return (inputs, outputs)
class EntityDataGenerator(BasePreload):
"""
Automatically load the entity input data table using new generated data.
Time series columns defined on the entity data table will be populated
with random data.
Optional parameters:
freq: pandas frequency string. Time series frequency.
scd_frequency: pandas frequency string. Dimension change frequency.
activity_frequency: pandas frequency string. Activity frequency.
    activities: dict keyed on activity name containing a list of activity codes.
    scds: dict keyed on scd property name containing a list of string domain items.
data_item_mean: dict keyed by data item name. Mean value.
data_item_sd: dict keyed by data item name. Standard deviation.
data_item_domain: dictionary keyed by data item name. List of values.
    drop_existing: bool. Drop existing input tables and generate new ones for each run.
"""
is_data_generator = True
freq = '5min'
scd_frequency = '1D'
activity_frequency = '3D'
start_entity_id = 73000 # used to build entity ids
auto_entity_count = 5 # default number of entities to generate data for
data_item_mean = None
data_item_sd = None
data_item_domain = None
activities = None
scds = None
drop_existing = False
# ids of entities to generate. Change the value of the range() function to change the number of entities
def __init__(self, ids=None, output_item=None, parameters=None, **kw):
if output_item is None:
output_item = 'entity_data_generator'
if parameters is None:
parameters = {}
parameters = {**kw, **parameters}
self.parameters = parameters
self.set_params(**parameters)
super().__init__(dummy_items=[], output_item=output_item)
if ids is None:
ids = self.get_entity_ids()
self.ids = ids
if self.data_item_mean is None:
self.data_item_mean = {}
if self.data_item_sd is None:
self.data_item_sd = {}
if self.data_item_domain is None:
self.data_item_domain = {}
if self.activities is None:
self.activities = {}
if self.scds is None:
self.scds = {}
def execute(self, df, start_ts=None, end_ts=None, entities=None):
# Define simulation related metadata on the entity type
if entities is None:
entities = self.ids
# Add scds
for key, values in list(self.scds.items()):
self._entity_type.add_slowly_changing_dimension(key, String(255))
self.data_item_domain[key] = values
# Add activities metadata to entity type
for key, codes in list(self.activities.items()):
name = '%s_%s' % (self._entity_type.name, key)
self._entity_type.add_activity_table(name, codes)
# Generate data
if start_ts is not None:
seconds = (dt.datetime.utcnow() - start_ts).total_seconds()
else:
seconds = pd.to_timedelta(self.freq).total_seconds()
df = self._entity_type.generate_data(entities=entities, days=0, seconds=seconds, freq=self.freq,
scd_freq=self.scd_frequency, write=True,
data_item_mean=self.data_item_mean, data_item_sd=self.data_item_sd,
data_item_domain=self.data_item_domain, drop_existing=self.drop_existing)
self.usage_ = len(df.index)
return True
def get_entity_ids(self):
"""
Generate a list of entity ids
"""
ids = [str(self.start_entity_id + x) for x in list(range(self.auto_entity_count))]
return (ids)
@classmethod
def build_ui(cls):
"""
Registration metadata
"""
# define arguments that behave as function inputs
inputs = []
inputs.append(
UIMulti(name='ids', datatype=str, description='Comma separate list of entity ids, e.g: X902-A01,X902-A03'))
inputs.append(UIParameters())
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle(name='output_item', datatype=bool,
description='Returns a status flag of True when executed'))
return (inputs, outputs)
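# Illustrative sketch (data item names are assumptions): the simulation parameters
# documented above are passed as keyword arguments or via the parameters dict, e.g.
#   EntityDataGenerator(ids=['73000', '73001'], freq='5min',
#                       data_item_mean={'temperature': 22.0},
#                       data_item_sd={'temperature': 2.0},
#                       data_item_domain={'status': ['run', 'idle']})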
class EntityFilter(BaseMetadataProvider):
"""
Filter data retrieval queries to retrieve only data for the entity ids
included in the filter
"""
def __init__(self, entity_list, output_item=None):
if output_item is None:
output_item = 'is_filter_set'
dummy_items = ['deviceid']
kwargs = {'_entity_filter_list': entity_list}
super().__init__(dummy_items, output_item=output_item, **kwargs)
self.entity_list = entity_list
@classmethod
def build_ui(cls):
"""
Registration metadata
"""
# define arguments that behave as function inputs
inputs = []
inputs.append(UIMulti(name='entity_list', datatype=str, description='comma separated list of entity ids'))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle(name='output_item', datatype=bool,
description='Returns a status flag of True when executed'))
return (inputs, outputs)
class PythonExpression(BaseTransformer):
"""
Create a new item from an expression involving other items
"""
def __init__(self, expression, output_name):
self.output_name = output_name
super().__init__()
# convert single quotes to double
self.expression = self.parse_expression(expression)
# registration
self.constants = ['expression']
self.outputs = ['output_name']
def execute(self, df):
c = self._entity_type.get_attributes_dict()
df = df.copy()
requested = list(self.get_input_items())
msg = self.expression + ' .'
self.trace_append(msg)
msg = 'Function requested items: %s . ' % ','.join(requested)
self.trace_append(msg)
df[self.output_name] = eval(self.expression)
return df
def get_input_items(self):
items = self.get_expression_items(self.expression)
return items
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UIExpression(name='expression',
description="Define alert expression using pandas systax. Example: df['inlet_temperature']>50"))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle(name='output_name', datatype=float, description='Output of expression'))
return (inputs, outputs)
class GetEntityData(BaseDataSource):
"""
Get time series data from an entity type. Provide the table name for the entity type and
specify the key column to use for mapping the source entity type to the destination.
e.g. Add temperature sensor data to a location entity type by selecting a location_id
as the mapping key on the source entity type
Note: This function is experimental
"""
is_deprecated = True
merge_method = 'outer'
allow_projection_list_trim = False
def __init__(self, source_entity_type_name, key_map_column, input_items, output_items=None):
warnings.warn('GetEntityData is deprecated.', DeprecationWarning)
self.source_entity_type_name = source_entity_type_name
self.key_map_column = key_map_column
super().__init__(input_items=input_items, output_items=output_items)
def get_data(self, start_ts=None, end_ts=None, entities=None):
db = self.get_db()
target = self.get_entity_type()
# get entity type metadata from the AS API
source = db.get_entity_type(self.source_entity_type_name)
source._checkpoint_by_entity = False
source._pre_aggregate_time_grain = target._pre_aggregate_time_grain
source._pre_agg_rules = target._pre_agg_rules
source._pre_agg_outputs = target._pre_agg_outputs
cols = [self.key_map_column, source._timestamp]
cols.extend(self.input_items)
renamed_cols = [target._entity_id, target._timestamp]
renamed_cols.extend(self.output_items)
df = source.get_data(start_ts=start_ts, end_ts=end_ts, entities=entities, columns=cols)
df = self.rename_cols(df, cols, renamed_cols)
return df
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingle(name='source_entity_type_name', datatype=str,
description="Enter the name of the entity type that you would like to retrieve data from"))
inputs.append(UISingle(name='key_map_column', datatype=str, description="Enter the name of the column on the source entity type that represents the map \
to the device id of this entity type"))
inputs.append(UIMulti(name='input_items', datatype=str,
description="Comma separated list of data item names to retrieve from the source entity type",
output_item='output_items', is_output_datatype_derived=True))
outputs = []
return (inputs, outputs)
class EntityId(BaseTransformer):
"""
Deliver a data item containing the id of each entity. Optionally only return the entity
id when one or more data items are populated, else deliver a null value.
"""
def __init__(self, data_items=None, output_item=None):
super().__init__()
self.data_items = data_items
if output_item is None:
self.output_item = 'entity_id'
else:
self.output_item = output_item
def execute(self, df):
df = df.copy()
if self.data_items is None:
df[self.output_item] = df[self.get_entity_type()._entity_id]
else:
df[self.output_item] = np.where(df[self.data_items].notna().max(axis=1),
df[self.get_entity_type()._entity_id], None)
return df
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UIMultiItem(name='data_items', datatype=None, required=False, description='Choose one or more data items. If data items are defined, \
entity id will only be shown if these data items are not null'))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle(name='output_item', datatype=bool, description='Dummy function output'))
return (inputs, outputs)
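# --- Illustrative usage sketch (not part of the original module) ---
# Emit the entity id only where the (assumed) 'temperature' item is populated.
# As with the other transformers, execute() is driven by the pipeline once the
# function is registered on an entity type.
def _example_entity_id():
    return EntityId(data_items=['temperature'], output_item='active_entity_id')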
class IfThenElse(BaseTransformer):
"""
Set the value of the output_item based on a conditional expression.
When the conditional expression returns a True value, return the value of the true_expression.
Example:
conditional_expression: df['x1'] > 5 * df['x2']
true expression: df['x2'] * 5
false expression: 0
"""
def __init__(self, conditional_expression, true_expression, false_expression, output_item=None):
super().__init__()
self.conditional_expression = self.parse_expression(conditional_expression)
self.true_expression = self.parse_expression(true_expression)
self.false_expression = self.parse_expression(false_expression)
if output_item is None:
self.output_item = 'output_item'
else:
self.output_item = output_item
def execute(self, df):
c = self._entity_type.get_attributes_dict()
df = df.copy()
df[self.output_item] = np.where(eval(self.conditional_expression), eval(self.true_expression),
eval(self.false_expression))
return df
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UIExpression(name='conditional_expression', description="expression that returns a True/False value, \
            eg. df['temp'] > 50"))
inputs.append(UIExpression(name='true_expression', description="expression when true, eg. df['temp']"))
inputs.append(UIExpression(name='false_expression', description='expression when false, eg. None'))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle(name='output_item', datatype=bool, description='Dummy function output'))
return (inputs, outputs)
def get_input_items(self):
items = self.get_expression_items([self.conditional_expression, self.true_expression, self.false_expression])
return items
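# --- Illustrative usage sketch (not part of the original module) ---
# Mirrors the example in the IfThenElse docstring: cap x1 at five times x2.
# 'x1' and 'x2' are assumed data item names.
def _example_if_then_else():
    return IfThenElse(conditional_expression="df['x1'] > 5 * df['x2']",
                      true_expression="df['x2'] * 5",
                      false_expression='0',
                      output_item='x1_capped')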
class PackageInfo(BaseTransformer):
"""
Show the version of a list of installed packages. Optionally install packages that are not installed.
"""
def __init__(self, package_names, add_to_trace=True, install_missing=True, version_output=None):
self.package_names = package_names
self.add_to_trace = add_to_trace
self.install_missing = install_missing
if version_output is None:
version_output = ['%s_version' % x for x in package_names]
self.version_output = version_output
super().__init__()
def execute(self, df):
import importlib
entity_type = self.get_entity_type()
df = df.copy()
for i, p in enumerate(self.package_names):
ver = ''
try:
installed_package = importlib.import_module(p)
except (BaseException):
if self.install_missing:
entity_type.db.install_package(p)
try:
installed_package = importlib.import_module(p)
except (BaseException):
ver = 'Package could not be installed'
else:
try:
ver = 'installed %s' % installed_package.__version__
except AttributeError:
ver = 'Package has no __version__ attribute'
else:
try:
ver = installed_package.__version__
except AttributeError:
ver = 'Package has no __version__ attribute'
df[self.version_output[i]] = ver
if self.add_to_trace:
msg = '( %s : %s)' % (p, ver)
self.trace_append(msg)
return df
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(
            UIMulti(name='package_names', datatype=str, description='Comma separated list of Python package names',
output_item='version_output', is_output_datatype_derived=False, output_datatype=str))
inputs.append(UISingle(name='install_missing', datatype=bool))
inputs.append(UISingle(name='add_to_trace', datatype=bool))
# define arguments that behave as function outputs
outputs = []
return (inputs, outputs)
class PythonFunction(BaseTransformer):
"""
Execute a paste-in function. A paste-in function is python function declaration
code block. The function must be called 'f' and accept two inputs:
df (a pandas DataFrame) and parameters (a dict that you can use
to externalize the configuration of the function).
The function can return a DataFrame,Series,NumpyArray or scalar value.
Example:
def f(df,parameters):
# generate an 2-D array of random numbers
output = np.random.normal(1,0.1,len(df.index))
return output
Function source may be pasted in or retrieved from Cloud Object Storage.
PythonFunction is currently experimental.
"""
function_name = 'f'
def __init__(self, function_code, input_items, output_item, parameters=None):
self.function_code = function_code
self.input_items = input_items
self.output_item = output_item
super().__init__()
if parameters is None:
parameters = {}
function_name = parameters.get('function_name', None)
if function_name is not None:
self.function_name = function_name
self.parameters = parameters
def execute(self, df):
# function may have already been serialized to cos
kw = {}
if not self.function_code.startswith('def '):
bucket = self.get_bucket_name()
fn = self._entity_type.db.model_store.retrieve_model(self.function_code)
kw['source'] = 'cos'
kw['filename'] = self.function_code
if fn is None:
                msg = (' Function text does not start with "def ". '
                       ' Function is assumed to be located in COS. '
                       ' Cannot locate function %s in COS. Make sure this '
                       ' function exists in the %s bucket' % (self.function_code, bucket))
raise RuntimeError(msg)
else:
fn = self._entity_type.db.make_function(function_name=self.function_name, function_code=self.function_code)
kw['source'] = 'paste-in code'
kw['filename'] = None
kw['input_items'] = self.input_items
kw['output_item'] = self.output_item
kw['entity_type'] = self._entity_type
kw['db'] = self._entity_type.db
kw['c'] = self._entity_type.get_attributes_dict()
kw['logger'] = logger
self.trace_append(msg=self.function_code, log_method=logger.debug, **kw)
result = fn(df=df, parameters={**kw, **self.parameters})
df[self.output_item] = result
return df
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UIMultiItem('input_items'))
inputs.append(UIText(name='function_code', description='Paste in your function definition'))
inputs.append(UISingle(name='parameters', datatype=dict, required=False,
description='optional parameters specified in json format'))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle('output_item', datatype=float))
return (inputs, outputs)
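# --- Illustrative usage sketch (not part of the original module) ---
# A paste-in function is supplied as source text that starts with 'def ' and
# defines a callable named 'f' accepting (df, parameters). The data item name
# 'temperature' is an assumed example.
_EXAMPLE_PASTE_IN_CODE = ("def f(df, parameters):\n"
                          "    import numpy as np\n"
                          "    return np.random.normal(1, 0.1, len(df.index))\n")
def _example_python_function():
    return PythonFunction(function_code=_EXAMPLE_PASTE_IN_CODE,
                          input_items=['temperature'],
                          output_item='randomized_temperature')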
class RaiseError(BaseTransformer):
"""
Halt execution of the pipeline raising an error that will be shown. This function is
useful for testing a pipeline that is running to completion but not delivering the expected results.
By halting execution of the pipeline you can view useful diagnostic information in an error
message displayed in the UI.
"""
def __init__(self, halt_after, abort_execution=True, output_item=None):
super().__init__()
self.halt_after = halt_after
self.abort_execution = abort_execution
if output_item is None:
self.output_item = 'pipeline_exception'
else:
self.output_item = output_item
def execute(self, df):
msg = self.log_df_info(df, 'Prior to raising error')
self.trace_append(msg)
msg = 'The calculation was halted deliberately by the IoTRaiseError function. Remove the IoTRaiseError \
function or disable "abort_execution" in the function configuration. '
if self.abort_execution:
raise RuntimeError(msg)
df[self.output_item] = True
return df
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UIMultiItem(name='halt_after', datatype=None, description='Raise error after calculating items'))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle(name='output_item', datatype=bool, description='Dummy function output'))
return (inputs, outputs)
class RandomNoise(BaseTransformer):
"""
Add random noise to one or more data items
"""
def __init__(self, input_items, standard_deviation, output_items):
super().__init__()
self.input_items = input_items
self.standard_deviation = standard_deviation
self.output_items = output_items
def execute(self, df):
for i, item in enumerate(self.input_items):
output = self.output_items[i]
random_noise = np.random.normal(0, self.standard_deviation, len(df.index))
df[output] = df[item] + random_noise
return df
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingle(name='standard_deviation', datatype=float, description="Standard deviation of noise"))
inputs.append(
UIMultiItem(name='input_items', description="Chose data items to add noise to", output_item='output_items',
is_output_datatype_derived=True))
outputs = []
return (inputs, outputs)
class RandomUniform(BaseTransformer):
"""
    Generate a uniformly distributed random number.
"""
def __init__(self, min_value, max_value, output_item=None):
super().__init__()
self.min_value = min_value
self.max_value = max_value
if output_item is None:
self.output_item = 'output_item'
else:
self.output_item = output_item
def execute(self, df):
df[self.output_item] = np.random.uniform(self.min_value, self.max_value, len(df.index))
return df
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingle(name='min_value', datatype=float))
inputs.append(UISingle(name='max_value', datatype=float))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle(name='output_item', datatype=float, description='Random output'))
return (inputs, outputs)
class RandomNormal(BaseTransformer):
"""
Generate a normally distributed random number.
"""
def __init__(self, mean, standard_deviation, output_item=None):
super().__init__()
self.mean = mean
self.standard_deviation = standard_deviation
if output_item is None:
self.output_item = 'output_item'
else:
self.output_item = output_item
def execute(self, df):
df[self.output_item] = np.random.normal(self.mean, self.standard_deviation, len(df.index))
return df
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingle(name='mean', datatype=float))
inputs.append(UISingle(name='standard_deviation', datatype=float))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle(name='output_item', datatype=float, description='Random output'))
return (inputs, outputs)
class RandomNull(BaseTransformer):
"""
    Occasionally replace random values with null values for selected items.
"""
def __init__(self, input_items, output_items):
super().__init__()
self.input_items = input_items
self.output_items = output_items
def execute(self, df):
for counter, item in enumerate(self.input_items):
choice = np.random.choice([True, False], len(df.index))
df[self.output_items[counter]] = np.where(choice, None, df[item])
return df
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(
UIMultiItem(name='input_items', datatype=None, description='Select items to apply null replacement to',
output_item='output_items', is_output_datatype_derived=True, output_datatype=None))
outputs = []
return (inputs, outputs)
class RandomChoiceString(BaseTransformer):
"""
Generate random categorical values.
"""
def __init__(self, domain_of_values, probabilities=None, output_item=None):
super().__init__()
self.domain_of_values = domain_of_values
self.probabilities = adjust_probabilities(probabilities)
if output_item is None:
self.output_item = 'output_item'
else:
self.output_item = output_item
def execute(self, df):
df[self.output_item] = np.random.choice(a=self.domain_of_values, p=self.probabilities, size=len(df.index))
return df
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UIMulti(name='domain_of_values', datatype=str, required=True))
inputs.append(UIMulti(name='probabilities', datatype=float, required=False))
# define arguments that behave as function outputs
outputs = []
outputs.append(
UIFunctionOutSingle(name='output_item', datatype=str, description='Random output', tags=['DIMENSION']))
return (inputs, outputs)
class RandomDiscreteNumeric(BaseTransformer):
"""
Generate random discrete numeric values.
"""
def __init__(self, discrete_values, probabilities=None, output_item=None):
super().__init__()
self.discrete_values = discrete_values
self.probabilities = adjust_probabilities(probabilities)
if output_item is None:
self.output_item = 'output_item'
else:
self.output_item = output_item
def execute(self, df):
df[self.output_item] = np.random.choice(a=self.discrete_values, p=self.probabilities, size=len(df.index))
return df
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UIMulti(name='discrete_values', datatype=float))
inputs.append(UIMulti(name='probabilities', datatype=float, required=False))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle(name='output_item', datatype=float, description='Random output'))
return (inputs, outputs)
class SaveCosDataFrame(BaseTransformer):
"""
Serialize dataframe to COS
"""
def __init__(self, filename=None, columns=None, output_item=None):
super().__init__()
if filename is None:
self.filename = 'job_output_df'
else:
self.filename = filename
self.columns = columns
if output_item is None:
self.output_item = 'save_df_result'
else:
self.output_item = output_item
def execute(self, df):
if self.columns is not None:
sf = df[self.columns]
else:
sf = df
db = self.get_db()
bucket = self.get_bucket_name()
db.cos_save(persisted_object=sf, filename=self.filename, bucket=bucket, binary=True)
df[self.output_item] = True
return df
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingle(name='filename', datatype=str))
inputs.append(UIMultiItem(name='columns'))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle(name='output_item', datatype=str, description='Result of save operation'))
return (inputs, outputs)
class SCDLookup(BaseSCDLookup):
"""
    Look up a slowly changing dimension property from an SCD lookup table containing:
    start_date, end_date, device_id and property. End dates are not currently used.
Previous lookup value is assumed to be valid until the next.
"""
def __init__(self, table_name, output_item=None):
self.table_name = table_name
super().__init__(table_name=table_name, output_item=output_item)
class IoTSCDLookupWithDefault(BaseSCDLookupWithDefault):
"""
Look up an scd property from a scd lookup table containing columns for:
start_date, end_date, device_id and dimension property.
If the table does not provide a value for a given time
the default value is taken.
"""
def __init__(self, table_name, dimension_name, entity_name, start_name, end_name, default_value, output_item=None):
super().__init__(table_name=table_name, output_item=output_item, default_value=default_value,
dimension_name=dimension_name, entity_name=entity_name, start_name=start_name,
end_name=end_name)
class ShiftCalendar(BaseTransformer):
"""
Generate data for a shift calendar using a shift_definition in the form of a dict keyed on shift_id
Dict contains a tuple with the start and end hours of the shift expressed as numbers. Example:
{
"1": [5.5, 14],
"2": [14, 21],
"3": [21, 29.5]
},
"""
is_custom_calendar = True
auto_conform_index = True
def __init__(self, shift_definition=None, period_start_date=None, period_end_date=None, shift_day=None,
shift_id=None):
if shift_definition is None:
self.shift_definition = {"1": [5.5, 14], "2": [14, 21], "3": [21, 29.5]}
else:
self.shift_definition = shift_definition
if period_start_date is None:
self.period_start_date = 'shift_start_date'
else:
self.period_start_date = period_start_date
if period_end_date is None:
self.period_end_date = 'shift_end_date'
else:
self.period_end_date = period_end_date
if shift_day is None:
self.shift_day = 'shift_day'
else:
self.shift_day = shift_day
if shift_id is None:
self.shift_id = 'shift_id'
else:
self.shift_id = shift_id
super().__init__()
def get_data(self, start_date, end_date):
if start_date is None:
raise ValueError('Start date is required when building data for a shift calendar')
if end_date is None:
raise ValueError('End date is required when building data for a shift calendar')
# Subtract a day from start_date and add a day to end_date to provide shift information for the full
# calendar days at left and right boundary.
# Example: shift1 = [22:00,10:00], shift2 = [10:00, 22:00], data point = '2019-11-22 23:01:00' ==> data point
# falls into shift_day '2019-11-23', not '2019-11-22'
one_day = pd.DateOffset(days=1)
start_date = start_date.date() - one_day
end_date = end_date.date() + one_day
dates = pd.date_range(start=start_date, end=end_date, freq='1D').tolist()
dfs = []
for shift_id, start_end in list(self.shift_definition.items()):
data = {}
data[self.shift_day] = dates
data[self.shift_id] = shift_id
data[self.period_start_date] = [x + dt.timedelta(hours=start_end[0]) for x in dates]
data[self.period_end_date] = [x + dt.timedelta(hours=start_end[1]) for x in dates]
dfs.append(pd.DataFrame(data))
df = pd.concat(dfs)
df[self.period_start_date] = pd.to_datetime(df[self.period_start_date])
df[self.period_end_date] = pd.to_datetime(df[self.period_end_date])
df.sort_values([self.period_start_date], inplace=True)
return df
def get_empty_data(self):
col_types = {self.shift_day: 'datetime64[ns]', self.shift_id: 'float64',
self.period_start_date: 'datetime64[ns]', self.period_end_date: 'datetime64[ns]'}
df = pd.DataFrame(columns=col_types.keys())
df = df.astype(dtype=col_types)
return df
def execute(self, df):
df = reset_df_index(df, auto_index_name=self.auto_index_name)
entity_type = self.get_entity_type()
(df, ts_col) = entity_type.df_sort_timestamp(df)
start_date = df[ts_col].min()
end_date = df[ts_col].max()
if len(df.index) > 0:
calendar_df = self.get_data(start_date=start_date, end_date=end_date)
df = pd.merge_asof(left=df, right=calendar_df, left_on=ts_col, right_on=self.period_start_date,
direction='backward')
df = self._entity_type.index_df(df)
return df
def get_period_end(self, date):
df = self.get_data(date, date)
result = df[self.period_end_date].max()
return result
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingle(name='shift_definition', datatype=dict, description=''))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle(name='period_start_date', datatype=dt.datetime, tags=['DIMENSION']))
outputs.append(UIFunctionOutSingle(name='period_end_date', datatype=dt.datetime, tags=['DIMENSION']))
outputs.append(UIFunctionOutSingle(name='shift_day', datatype=dt.datetime, tags=['DIMENSION']))
outputs.append(UIFunctionOutSingle(name='shift_id', datatype=int, tags=['DIMENSION']))
return (inputs, outputs)
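# --- Illustrative usage sketch (not part of the original module) ---
# get_data() builds the calendar directly from a start and end timestamp, so it
# can be exercised outside a pipeline. The shift definition below is the same
# default used by the class.
def _example_shift_calendar():
    cal = ShiftCalendar(shift_definition={"1": [5.5, 14], "2": [14, 21], "3": [21, 29.5]})
    return cal.get_data(start_date=pd.Timestamp('2020-01-01'),
                        end_date=pd.Timestamp('2020-01-07'))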
class Sleep(BaseTransformer):
"""
Wait for the designated number of seconds
"""
def __init__(self, sleep_after, sleep_duration_seconds=None, output_item=None):
super().__init__()
self.sleep_after = sleep_after
if sleep_duration_seconds is None:
self.sleep_duration_seconds = 30
else:
self.sleep_duration_seconds = sleep_duration_seconds
if output_item is None:
self.output_item = 'sleep_status'
else:
self.output_item = output_item
def execute(self, df):
msg = 'Sleep duration: %s. ' % self.sleep_duration_seconds
self.trace_append(msg)
time.sleep(self.sleep_duration_seconds)
df[self.output_item] = True
return df
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(
UIMultiItem(name='sleep_after', datatype=None, required=False, description='Sleep after calculating items'))
inputs.append(UISingle(name='sleep_duration_seconds', datatype=float))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle(name='output_item', datatype=bool, description='Dummy function output'))
return (inputs, outputs)
class TraceConstants(BaseTransformer):
"""
Write the values of available constants to the trace
"""
def __init__(self, dummy_items, output_item=None):
super().__init__()
self.dummy_items = dummy_items
if output_item is None:
self.output_item = 'trace_written'
else:
self.output_item = output_item
def execute(self, df):
c = self._entity_type.get_attributes_dict()
msg = 'entity constants retrieved'
self.trace_append(msg, **c)
df[self.output_item] = True
return df
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UIMultiItem(name='dummy_items', datatype=None, required=False, description='Not required'))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle(name='output_item', datatype=bool, description='Dummy function output'))
return (inputs, outputs)
class TimestampCol(BaseTransformer):
"""
Deliver a data item containing the timestamp
"""
def __init__(self, dummy_items=None, output_item=None):
super().__init__()
self.dummy_items = dummy_items
if output_item is None:
self.output_item = 'timestamp_col'
else:
self.output_item = output_item
def execute(self, df):
ds_1 = self.get_timestamp_series(df)
ds_1 = pd.to_datetime(ds_1)
df[self.output_item] = ds_1
return df
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UIMultiItem(name='dummy_items', datatype=None, required=False, description='Not required'))
# define arguments that behave as function outputs
outputs = []
outputs.append(
UIFunctionOutSingle(name='output_item', datatype=dt.datetime, description='Timestamp column name'))
return (inputs, outputs)
# Renamed functions
IoTExpression = PythonExpression
IoTRandomChoice = RandomChoiceString
IoTRandonNormal = RandomNormal
IoTActivityDuration = ActivityDuration
IoTSCDLookup = SCDLookup
IoTShiftCalendar = ShiftCalendar
IoTAlertHighValue = AlertHighValue
IoTAlertLow = AlertLowValue
IoTAlertExpression = AlertExpression
IoTAlertOutOfRange = AlertOutOfRange
IoTAutoTest = AutoTest
IoTConditionalItems = ConditionalItems
IoTDatabaseLookup = DatabaseLookup
IoTDeleteInputData = DeleteInputData
IoTDropNull = DropNull
IoTEntityFilter = EntityFilter
IoTGetEntityId = EntityId
IoTIfThenElse = IfThenElse
IoTPackageInfo = PackageInfo
IoTRaiseError = RaiseError
IoTSaveCosDataFrame = SaveCosDataFrame
IoTSleep = Sleep
IoTTraceConstants = TraceConstants
# Deprecated functions
class IoTEntityDataGenerator(BasePreload):
"""
Automatically load the entity input data table using new generated data.
Time series columns defined on the entity data table will be populated
with random data.
"""
is_deprecated = True
def __init__(self, ids=None, output_item=None):
self.ids = ids
if output_item is None:
self.output_item = 'entity_data_generator'
else:
self.output_item = output_item
def get_replacement(self):
new = EntityDataGenerator(ids=self.ids, output_item=self.output_item)
return new
class IoTCalcSettings(BaseMetadataProvider):
"""
    Override default calculation settings for the entity type
"""
is_deprecated = True
def __init__(self, checkpoint_by_entity=False, pre_aggregate_time_grain=None, auto_read_from_ts_table=None,
sum_items=None, mean_items=None, min_items=None, max_items=None, count_items=None, sum_outputs=None,
mean_outputs=None, min_outputs=None, max_outputs=None, count_outputs=None, output_item=None):
warnings.warn('IoTCalcSettings is deprecated. Use entity type constants instead of a '
'metadata provider to set entity type properties', DeprecationWarning)
if auto_read_from_ts_table is None:
auto_read_from_ts_table = True
if output_item is None:
output_item = 'output_item'
# metadata for pre-aggregation:
# pandas aggregate dict containing a list of aggregates for each item
self._pre_agg_rules = {}
# dict containing names of aggregate items produced for each item
self._pre_agg_outputs = {}
# assemble these metadata structures
self._apply_pre_agg_metadata('sum', items=sum_items, outputs=sum_outputs)
self._apply_pre_agg_metadata('mean', items=mean_items, outputs=mean_outputs)
self._apply_pre_agg_metadata('min', items=min_items, outputs=min_outputs)
self._apply_pre_agg_metadata('max', items=max_items, outputs=max_outputs)
self._apply_pre_agg_metadata('count', items=count_items, outputs=count_outputs)
# pass metadata to the entity type
kwargs = {'_checkpoint_by_entity': checkpoint_by_entity, '_pre_aggregate_time_grain': pre_aggregate_time_grain,
'_auto_read_from_ts_table': auto_read_from_ts_table, '_pre_agg_rules': self._pre_agg_rules,
'_pre_agg_outputs': self._pre_agg_outputs}
super().__init__(dummy_items=[], output_item=output_item, **kwargs)
def _apply_pre_agg_metadata(self, aggregate, items, outputs):
"""
convert UI inputs into a pandas aggregate dictionary and
a separate dictionary containing names of aggregate items
"""
if items is not None:
if outputs is None:
outputs = ['%s_%s' % (x, aggregate) for x in items]
for i, item in enumerate(items):
try:
self._pre_agg_rules[item].append(aggregate)
self._pre_agg_outputs[item].append(outputs[i])
except KeyError:
self._pre_agg_rules[item] = [aggregate]
self._pre_agg_outputs[item] = [outputs[i]]
except IndexError:
msg = 'Metadata for aggregate %s is not defined correctly. Outputs array should match \
length of items array.' % aggregate
raise ValueError(msg)
return None
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UISingle(name='auto_read_from_ts_table', datatype=bool, required=False,
description='By default, data retrieved is from the designated input table. Use this setting to disable.', ))
inputs.append(
            UISingle(name='checkpoint_by_entity', datatype=bool, required=False, description='By default a single checkpoint is kept for the entity type. Enable this setting to checkpoint by entity instead.'))
inputs.append(UISingle(name='pre_aggregate_time_grain', datatype=str, required=False, description='By default, data is retrieved at the input grain. Use this setting to preaggregate \
            data and reduce the volume of data retrieved',
values=['1min', '5min', '15min', '30min', '1H', '2H', '4H', '8H', '12H', 'day', 'week',
'month', 'year']))
inputs.append(UIMultiItem(name='sum_items', datatype=float, required=False,
description='Choose items that should be added when aggregating',
output_item='sum_outputs', is_output_datatype_derived=True))
inputs.append(UIMultiItem(name='mean_items', datatype=float, required=False,
description='Choose items that should be averaged when aggregating',
output_item='mean_outputs', is_output_datatype_derived=True))
        inputs.append(UIMultiItem(name='min_items', datatype=float, required=False,
                                  description='Choose items for which the system should find the smallest value when aggregating',
                                  output_item='min_outputs', is_output_datatype_derived=True))
        inputs.append(UIMultiItem(name='max_items', datatype=float, required=False,
                                  description='Choose items for which the system should find the largest value when aggregating',
                                  output_item='max_outputs', is_output_datatype_derived=True))
        inputs.append(UIMultiItem(name='count_items', datatype=float, required=False,
                                  description='Choose items for which the system should count values when aggregating',
                                  output_item='count_outputs', is_output_datatype_derived=True))
# define arguments that behave as function outputs
outputs = []
outputs.append(UIFunctionOutSingle(name='output_item', datatype=bool, description='Dummy function output'))
return (inputs, outputs)
class IoTCosFunction(BaseTransformer):
"""
Execute a serialized function retrieved from cloud object storage.
Function returns a single output.
Function is replaced by PythonFunction
"""
is_deprecated = True
def __init__(self, function_name, input_items, output_item=None, parameters=None):
warnings.warn('IoTCosFunction is deprecated. Use PythonFunction.', DeprecationWarning)
# the function name may be passed as a function object or function name (string)
# if a string is provided, it is assumed that the function object has already been serialized to COS
        # if a function object is supplied, it will be serialized to COS
self.input_items = input_items
if output_item is None:
self.output_item = 'output_item'
else:
self.output_item = output_item
super().__init__()
# get the cos bucket
# if function object, serialize and get name
self.function_name = function_name
# The function called during execution accepts a single dictionary as input
# add all instance variables to the parameters dict in case the function needs them
if parameters is None:
parameters = {}
parameters = {**parameters, **self.__dict__}
self.parameters = parameters
def execute(self, df):
db = self.get_db()
bucket = self.get_bucket_name()
        # the first test execution could include a function object
# serialize it
if callable(self.function_name):
db.cos_save(persisted_object=self.function_name, filename=self.function_name.__name__, bucket=bucket,
binary=True)
self.function_name = self.function_name.__name__
# retrieve
function = db.cos_load(filename=self.function_name, bucket=bucket, binary=True)
# execute
df = df.copy()
rf = function(df, self.parameters)
        # rf will contain the original columns along with a single new output column.
return rf
@classmethod
def build_ui(cls):
# define arguments that behave as function inputs
inputs = []
inputs.append(UIMultiItem('input_items'))
inputs.append(UISingle(name='function_name', datatype=float,
description='Name of function object. Function object must be serialized to COS before you can use it'))
inputs.append(UISingle(name='parameters', datatype=dict, required=False,
                               description='Parameters required by the function are provided as json.'))
# define arguments that behave as function outputs
outputs = []
        outputs.append(UIFunctionOutSingle('output_item'))
        return (inputs, outputs)
# All of below functions are moved from calc.py file in Analytics Service.
class Alert:
@classmethod
def metadata(cls):
return _generate_metadata(cls, {
'description': 'Create alerts that are triggered when data values reach a particular range.', 'input': [
{'name': 'sources', 'description': 'Select one or more data items to build your alert.',
'type': 'DATA_ITEM', 'required': True, 'dataType': 'ARRAY',
'jsonSchema': {"$schema": "http://json-schema.org/draft-07/schema#", "title": "sources",
"type": "array", "minItems": 1, "items": {"type": "string"}}}, {'name': 'expression',
'description': 'Build the expression for your alert by using Python script. To reference a data item, use the format ${DATA_ITEM}.',
'type': 'CONSTANT',
'required': True,
'dataType': 'LITERAL'}],
'output': [{'name': 'name', 'description': 'The name of the new alert.', 'dataType': 'BOOLEAN',
'tags': ['ALERT', 'EVENT']}], 'tags': ['EVENT']})
def __init__(self, name=None, sources=None, expression=None):
warnings.warn('Alert function is deprecated. Use AlertExpression.', DeprecationWarning)
self.logger = logging.getLogger('%s.%s' % (self.__module__, self.__class__.__name__))
if name is None or not isinstance(name, str):
raise RuntimeError("argument name must be provided and must be a string")
if expression is None or not isinstance(expression, str):
raise RuntimeError("argument expression must be provided and must be a string")
self.name = name
self.expression = expression
self.sources = sources
def execute(self, df):
c = self._entity_type.get_attributes_dict()
sources_not_in_column = df.index.names
df = df.reset_index()
expr = re.sub(r"\$\{(\w+)\}", r"df['\1']", self.expression)
self.logger.debug('alert_expression=%s' % str(expr))
df[self.name] = np.where(eval(expr), True, None)
self.logger.debug('alert_name {}'.format(self.name))
if 'test' in self.name:
self.logger.debug('alert_dataframe {}'.format(df[self.name]))
df = df.set_index(keys=sources_not_in_column)
return df
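# --- Illustrative usage sketch (not part of the original module) ---
# The deprecated Alert class uses the legacy ${DATA_ITEM} substitution syntax;
# at execution time ${temperature} is rewritten to df['temperature'].
# AlertExpression is the supported replacement. 'temperature' is an assumed item name.
def _example_legacy_alert():
    return Alert(name='high_temperature_alert',
                 sources=['temperature'],
                 expression='${temperature} > 50')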
class NewColFromCalculation:
@classmethod
def metadata(cls):
return _generate_metadata(cls, {'description': 'Create a new data item by expression.', 'input': [
{'name': 'sources', 'description': 'Select one or more data items to be used in the expression.',
'type': 'DATA_ITEM', 'required': True, 'dataType': 'ARRAY',
'jsonSchema': {"$schema": "http://json-schema.org/draft-07/schema#", "title": "sources", "type": "array",
"minItems": 1, "items": {"type": "string"}}}, {'name': 'expression',
'description': 'Build the expression by using Python script. To reference a data item, use the format ${DATA_ITEM}.',
'type': 'CONSTANT', 'required': True,
'dataType': 'LITERAL'}],
'output': [{'name': 'name', 'description': 'The name of the new data item.'}],
'tags': ['EVENT', 'JUPYTER']})
def __init__(self, name=None, sources=None, expression=None):
self.logger = logging.getLogger('%s.%s' % (self.__module__, self.__class__.__name__))
if name is None or not isinstance(name, str):
raise RuntimeError("argument name must be provided and must be a string")
if expression is None or not isinstance(expression, str):
raise RuntimeError("argument expression must be provided and must be a string")
self.name = name
self.expression = expression
self.sources = sources
def execute(self, df):
c = self._entity_type.get_attributes_dict()
sources_not_in_column = df.index.names
df = df.reset_index()
expr = re.sub(r"\$\{(\w+)\}", r"df['\1']", self.expression)
self.logger.debug('new_column_expression=%s' % str(expr))
df[self.name] = eval(expr)
df = df.set_index(keys=sources_not_in_column)
return df
class Filter:
@classmethod
def metadata(cls):
return _generate_metadata(cls, {'description': 'Filter data by expression.', 'input': [
{'name': 'sources', 'description': 'Select one or more data items to be used in the expression.',
'type': 'DATA_ITEM', 'required': True, 'dataType': 'ARRAY',
'jsonSchema': {"$schema": "http://json-schema.org/draft-07/schema#", "title": "sources", "type": "array",
"minItems": 1, "items": {"type": "string"}}}, {'name': 'expression',
'description': 'Build the filtering expression by using Python script. To reference a data item, use the format ${DATA_ITEM}.',
'type': 'CONSTANT', 'required': True,
'dataType': 'LITERAL'},
{'name': 'filtered_sources',
'description': 'Data items to be kept when expression is evaluated to be true.', 'type': 'DATA_ITEM',
'required': True, 'dataType': 'ARRAY',
'jsonSchema': {"$schema": "http://json-schema.org/draft-07/schema#", "title": "filtered_sources",
"type": "array", "minItems": 1, "items": {"type": "string"}}}], 'output': [
{'name': 'names', 'description': 'The names of the new data items.', 'dataTypeFrom': 'filtered_sources',
'cardinalityFrom': 'filtered_sources'}], 'tags': ['EVENT', 'JUPYTER']})
def __init__(self, names=None, filtered_sources=None, sources=None, expression=None):
self.logger = logging.getLogger('%s.%s' % (self.__module__, self.__class__.__name__))
if names is not None and isinstance(names, str):
names = [n.strip() for n in names.split(',') if len(n.strip()) > 0]
if names is None or not isinstance(names, list):
raise RuntimeError("argument names must be provided and must be a list")
if filtered_sources is None or not isinstance(filtered_sources, list) or len(filtered_sources) != len(names):
raise RuntimeError(
"argument filtered_sources must be provided and must be a list and of the same length of names")
if filtered_sources is not None and not set(names).isdisjoint(set(filtered_sources)):
raise RuntimeError("argument filtered_sources must not have overlapped items with names")
if expression is None or not isinstance(expression, str):
raise RuntimeError("argument expression must be provided and must be a string")
self.names = {}
for name, source in list(zip(names, filtered_sources)):
self.names[source] = name
self.expression = expression
self.sources = sources
self.filtered_sources = filtered_sources
def execute(self, df):
c = self._entity_type.get_attributes_dict()
# Make index levels available as columns
sources_not_in_column = df.index.names
df = df.reset_index()
# remove conflicting column names
cleaned_names = {}
for name, new_name in self.names.items():
if name in df.columns:
if new_name not in df.columns:
cleaned_names[name] = new_name
else:
self.logger.warning('The filter cannot be applied to column %s because the destination column %s '
'already exists in the dataframe. Available columns in the dataframe are %s' % (
name, new_name, list(df.columns)))
else:
self.logger.warning('The filter cannot be applied to column %s because this column is not available '
'in the dataframe. Therefore column %s cannot be calculated. Available columns '
'in the dataframe are %s' % (name, new_name, list(df.columns)))
# execute given expression
expr = re.sub(r"\$\{(\w+)\}", r"df['\1']", self.expression)
self.logger.debug('filter_expression=%s' % str(expr))
mask = eval(expr)
# copy columns and apply mask
for name, new_name in self.names.items():
df[new_name] = df[name].where(mask)
df.set_index(keys=sources_not_in_column, drop=True, inplace=True)
return df
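# --- Illustrative usage sketch (not part of the original module) ---
# Keep 'temperature' readings only while the (assumed) 'status' item equals
# 'RUNNING'; the filtered copy is written to the new item 'running_temperature'.
def _example_filter():
    return Filter(names='running_temperature',
                  filtered_sources=['temperature'],
                  sources=['status', 'temperature'],
                  expression="${status} == 'RUNNING'")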
class NewColFromSql:
def _set_dms(self, dms):
self.dms = dms
def _get_dms(self):
return self.dms
@classmethod
def metadata(cls):
return _generate_metadata(cls, {'description': 'Create new data items by joining SQL query result.', 'input': [
{'name': 'sql', 'description': 'The SQL query.', 'type': 'CONSTANT', 'required': True,
'dataType': 'LITERAL'}, {'name': 'index_col',
'description': 'Columns in the SQL query result to be joined (multiple items are comma separated).',
'type': 'CONSTANT', 'required': True, 'dataType': 'ARRAY',
'jsonSchema': {"$schema": "http://json-schema.org/draft-07/schema#",
"title": "index_col", "type": "array", "minItems": 1,
"items": {"type": "string"}}}, {'name': 'parse_dates',
'description': 'Columns in the SQL query result to be parsed as dates (multiple items are comma separated).',
'type': 'CONSTANT',
'required': True,
'dataType': 'ARRAY',
'jsonSchema': {
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "parse_dates",
"type": "array", "minItems": 1,
"items": {"type": "string"}}},
{'name': 'join_on', 'description': 'Data items to join the query result to.', 'type': 'DATA_ITEM',
'required': True, 'dataType': 'ARRAY',
'jsonSchema': {"$schema": "http://json-schema.org/draft-07/schema#", "title": "join_on", "type": "array",
"minItems": 1, "items": {"type": "string"}}}], 'output': [
{'name': 'names', 'description': 'The names of the new data items.'}], 'tags': ['JUPYTER']})
def __init__(self, names=None, sql=None, index_col=None, parse_dates=None, join_on=None):
self.logger = logging.getLogger('%s.%s' % (self.__module__, self.__class__.__name__))
if names is not None and isinstance(names, str):
names = [n.strip() for n in names.split(',') if len(n.strip()) > 0]
if index_col is not None and isinstance(index_col, str):
index_col = [n.strip() for n in index_col.split(',') if len(n.strip()) > 0]
if parse_dates is not None and isinstance(parse_dates, str):
parse_dates = [n.strip() for n in parse_dates.split(',') if len(n.strip()) > 0]
if names is None or not isinstance(names, list):
raise RuntimeError("argument names must be provided and must be a list")
if sql is None or not isinstance(sql, str) or len(sql) == 0:
raise RuntimeError('argument sql must be given as a non-empty string')
if index_col is None or not isinstance(index_col, list):
raise RuntimeError('argument index_col must be provided and must be a list')
if join_on is None:
raise RuntimeError('argument join_on must be given')
if parse_dates is not None and not isinstance(parse_dates, list):
raise RuntimeError('argument parse_dates must be a list')
self.names = names
self.sql = sql
self.index_col = index_col
self.parse_dates = parse_dates
self.join_on = asList(join_on)
def execute(self, df):
df_sql = self._get_dms().db.read_sql_query(self.sql, index_col=self.index_col, parse_dates=self.parse_dates)
if len(self.names) > len(df_sql.columns):
raise RuntimeError(
                'length of names (%d) is larger than the number of query result columns (%d)' % (len(self.names), len(df_sql.columns)))
# in case the join_on is in index, reset first then set back after join
sources_not_in_column = df.index.names
df = df.reset_index()
df = df.merge(df_sql, left_on=self.join_on, right_index=True, how='left')
df = df.set_index(keys=sources_not_in_column)
renamed_cols = {df_sql.columns[idx]: name for idx, name in enumerate(self.names)}
df = df.rename(columns=renamed_cols)
return df
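# --- Illustrative usage sketch (not part of the original module) ---
# Join two columns from a hypothetical site_master table onto the pipeline
# dataframe using the assumed 'site_id' data item as the join key.
def _example_new_col_from_sql():
    return NewColFromSql(names='site_name,site_region',
                         sql='SELECT site_id, site_name, site_region FROM site_master',
                         index_col='site_id',
                         parse_dates=None,
                         join_on='site_id')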
class NewColFromScalarSql:
def _set_dms(self, dms):
self.dms = dms
def _get_dms(self):
return self.dms
@classmethod
def metadata(cls):
return _generate_metadata(cls, {
'description': 'Create a new data item from a scalar SQL query returning a single value.', 'input': [
{'name': 'sql', 'description': 'The SQL query.', 'type': 'CONSTANT', 'required': True,
'dataType': 'LITERAL'}], 'output': [{'name': 'name', 'description': 'The name of the new data item.'}],
'tags': ['JUPYTER']})
def __init__(self, name=None, sql=None):
self.logger = logging.getLogger('%s.%s' % (self.__module__, self.__class__.__name__))
if name is None or not isinstance(name, str):
raise RuntimeError('argument name must be given')
if sql is None or not isinstance(sql, str) or len(sql) == 0:
raise RuntimeError('argument sql must be given as a non-empty string')
self.name = name
self.sql = sql
def execute(self, df):
df_sql = self._get_dms().db.read_sql_query(self.sql)
if df_sql.shape != (1, 1):
raise RuntimeError(
                'the scalar sql=%s does not return a single value; result shape=%s' % (self.sql, str(df_sql.shape)))
df[self.name] = df_sql.iloc[0, 0]
return df
class Shift:
def __init__(self, name, start, end, cross_day_to_next=True):
self.logger = logging.getLogger('%s.%s' % (self.__module__, self.__class__.__name__))
self.name = name
self.ranges = [start, end]
self.cross_day_to_next = cross_day_to_next
self.cross_day = (start > end)
if self.cross_day:
self.ranges.insert(1, dt.time(0, 0, 0))
self.ranges.insert(1, dt.time(23, 59, 59, 999999))
self.ranges = list(pairwise(self.ranges))
def within(self, datetime):
if isinstance(datetime, dt.datetime):
date = dt.date(datetime.year, datetime.month, datetime.day)
time = dt.time(datetime.hour, datetime.minute, datetime.second, datetime.microsecond)
elif isinstance(datetime, dt.time):
date = None
time = datetime
else:
logger.debug('unknown datetime value type::%s' % datetime)
raise ValueError('unknown datetime value type')
for idx, range in enumerate(self.ranges):
if range[0] <= time and time < range[1]:
if self.cross_day and date is not None:
if self.cross_day_to_next and idx == 0:
date += dt.timedelta(days=1)
elif not self.cross_day_to_next and idx == 1:
date -= dt.timedelta(days=1)
return (date, True)
return False
def start_time(self, shift_day=None):
if shift_day is None:
return self.ranges[0][0]
else:
if self.cross_day and self.cross_day_to_next:
shift_day -= dt.timedelta(days=1)
return dt.datetime.combine(shift_day, self.ranges[0][0])
def end_time(self, shift_day=None):
if shift_day is None:
return self.ranges[-1][-1]
else:
if self.cross_day and not self.cross_day_to_next:
shift_day += dt.timedelta(days=1)
return dt.datetime.combine(shift_day, self.ranges[-1][-1])
def __eq__(self, other):
return self.name == other.name and self.ranges == other.ranges
def __repr__(self):
return self.__str__()
def __str__(self):
return "%s: (%s, %s)" % (self.name, self.ranges[0][0], self.ranges[-1][1])
class ShiftPlan:
def __init__(self, shifts, cross_day_to_next=True):
self.logger = logging.getLogger('%s.%s' % (self.__module__, self.__class__.__name__))
shifts = {shift: [dt.time(*tuple(time)) for time in list(pairwise(time_range))] for shift, time_range in
shifts.items()}
# validation: shifts cannot overlap, gaps are allowed though
self.shifts = []
for shift, time_range in shifts.items():
self.shifts.append(Shift(shift, time_range[0], time_range[1], cross_day_to_next=cross_day_to_next))
self.shifts.sort(key=lambda x: x.ranges[0][0])
if cross_day_to_next and self.shifts[-1].cross_day:
self.shifts.insert(0, self.shifts[-1])
del self.shifts[-1]
self.cross_day_to_next = cross_day_to_next
self.logger.debug("ShiftPlan: shifts=%s, cross_day_to_next=%s" % (self.shifts, self.cross_day_to_next))
def get_shift(self, datetime):
for shift in self.shifts:
ret = shift.within(datetime)
if ret:
return (ret[0], shift)
return None
def next_shift(self, shift_day, shift):
shift_idx = None
for idx, shft in enumerate(self.shifts):
if shift == shft:
shift_idx = idx
break
if shift_idx is None:
logger.debug("unknown shift: %s" % str(shift))
raise ValueError("unknown shift: %s" % str(shift))
shift_idx = shift_idx + 1
if shift_idx >= len(self.shifts):
shift_idx %= len(self.shifts)
shift_day += dt.timedelta(days=1)
return (shift_day, self.shifts[shift_idx])
def get_real_datetime(self, shift_day, shift, time):
if shift.cross_day == False:
return dt.datetime.combine(shift_day, time)
if self.cross_day_to_next and time > shift.ranges[-1][-1]:
# cross day shift the part before midnight
return dt.datetime.combine(shift_day - dt.timedelta(days=1), time)
elif self.cross_day_to_next == False and time < shift.ranges[0][0]:
# cross day shift the part after midnight
return dt.datetime.combine(shift_day + dt.timedelta(days=1), time)
else:
return dt.datetime.combine(shift_day, time)
def split(self, start, end):
start_shift = self.get_shift(start)
end_shift = self.get_shift(end)
if start_shift is None:
raise ValueError("starting time not fit in any shift: start_shift is None")
if end_shift is None:
raise ValueError("ending time not fit in any shift: end_shift is None")
if start > end:
logger.warning('starting time must not be after ending time %s %s. Ignoring end date.' % (start, end))
return [(start_shift, start, start)]
if start_shift == end_shift:
return [(start_shift, start, end)]
splits = []
shift_day, shift = start_shift
splits.append((start_shift, start, self.get_real_datetime(shift_day, shift, shift.ranges[-1][-1])))
start_shift = self.next_shift(shift_day, shift)
while start_shift != end_shift:
shift_day, shift = start_shift
splits.append((start_shift, self.get_real_datetime(shift_day, shift, shift.ranges[0][0]),
self.get_real_datetime(shift_day, shift, shift.ranges[-1][-1])))
start_shift = self.next_shift(shift_day, shift)
shift_day, shift = end_shift
splits.append((end_shift, self.get_real_datetime(shift_day, shift, shift.ranges[0][0]), end))
return splits
def __repr__(self):
return self.__str__()
def __str__(self):
return str(self.shifts)
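# --- Illustrative usage sketch (not part of the original module) ---
# A two-shift plan using the [start_hour, start_minute, end_hour, end_minute]
# format documented in IdentifyShiftFromTimestamp below. get_shift() returns a
# (shift_day, Shift) tuple for timestamps that fall inside a shift, or None.
def _example_shift_plan():
    plan = ShiftPlan({"1": [6, 0, 14, 0], "2": [14, 0, 22, 0]})
    return plan.get_shift(dt.datetime(2020, 1, 1, 9, 30))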
class IdentifyShiftFromTimestamp:
@classmethod
def metadata(cls):
return _generate_metadata(cls, {
'description': 'Identifies the shift that was active when data was received by using the timestamp on the data.',
'input': [{'name': 'timestamp',
'description': 'Specify the timestamp data item on which to base your calculation.',
'type': 'DATA_ITEM', 'required': True, 'dataType': 'TIMESTAMP'}, {'name': 'shifts',
'description': 'Specify the shift plan in JSON syntax. For example, {"1": [7, 30, 16, 30]} Where 1 is the shift ID, 7 is the start hour, 30 is the start minutes, 16 is the end hour, and 30 is the end minutes. You can enter multiple shifts separated by commas.',
'type': 'CONSTANT',
'required': True,
'dataType': 'JSON'},
{'name': 'cross_day_to_next',
'description': 'If a shift extends past midnight, count it as the first shift of the next calendar day.',
'type': 'CONSTANT', 'required': False, 'dataType': 'BOOLEAN'}], 'output': [{'name': 'shift_day',
                              'description': 'The starting timestamp of a day, as identified by the timestamp and the shift plan.',
'dataType': 'TIMESTAMP',
'tags': [
'DIMENSION']},
{'name': 'shift_id',
'description': 'The shift ID, as identified by the timestamp and the shift plan.',
'dataType': 'LITERAL',
'tags': [
'DIMENSION']}, {
'name': 'shift_start',
'description': 'The starting time of the shift, as identified by the timestamp and the shift plan.',
'dataType': 'TIMESTAMP',
'tags': [
'DIMENSION']},
{'name': 'shift_end',
'description': 'The ending time of the shift, as identified by the timestamp and the shift plan.',
'dataType': 'TIMESTAMP',
'tags': [
'DIMENSION']},
{'name': 'hour_no',
'description': 'The hour of the day, as identified by the timestamp and the shift plan.',
'dataType': 'NUMBER',
'tags': [
'DIMENSION']}],
'tags': ['JUPYTER']})
def __init__(self, shift_day=None, shift_id=None, shift_start=None, shift_end=None, hour_no=None,
timestamp="timestamp", shifts=None, cross_day_to_next=True):
self.logger = logging.getLogger('%s.%s' % (self.__module__, self.__class__.__name__))
if shift_day is None:
raise RuntimeError("argument shift_day must be provided: shift_day is None")
if shift_id is None:
raise RuntimeError("argument shift_id must be provided: shift_id is None")
if shift_start is not None and not isinstance(shift_start, str):
raise RuntimeError("argument shift_start must be a string")
if shift_end is not None and not isinstance(shift_end, str):
raise RuntimeError("argument shift_end must be a string")
if hour_no is not None and not isinstance(hour_no, str):
raise RuntimeError("argument hour_no must be a string")
if timestamp is None:
raise RuntimeError("argument timestamp must be provided: timestamp is None")
        if shifts is None or not isinstance(shifts, dict) or len(shifts) == 0:
raise RuntimeError("argument shifts must be provided and is a non-empty dict")
self.shift_day = shift_day
self.shift_id = shift_id
self.shift_start = shift_start if shift_start is not None and len(shift_start.strip()) > 0 else None
self.shift_end = shift_end if shift_end is not None and len(shift_end.strip()) > 0 else None
self.hour_no = hour_no if hour_no is not None and len(hour_no.strip()) > 0 else None
self.timestamp = timestamp
self.shifts = ShiftPlan(shifts, cross_day_to_next=cross_day_to_next)
def execute(self, df):
generated_values = {self.shift_day: [], self.shift_id: [], 'self.shift_start': [], 'self.shift_end': [],
'self.hour_no': [], }
df[self.shift_day] = self.shift_day
df[self.shift_id] = self.shift_id
if self.shift_start is not None:
df[self.shift_start] = self.shift_start
if self.shift_end is not None:
df[self.shift_end] = self.shift_end
if self.hour_no is not None:
df[self.hour_no] = self.hour_no
timestampIndex = df.index.names.index(self.timestamp) if self.timestamp in df.index.names else None
if timestampIndex is not None:
# Timestamp is a index level
for idx in df.index:
t = idx[timestampIndex]
if isinstance(t, str):
t = pd.to_datetime(t)
ret = self.shifts.get_shift(t)
if ret is None:
continue
shift_day, shift = ret
generated_values[self.shift_day].append(
pd.Timestamp(year=shift_day.year, month=shift_day.month, day=shift_day.day))
generated_values[self.shift_id].append(shift.name)
generated_values['self.shift_start'].append(shift.start_time(shift_day))
generated_values['self.shift_end'].append(shift.end_time(shift_day))
generated_values['self.hour_no'].append(t.hour)
else:
# Timestamp is a column
for idx, value in df[self.timestamp].items():
t = value
if isinstance(t, str):
t = pd.to_datetime(t)
ret = self.shifts.get_shift(t)
if ret is None:
continue
shift_day, shift = ret
generated_values[self.shift_day].append(
pd.Timestamp(year=shift_day.year, month=shift_day.month, day=shift_day.day))
generated_values[self.shift_id].append(shift.name)
generated_values['self.shift_start'].append(shift.start_time(shift_day))
generated_values['self.shift_end'].append(shift.end_time(shift_day))
generated_values['self.hour_no'].append(t.hour)
df[self.shift_day] = generated_values[self.shift_day]
df[self.shift_id] = generated_values[self.shift_id]
if self.shift_start is not None:
df[self.shift_start] = generated_values['self.shift_start']
if self.shift_end is not None:
df[self.shift_end] = generated_values['self.shift_end']
if self.hour_no is not None:
df[self.hour_no] = generated_values['self.hour_no']
return df
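# --- Illustrative usage sketch (not part of the original module) ---
# Configure shift identification with the JSON shift-plan format documented in
# the metadata above. The output column names here are assumed examples.
def _example_identify_shift_from_timestamp():
    return IdentifyShiftFromTimestamp(shift_day='shift_day', shift_id='shift_id',
                                      shift_start='shift_start', shift_end='shift_end',
                                      hour_no='hour_no', timestamp='timestamp',
                                      shifts={"1": [7, 30, 16, 30], "2": [16, 30, 23, 30]})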
class SplitDataByActiveShifts:
@classmethod
def metadata(cls):
return _generate_metadata(cls, {
'description': 'Identifies the shift that was active when data was received by using the timestamp on the data.',
'input': [{'name': 'start_timestamp',
'description': 'Specify the timestamp data item on which the data to be split must be based.',
'type': 'DATA_ITEM', 'required': True, 'dataType': 'TIMESTAMP'}, {'name': 'end_timestamp',
'description': 'Specify the timestamp data item on which the data to be split must be based.',
'type': 'DATA_ITEM',
'required': True,
'dataType': 'TIMESTAMP'},
{'name': 'shifts',
'description': 'Specify the shift plan in JSON syntax. For example, {"1": [7, 30, 16, 30]} Where 1 is the shift ID, 7 is the start hour, 30 is the start minutes, 16 is the end hour, and 30 is the end minutes. You can enter multiple shifts separated by commas.',
'type': 'CONSTANT', 'required': True, 'dataType': 'JSON'}, {'name': 'cross_day_to_next',
'description': 'If a shift extends past midnight, count it as the first shift of the next calendar day.',
'type': 'CONSTANT',
'required': False,
'dataType': 'BOOLEAN'}], 'output': [
{'name': 'shift_day',
         'description': 'The starting timestamp of a day, as identified by the timestamp and the shift plan.',
'dataType': 'TIMESTAMP', 'tags': ['DIMENSION']},
{'name': 'shift_id', 'description': 'The shift ID, as identified by the timestamp and the shift plan.',
'dataType': 'LITERAL', 'tags': ['DIMENSION']}, {'name': 'shift_start',
'description': 'The starting time of the shift, as identified by the timestamp and the shift plan.',
'dataType': 'TIMESTAMP', 'tags': ['DIMENSION']},
{'name': 'shift_end',
'description': 'The ending time of the shift, as identified by the timestamp and the shift plan.',
'dataType': 'TIMESTAMP', 'tags': ['DIMENSION']}], 'tags': ['JUPYTER']})
def __init__(self, start_timestamp, end_timestamp, ids='id', shift_day=None, shift_id=None, shift_start=None,
shift_end=None, shifts=None, cross_day_to_next=True):
self.logger = logging.getLogger('%s.%s' % (self.__module__, self.__class__.__name__))
if ids is None:
raise RuntimeError("argument ids must be provided")
if start_timestamp is None:
raise RuntimeError("argument start_timestamp must be provided")
if end_timestamp is None:
raise RuntimeError("argument end_timestamp must be provided")
if shift_day is None:
raise RuntimeError("argument shift_day must be provided")
if shift_id is None:
raise RuntimeError("argument shift_id must be provided")
if shift_start is not None and not isinstance(shift_start, str):
raise RuntimeError("argument shift_start must be a string")
if shift_end is not None and not isinstance(shift_end, str):
raise RuntimeError("argument shift_end must be a string")
        if shifts is None or not isinstance(shifts, dict) or len(shifts) == 0:
raise RuntimeError("argument shifts must be provided and is a non-empty dict")
self.ids = ids
self.start_timestamp = start_timestamp
self.end_timestamp = end_timestamp
self.shift_day = shift_day
self.shift_id = shift_id
self.shift_start = shift_start if shift_start is not None and len(shift_start.strip()) > 0 else None
self.shift_end = shift_end if shift_end is not None and len(shift_end.strip()) > 0 else None
self.shifts = ShiftPlan(shifts, cross_day_to_next=cross_day_to_next)
def execute(self, df):
generated_rows = []
generated_values = {self.shift_day: [], self.shift_id: [], 'self.shift_start': [], 'self.shift_end': [], }
append_generated_values = {self.shift_day: [], self.shift_id: [], 'self.shift_start': [],
'self.shift_end': [], }
df[self.shift_day] = self.shift_day
df[self.shift_id] = self.shift_id
if self.shift_start is not None:
df[self.shift_start] = self.shift_start
if self.shift_end is not None:
df[self.shift_end] = self.shift_end
# self.logger.debug('df_index_before_move=%s' % str(df.index.to_frame().dtypes.to_dict()))
indexes_moved_to_columns = df.index.names
df = df.reset_index()
# self.logger.debug('df_index_after_move=%s, df_columns=%s' % (str(df.index.to_frame().dtypes.to_dict()), str(df.dtypes.to_dict())))
# Remember positions of columns in dataframe (position starts with 1 because df_row will contain index of
# dataframe at position 0)
position_column = {}
for pos, col_name in enumerate(df.columns, 1):
position_column[col_name] = pos
cnt = 0
cnt2 = 0
for df_row in df.itertuples(index=True, name=None):
idx = df_row[0]
if cnt % 1000 == 0:
self.logger.debug('%d rows processed, %d rows added' % (cnt, cnt2))
cnt += 1
row_start_timestamp = df_row[position_column[self.start_timestamp]]
row_end_timestamp = df_row[position_column[self.end_timestamp]]
if pd.notna(row_start_timestamp) and pd.notna(row_end_timestamp):
                result_rows = self.shifts.split(pd.to_datetime(row_start_timestamp), pd.to_datetime(row_end_timestamp))
from datetime import datetime
import operator
import numpy as np
import pytest
from pandas import DataFrame, Index, Series, bdate_range
import pandas._testing as tm
from pandas.core import ops
class TestSeriesLogicalOps:
@pytest.mark.parametrize("bool_op", [operator.and_, operator.or_, operator.xor])
def test_bool_operators_with_nas(self, bool_op):
# boolean &, |, ^ should work with object arrays and propagate NAs
ser = Series(bdate_range("1/1/2000", periods=10), dtype=object)
ser[::2] = np.nan
mask = ser.isna()
filled = ser.fillna(ser[0])
result = bool_op(ser < ser[9], ser > ser[3])
expected = bool_op(filled < filled[9], filled > filled[3])
expected[mask] = False
tm.assert_series_equal(result, expected)
def test_logical_operators_bool_dtype_with_empty(self):
# GH#9016: support bitwise op for integer types
index = list("bca")
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
s_empty = Series([], dtype=object)
res = s_tft & s_empty
expected = s_fff
tm.assert_series_equal(res, expected)
res = s_tft | s_empty
expected = s_tft
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_int_dtype(self):
# GH#9016: support bitwise op for integer types
# TODO: unused
# s_0101 = Series([0, 1, 0, 1])
s_0123 = Series(range(4), dtype="int64")
s_3333 = Series([3] * 4)
s_4444 = Series([4] * 4)
res = s_0123 & s_3333
expected = Series(range(4), dtype="int64")
tm.assert_series_equal(res, expected)
res = s_0123 | s_4444
expected = Series(range(4, 8), dtype="int64")
tm.assert_series_equal(res, expected)
s_1111 = Series([1] * 4, dtype="int8")
res = s_0123 & s_1111
expected = Series([0, 1, 0, 1], dtype="int64")
tm.assert_series_equal(res, expected)
res = s_0123.astype(np.int16) | s_1111.astype(np.int32)
expected = Series([1, 1, 3, 3], dtype="int32")
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_int_scalar(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
res = s_0123 & 0
expected = Series([0] * 4)
tm.assert_series_equal(res, expected)
res = s_0123 & 1
expected = Series([0, 1, 0, 1])
tm.assert_series_equal(res, expected)
def test_logical_operators_int_dtype_with_float(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
msg = "Cannot perform.+with a dtyped.+array and scalar of type"
with pytest.raises(TypeError, match=msg):
s_0123 & np.NaN
with pytest.raises(TypeError, match=msg):
s_0123 & 3.14
msg = "unsupported operand type.+for &:"
with pytest.raises(TypeError, match=msg):
s_0123 & [0.1, 4, 3.14, 2]
with pytest.raises(TypeError, match=msg):
s_0123 & np.array([0.1, 4, 3.14, 2])
with pytest.raises(TypeError, match=msg):
s_0123 & Series([0.1, 4, -3.14, 2])
def test_logical_operators_int_dtype_with_str(self):
s_1111 = Series([1] * 4, dtype="int8")
msg = "Cannot perform 'and_' with a dtyped.+array and scalar of type"
with pytest.raises(TypeError, match=msg):
s_1111 & "a"
with pytest.raises(TypeError, match="unsupported operand.+for &"):
s_1111 & ["a", "b", "c", "d"]
def test_logical_operators_int_dtype_with_bool(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
expected = Series([False] * 4)
result = s_0123 & False
tm.assert_series_equal(result, expected)
result = s_0123 & [False]
tm.assert_series_equal(result, expected)
result = s_0123 & (False,)
tm.assert_series_equal(result, expected)
result = s_0123 ^ False
expected = Series([False, True, True, True])
tm.assert_series_equal(result, expected)
def test_logical_operators_int_dtype_with_object(self):
# GH#9016: support bitwise op for integer types
s_0123 = Series(range(4), dtype="int64")
result = s_0123 & Series([False, np.NaN, False, False])
expected = Series([False] * 4)
tm.assert_series_equal(result, expected)
s_abNd = Series(["a", "b", np.NaN, "d"])
with pytest.raises(TypeError, match="unsupported.* 'int' and 'str'"):
s_0123 & s_abNd
def test_logical_operators_bool_dtype_with_int(self):
index = list("bca")
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
res = s_tft & 0
expected = s_fff
        tm.assert_series_equal(res, expected)
from flask import Flask, render_template
import re
from pymongo import MongoClient
from collections import Counter
from bson import json_util
import json
import pandas as pd
connection = MongoClient('localhost', 27017)
collection = connection.db.tweets_sfe
application = Flask(__name__)
@application.route("/user")
def user():
return render_template('user.html')
@application.route("/")
def hello():
# documents = collection.find()
# response = []
# for document in documents:
# document['_id'] = str(document['_id'])
# response.append(document)
# print(json.dumps(response))
# return render_template('sample.html',data=json.dumps(response))
tweet_cursor = collection.find()
df1 = pd.DataFrame(list(tweet_cursor))
print("Df1 columns:",df1.columns)
df = df1.sort_values(by='Likes', ascending=False)
df = df.reset_index(drop=True)
x = []
Y = []
p = []
q = []
x1=[]
y1=[]
if len(df) > 5:
for j in range(5):
p.append(df.iloc[j, 7])
q.append(df.iloc[j, 8])
df['Created_date'] = pd.to_datetime(df['Created_date'])
df_final = df.groupby([pd.Grouper(key='Created_date', freq='H')]).size().reset_index(name='count')
df_list = df_final.values.tolist()
for key, value in df_list:
x.append(key)
Y.append(value)
df1['hashtag'] = df1['Tweet_text'].apply(lambda x: re.findall(r'\B#\w*[a-zA-Z]+\w*', x))
temp = []
for i in df1['hashtag'].values:
for k in i:
temp.append(k)
d = Counter(temp)
#print(d)
    df2 = pd.DataFrame.from_dict(d, orient='index')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 24 11:26:20 2018
@author: nbaya
"""
import os
import glob
import re
import pandas as pd
from subprocess import call
from joblib import Parallel, delayed
import multiprocessing
import sys
import numpy as np
v3_path = "/Users/nbaya/Documents/lab/ukbb-sexdiff/imputed-v3-results/"
#Get saved phenotypes
malefiles = (list(map(os.path.basename,glob.glob(v3_path+"*.male*.gz")))) #restrict to male files to prevent counting phenotype twice
find = re.compile(r"^(.*?)\..*") #regex search term for grabbing all the text before the first period in a string
savedphenotypes = list(map(lambda filename: re.search(find,filename).group(1), malefiles)) #list of all downloaded phenotypes (for me, it gives 78: 77 original samples + 20116_2)
#Get all phenotypes
allphenotypes = pd.Series.tolist(pd.read_table(v3_path+"phenotypes.both_sexes.tsv").iloc[:]["phenotype"]) #list of all phenotypes (male & female)
allphenotypes = pd.DataFrame({'phenotype':allphenotypes})
allphenotypes.to_csv(v3_path+"allphenotypeslist.tsv",sep = "\t")
# TEMPORARY -------------------------------------------------------------------
#savedFiles= (list(map(os.path.basename,glob.glob(chrX_path+"*.gz")))) #restrict to male files to prevent counting phenotype twice
#find = re.compile(r"^(.*?)\..*") #regex search term for grabbing all the text before the first period in a string
#newphenotypes = list(map(lambda filename: re.search(find,filename).group(1), savedFiles)) #list of all downloaded phenotypes (for me, it gives 78: 77 original samples + 20116_2)
#
#nextphenotypes = list(set(savedphenotypes).difference(set(newphenotypes)))
#
#len(nextphenotypes)
# -----------------------------------------------------------------------------
n_cores = multiprocessing.cpu_count()
#old method of extracting chrX
def prev_chrX_from_saved_phenotypes(ph):
tb_male = pd.read_csv((v3_path+ph+".imputed_v3.results.male.tsv.gz"), compression='gzip', sep='\t') #read files
tb_female = pd.read_csv((v3_path+ph+".imputed_v3.results.female.tsv.gz"), compression='gzip', sep='\t')
chrX_male = tb_male[tb_male.iloc[:]["variant"].str.match('X')][:] #get chrX variants for males
chrX_female = tb_female[tb_female.iloc[:]["variant"].str.match('X')][:] #get chrX variants for females
chrX = pd.merge(chrX_male,chrX_female, on = 'variant',suffixes = ("_male","_female"))
chrX.to_csv(chrX_path+ph+".chrX.tsv.gz",sep = '\t', compression = 'gzip')
#Parallel(n_jobs=n_cores,verbose = 50)(delayed(chrX_from_saved_phenotypes)(ph) for ph in savedphenotypes)
# TEMPORARY -------------------------------------------------------------------
#Parallel(n_jobs=n_cores,verbose = 50)(delayed(chrX_from_saved_phenotypes)(ph) for ph in nextphenotypes)
# -----------------------------------------------------------------------------
#def chrX_from_new_phenotypes(ph):
#
## call(["gsutil" ,"cp","gs://ukbb-gwas-imputed-v3-results/export1/"+ph+".**male*",
## "~/Documents/lab/ukbb-sexdiff/chrX/"])
#
#
# call('gsutil ls gs://ukbb-gwas-imputed-v3-results/export1/'+ph+'.**male*', shell=True)
## "~/Documents/lab/ukbb-sexdiff/chrX/',)
## call(["paste","<(cat", ph, ".imputed_v3.results.female.tsv.gz","|","zcat",
## "|" , "cut -f 1,2,3,5,6,8)", "<(cat", ph,".imputed_v3.results.male.tsv.gz" ,
## "|", "zcat", "|", "cut", "-f", "1,2,3,5,6,8)", "|", "awk" ,"\'", "NR==1{",
## "print", "\"variant\",\"n_female\",\"n_male\",\"frq_female\",\"frq_male\",\"beta_female\",\"se_female\",\"p_female\",\"beta_male\",\"se_male\",\"p_male\"",
## "}NR>1", "&&", "$1==$7{", "maff=$3/(2*$2);" , "mafm=$9/(2*$8);" ,
## "if(maff > .05 && maff<.95 && mafm > .05 && mafm < .95){",
## "print $1,$2,$8,maff,mafm,$4,$5,$6,$10,$11,$12} }\' | gzip >", ph, ".sexdiff.gz]"])
#
#testph = ['46','47']
#
#for ph in testph:
# chrX_from_new_phenotypes(ph)
#for ph in set(allphenotypes).difference(set(savedphenotypes)): #for all phenotypes not saved
# -----------------------------------------------------------------------------
chrX_path = "/Users/nbaya/Documents/lab/ukbb-sexdiff/chrX/data/"
ph = "1757"
#Males
tb_male = pd.read_csv((v3_path+ph+".imputed_v3.results.male.tsv.gz"), compression='gzip', sep='\t')
import os, sys, json, warnings, logging as log
import pandas as pd, tqdm, dpath
import annotate, collect
from pprint import pprint
def make_items(iter_labeled_meta, iter_all_meta, n_unlabeled, read_rows):
'''Generate metadata from gold-standard and unlabled'''
labeled_items = [(meta, read_rows(meta['url']))
for meta in iter_labeled_meta]
annotated_table_urls = set([meta['url'] for meta, _ in labeled_items])
unlabeled_meta = []
for meta in iter_all_meta:
if (n_unlabeled is not None) and len(unlabeled_meta) >= n_unlabeled:
break
if meta['url'] not in annotated_table_urls:
unlabeled_meta.append(meta)
unlabeled_items = [(meta, read_rows(meta['url']))
for meta in unlabeled_meta]
return labeled_items, unlabeled_items
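# A hedged usage sketch for make_items (hypothetical variables, not from the original
# code): iter_labeled_meta/iter_all_meta yield metadata dicts that contain a 'url' key,
# and read_rows is any callable mapping a url to the table rows behind it.
#   labeled, unlabeled = make_items(iter_labeled_meta=iter(gold_meta),
#                                   iter_all_meta=iter(all_meta),
#                                   n_unlabeled=100,
#                                   read_rows=my_read_rows)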
def make_labelquery(args):
querytype, template, slots, value, templates, namespace, kbdomain, name = args
return querytype, name, annotate.make_labelquery(*args)
def parallel_query(labelqueries,
templates,
namespace,
kbdomain,
max_workers=1):
import tqdm, multiprocessing
with multiprocessing.Pool(max_workers) as p:
stream_args = [(q['label'], q['template'], q['slots'], q['value'],
templates, namespace, kbdomain, name)
for name, q in labelqueries.items()]
t = len(stream_args)
# yield from tqdm.tqdm(p.imap_unordered(make_labelquery, stream_args), total=t)
yield from p.imap_unordered(make_labelquery, stream_args)
def cache_labelquery_results(modeldir,
namespace,
kbdomain,
selected_queries=[],
results_fname=None,
parallel=False,
verbose=False):
labelqueries, templates = annotate.load_labelqueries_templates(modeldir)
if not results_fname:
os.makedirs(os.path.join(modeldir, 'labelqueries', 'cache'),
exist_ok=True)
results_fname = os.path.join(modeldir, 'labelqueries', 'cache',
'results.json')
labelquery_results = load_labelquery_results(modeldir,
results_fname=results_fname)
l = len(labelqueries)
if parallel:
if selected_queries:
labelqueries = {
name: q
for name, q in labelqueries.items() if name in selected_queries
}
lqs = parallel_query(labelqueries,
templates,
namespace,
kbdomain,
max_workers=parallel)
for qt, name, lq in lqs:
labelquery_results.setdefault(qt, {})[name] = lq
else:
for i, (name, q) in enumerate(labelqueries.items()):
if selected_queries and (name not in selected_queries):
continue
lq = annotate.make_labelquery(q['label'],
q['template'],
q['slots'],
q['value'],
templates,
namespace,
kbdomain=kbdomain,
name=name)
if verbose:
print(len(lq.transformations), 'results')
labelquery_results.setdefault(q['label'], {})[name] = lq
with open(results_fname, 'w') as fw:
results_json = {
label: {name: vars(lq)
for name, lq in lqs.items()}
for label, lqs in labelquery_results.items()
}
json.dump(results_json, fw, indent=2)
with open(results_fname.replace('.json', '.stats.json'), 'w') as fw:
results_json = {
name: len(lq.transformations)
for label, lqs in labelquery_results.items()
for name, lq in lqs.items()
}
json.dump(results_json, fw, indent=2)
return labelquery_results
def load_labelquery_results(modeldir, results_fname=None):
typed_labelqueries = {}
if not results_fname:
os.makedirs(os.path.join(modeldir, 'labelqueries', 'cache'),
exist_ok=True)
results_fname = os.path.join(modeldir, 'labelqueries', 'cache',
'results.json')
if os.path.exists(results_fname):
typed_labelqueries = json.load(open(results_fname))
for lq_type, labelqueries in typed_labelqueries.items():
for name, lq_params in labelqueries.items():
labelqueries[name] = annotate.LabelQuery(**lq_params)
return typed_labelqueries
def transform_all(labelqueries, unlabeled_items, model, **kwargs):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
id_items = {m['@id']: (m, r) for m, r in unlabeled_items}
lX = []
lq_labels = {}
l = len(labelqueries)
for i, (name, lq) in enumerate(labelqueries.items()):
print(f'Transforming using query {name:>4s} [{i+1:3d}/{l:3d}] ...',
end='\r',
file=sys.stderr)
# Get corresponding metadata for query results
selected_items = [
id_items[i] for i in lq.transformations if i in id_items
]
transformed_items = tuple(
zip(*[(lq.transform(m, r, **kwargs), r)
for m, r in selected_items]))
if transformed_items:
recs = tuple(
zip(*model.__class__.make_records(*transformed_items)))
if recs:
qlX, qly = recs
qlX = pd.DataFrame.from_records(list(qlX)).set_index('@id')
lX.append(qlX)
lq_labels[name] = pd.Series(qly, index=qlX.index)
print(file=sys.stderr)
lX = pd.concat(lX).drop_duplicates().replace([pd.np.nan], 0)
    L = pd.DataFrame(index=lX.index)
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
Index,
MultiIndex,
Series,
qcut,
)
import pandas._testing as tm
def cartesian_product_for_groupers(result, args, names, fill_value=np.NaN):
"""Reindex to a cartesian production for the groupers,
preserving the nature (Categorical) of each grouper
"""
def f(a):
if isinstance(a, (CategoricalIndex, Categorical)):
categories = a.categories
a = Categorical.from_codes(
np.arange(len(categories)), categories=categories, ordered=a.ordered
)
return a
index = MultiIndex.from_product(map(f, args), names=names)
return result.reindex(index, fill_value=fill_value).sort_index()
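# Illustrative sketch (not part of the original test suite): re-expand an aggregation
# over observed groups to the full cartesian product of the groupers, filling the
# missing combinations. The values below are hypothetical.
#   cat = Categorical(["a"], categories=["a", "b"])
#   partial = DataFrame({"x": [3]},
#                       index=MultiIndex.from_arrays([["a"], [1]], names=["c", "i"]))
#   cartesian_product_for_groupers(partial, [cat, [1, 2]], ["c", "i"], fill_value=0)
#   # -> rows for ("a", 1), ("a", 2), ("b", 1), ("b", 2), with 0 where no data exists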
_results_for_groupbys_with_missing_categories = {
# This maps the builtin groupby functions to their expected outputs for
# missing categories when they are called on a categorical grouper with
# observed=False. Some functions are expected to return NaN, some zero.
# These expected values can be used across several tests (i.e. they are
# the same for SeriesGroupBy and DataFrameGroupBy) but they should only be
# hardcoded in one place.
"all": np.NaN,
"any": np.NaN,
"count": 0,
"corrwith": np.NaN,
"first": np.NaN,
"idxmax": np.NaN,
"idxmin": np.NaN,
"last": np.NaN,
"mad": np.NaN,
"max": np.NaN,
"mean": np.NaN,
"median": np.NaN,
"min": np.NaN,
"nth": np.NaN,
"nunique": 0,
"prod": np.NaN,
"quantile": np.NaN,
"sem": np.NaN,
"size": 0,
"skew": np.NaN,
"std": np.NaN,
"sum": 0,
"var": np.NaN,
}
def test_apply_use_categorical_name(df):
cats = qcut(df.C, 4)
def get_stats(group):
return {
"min": group.min(),
"max": group.max(),
"count": group.count(),
"mean": group.mean(),
}
result = df.groupby(cats, observed=False).D.apply(get_stats)
assert result.index.names[0] == "C"
def test_basic():
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"],
ordered=True,
)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
exp_index = CategoricalIndex(list("abcd"), name="b", ordered=True)
expected = DataFrame({"a": [1, 2, 4, np.nan]}, index=exp_index)
result = data.groupby("b", observed=False).mean()
tm.assert_frame_equal(result, expected)
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
# single grouper
gb = df.groupby("A", observed=False)
exp_idx = CategoricalIndex(["a", "b", "z"], name="A", ordered=True)
expected = DataFrame({"values": Series([3, 7, 0], index=exp_idx)})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name)
g = x.groupby(["person_id"], observed=False)
result = g.transform(lambda x: x)
tm.assert_frame_equal(result, x[["person_name"]])
result = x.drop_duplicates("person_name")
expected = x.iloc[[0, 1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates("person_name").iloc[0]
result = g.apply(f)
expected = x.iloc[[0, 1]].copy()
expected.index = Index([1, 2], name="person_id")
expected["person_name"] = expected["person_name"].astype("object")
tm.assert_frame_equal(result, expected)
# GH 9921
# Monotonic
df = DataFrame({"a": [5, 15, 25]})
c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.max(xs)), df[["a"]]
)
# Filter
tm.assert_series_equal(df.a.groupby(c, observed=False).filter(np.all), df["a"])
tm.assert_frame_equal(df.groupby(c, observed=False).filter(np.all), df)
# Non-monotonic
df = DataFrame({"a": [5, 15, 25, -5]})
c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df[["a"]]
)
# GH 9603
df = DataFrame({"a": [1, 0, 0, 0]})
c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=Categorical(list("abcd")))
result = df.groupby(c, observed=False).apply(len)
exp_index = CategoricalIndex(c.values.categories, ordered=c.values.ordered)
expected = Series([1, 0, 0, 0], index=exp_index)
expected.index.name = "a"
tm.assert_series_equal(result, expected)
# more basic
levels = ["foo", "bar", "baz", "qux"]
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats, observed=False).mean()
expected = data.groupby(np.asarray(cats), observed=False).mean()
exp_idx = CategoricalIndex(levels, categories=cats.categories, ordered=True)
expected = expected.reindex(exp_idx)
tm.assert_frame_equal(result, expected)
grouped = data.groupby(cats, observed=False)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = np.asarray(cats).take(idx)
ord_data = data.take(idx)
exp_cats = Categorical(
ord_labels, ordered=True, categories=["foo", "bar", "baz", "qux"]
)
expected = ord_data.groupby(exp_cats, sort=False, observed=False).describe()
tm.assert_frame_equal(desc_result, expected)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
def test_level_get_group(observed):
# GH15155
df = DataFrame(
data=np.arange(2, 22, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(10)],
codes=[[0] * 5 + [1] * 5, range(10)],
names=["Index1", "Index2"],
),
)
g = df.groupby(level=["Index1"], observed=observed)
# expected should equal test.loc[["a"]]
# GH15166
expected = DataFrame(
data=np.arange(2, 12, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(5)],
codes=[[0] * 5, range(5)],
names=["Index1", "Index2"],
),
)
result = g.get_group("a")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
def test_apply(ordered):
# GH 10138
dense = Categorical(list("abc"), ordered=ordered)
# 'b' is in the categories but not in the list
missing = Categorical(list("aaa"), categories=["a", "b"], ordered=ordered)
values = np.arange(len(dense))
df = DataFrame({"missing": missing, "dense": dense, "values": values})
grouped = df.groupby(["missing", "dense"], observed=True)
# missing category 'b' should still exist in the output index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = DataFrame([0, 1, 2.0], index=idx, columns=["values"])
# GH#21636 tracking down the xfail, in some builds np.mean(df.loc[[0]])
# is coming back as Series([0., 1., 0.], index=["missing", "dense", "values"])
# when we expect Series(0., index=["values"])
result = grouped.apply(lambda x: np.mean(x))
tm.assert_frame_equal(result, expected)
# we coerce back to ints
expected = expected.astype("int")
result = grouped.mean()
tm.assert_frame_equal(result, expected)
result = grouped.agg(np.mean)
tm.assert_frame_equal(result, expected)
# but for transform we should still get back the original index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = Series(1, index=idx)
result = grouped.apply(lambda x: 1)
tm.assert_series_equal(result, expected)
def test_observed(observed):
# multiple groupers, don't re-expand the output space
# of the grouper
# gh-14942 (implement)
# gh-10132 (back-compat)
# gh-8138 (back-compat)
# gh-8869
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
df["C"] = ["foo", "bar"] * 2
# multiple groupers with a non-cat
gb = df.groupby(["A", "B", "C"], observed=observed)
exp_index = MultiIndex.from_arrays(
[cat1, cat2, ["foo", "bar"] * 2], names=["A", "B", "C"]
)
expected = DataFrame({"values": Series([1, 2, 3, 4], index=exp_index)}).sort_index()
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2, ["foo", "bar"]], list("ABC"), fill_value=0
)
tm.assert_frame_equal(result, expected)
gb = df.groupby(["A", "B"], observed=observed)
exp_index = MultiIndex.from_arrays([cat1, cat2], names=["A", "B"])
expected = DataFrame({"values": [1, 2, 3, 4]}, index=exp_index)
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2], list("AB"), fill_value=0
)
tm.assert_frame_equal(result, expected)
# https://github.com/pandas-dev/pandas/issues/8138
d = {
"cat": Categorical(
["a", "b", "a", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 1, 2, 2],
"val": [10, 20, 30, 40],
}
df = DataFrame(d)
# Grouping on a single column
groups_single_key = df.groupby("cat", observed=observed)
result = groups_single_key.mean()
exp_index = CategoricalIndex(
list("ab"), name="cat", categories=list("abc"), ordered=True
)
expected = DataFrame({"ints": [1.5, 1.5], "val": [20.0, 30]}, index=exp_index)
if not observed:
index = CategoricalIndex(
list("abc"), name="cat", categories=list("abc"), ordered=True
)
expected = expected.reindex(index)
tm.assert_frame_equal(result, expected)
# Grouping on two columns
groups_double_key = df.groupby(["cat", "ints"], observed=observed)
result = groups_double_key.agg("mean")
expected = DataFrame(
{
"val": [10, 30, 20, 40],
"cat": Categorical(
["a", "a", "b", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 2, 1, 2],
}
).set_index(["cat", "ints"])
if not observed:
expected = cartesian_product_for_groupers(
expected, [df.cat.values, [1, 2]], ["cat", "ints"]
)
tm.assert_frame_equal(result, expected)
# GH 10132
for key in [("a", 1), ("b", 2), ("b", 1), ("a", 2)]:
c, i = key
result = groups_double_key.get_group(key)
expected = df[(df.cat == c) & (df.ints == i)]
tm.assert_frame_equal(result, expected)
# gh-8869
# with as_index
d = {
"foo": [10, 8, 4, 8, 4, 1, 1],
"bar": [10, 20, 30, 40, 50, 60, 70],
"baz": ["d", "c", "e", "a", "a", "d", "c"],
}
df = DataFrame(d)
cat = pd.cut(df["foo"], np.linspace(0, 10, 3))
df["range"] = cat
groups = df.groupby(["range", "baz"], as_index=False, observed=observed)
result = groups.agg("mean")
groups2 = df.groupby(["range", "baz"], as_index=True, observed=observed)
expected = groups2.agg("mean").reset_index()
tm.assert_frame_equal(result, expected)
def test_observed_codes_remap(observed):
d = {"C1": [3, 3, 4, 5], "C2": [1, 2, 3, 4], "C3": [10, 100, 200, 34]}
df = DataFrame(d)
values = pd.cut(df["C1"], [1, 2, 3, 6])
values.name = "cat"
groups_double_key = df.groupby([values, "C2"], observed=observed)
idx = MultiIndex.from_arrays([values, [1, 2, 3, 4]], names=["cat", "C2"])
expected = DataFrame({"C1": [3, 3, 4, 5], "C3": [10, 100, 200, 34]}, index=idx)
if not observed:
expected = cartesian_product_for_groupers(
expected, [values.values, [1, 2, 3, 4]], ["cat", "C2"]
)
result = groups_double_key.agg("mean")
tm.assert_frame_equal(result, expected)
def test_observed_perf():
# we create a cartesian product, so this is
# non-performant if we don't use observed values
# gh-14942
df = DataFrame(
{
"cat": np.random.randint(0, 255, size=30000),
"int_id": np.random.randint(0, 255, size=30000),
"other_id": np.random.randint(0, 10000, size=30000),
"foo": 0,
}
)
df["cat"] = df.cat.astype(str).astype("category")
grouped = df.groupby(["cat", "int_id", "other_id"], observed=True)
result = grouped.count()
assert result.index.levels[0].nunique() == df.cat.nunique()
assert result.index.levels[1].nunique() == df.int_id.nunique()
assert result.index.levels[2].nunique() == df.other_id.nunique()
def test_observed_groups(observed):
# gh-20583
# test that we have the appropriate groups
cat = Categorical(["a", "c", "a"], categories=["a", "b", "c"])
df = DataFrame({"cat": cat, "vals": [1, 2, 3]})
g = df.groupby("cat", observed=observed)
result = g.groups
if observed:
expected = {"a": Index([0, 2], dtype="int64"), "c": Index([1], dtype="int64")}
else:
expected = {
"a": Index([0, 2], dtype="int64"),
"b": Index([], dtype="int64"),
"c": Index([1], dtype="int64"),
}
tm.assert_dict_equal(result, expected)
def test_observed_groups_with_nan(observed):
# GH 24740
df = DataFrame(
{
"cat": Categorical(["a", np.nan, "a"], categories=["a", "b", "d"]),
"vals": [1, 2, 3],
}
)
g = df.groupby("cat", observed=observed)
result = g.groups
if observed:
expected = {"a": Index([0, 2], dtype="int64")}
else:
expected = {
"a": Index([0, 2], dtype="int64"),
"b": Index([], dtype="int64"),
"d": Index([], dtype="int64"),
}
tm.assert_dict_equal(result, expected)
def test_observed_nth():
# GH 26385
cat = Categorical(["a", np.nan, np.nan], categories=["a", "b", "c"])
ser = Series([1, 2, 3])
df = DataFrame({"cat": cat, "ser": ser})
result = df.groupby("cat", observed=False)["ser"].nth(0)
index = Categorical(["a", "b", "c"], categories=["a", "b", "c"])
expected = Series([1, np.nan, np.nan], index=index, name="ser")
expected.index.name = "cat"
tm.assert_series_equal(result, expected)
def test_dataframe_categorical_with_nan(observed):
# GH 21151
s1 = Categorical([np.nan, "a", np.nan, "a"], categories=["a", "b", "c"])
s2 = Series([1, 2, 3, 4])
df = DataFrame({"s1": s1, "s2": s2})
result = df.groupby("s1", observed=observed).first().reset_index()
if observed:
expected = DataFrame(
{"s1": Categorical(["a"], categories=["a", "b", "c"]), "s2": [2]}
)
else:
expected = DataFrame(
{
"s1": Categorical(["a", "b", "c"], categories=["a", "b", "c"]),
"s2": [2, np.nan, np.nan],
}
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
@pytest.mark.parametrize("observed", [True, False])
@pytest.mark.parametrize("sort", [True, False])
def test_dataframe_categorical_ordered_observed_sort(ordered, observed, sort):
# GH 25871: Fix groupby sorting on ordered Categoricals
# GH 25167: Groupby with observed=True doesn't sort
# Build a dataframe with cat having one unobserved category ('missing'),
# and a Series with identical values
label = Categorical(
["d", "a", "b", "a", "d", "b"],
categories=["a", "b", "missing", "d"],
ordered=ordered,
)
val = Series(["d", "a", "b", "a", "d", "b"])
df = DataFrame({"label": label, "val": val})
# aggregate on the Categorical
result = df.groupby("label", observed=observed, sort=sort)["val"].aggregate("first")
# If ordering works, we expect index labels equal to aggregation results,
# except for 'observed=False': label 'missing' has aggregation None
label = Series(result.index.array, dtype="object")
aggr = Series(result.array)
if not observed:
aggr[aggr.isna()] = "missing"
if not all(label == aggr):
msg = (
"Labels and aggregation results not consistently sorted\n"
f"for (ordered={ordered}, observed={observed}, sort={sort})\n"
f"Result:\n{result}"
)
assert False, msg
def test_datetime():
# GH9049: ensure backward compatibility
levels = pd.date_range("2014-01-01", periods=4)
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats, observed=False).mean()
expected = data.groupby(np.asarray(cats), observed=False).mean()
expected = expected.reindex(levels)
expected.index = CategoricalIndex(
expected.index, categories=expected.index, ordered=True
)
tm.assert_frame_equal(result, expected)
grouped = data.groupby(cats, observed=False)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = cats.take(idx)
ord_data = data.take(idx)
expected = ord_data.groupby(ord_labels, observed=False).describe()
tm.assert_frame_equal(desc_result, expected)
tm.assert_index_equal(desc_result.index, expected.index)
tm.assert_index_equal(
desc_result.index.get_level_values(0), expected.index.get_level_values(0)
)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
def test_categorical_index():
s = np.random.RandomState(12345)
levels = ["foo", "bar", "baz", "qux"]
codes = s.randint(0, 4, size=20)
cats = Categorical.from_codes(codes, levels, ordered=True)
df = DataFrame(np.repeat(np.arange(20), 4).reshape(-1, 4), columns=list("abcd"))
df["cats"] = cats
# with a cat index
result = df.set_index("cats").groupby(level=0, observed=False).sum()
expected = df[list("abcd")].groupby(cats.codes, observed=False).sum()
expected.index = CategoricalIndex(
Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name="cats"
)
tm.assert_frame_equal(result, expected)
# with a cat column, should produce a cat index
result = df.groupby("cats", observed=False).sum()
expected = df[list("abcd")].groupby(cats.codes, observed=False).sum()
expected.index = CategoricalIndex(
Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name="cats"
)
tm.assert_frame_equal(result, expected)
def test_describe_categorical_columns():
# GH 11558
cats = CategoricalIndex(
["qux", "foo", "baz", "bar"],
categories=["foo", "bar", "baz", "qux"],
ordered=True,
)
df = DataFrame(np.random.randn(20, 4), columns=cats)
result = df.groupby([1, 2, 3, 4] * 5).describe()
tm.assert_index_equal(result.stack().columns, cats)
tm.assert_categorical_equal(result.stack().columns.values, cats.values)
def test_unstack_categorical():
# GH11558 (example is taken from the original issue)
df = DataFrame(
{"a": range(10), "medium": ["A", "B"] * 5, "artist": list("XYXXY") * 2}
)
df["medium"] = df["medium"].astype("category")
gcat = df.groupby(["artist", "medium"], observed=False)["a"].count().unstack()
result = gcat.describe()
exp_columns = CategoricalIndex(["A", "B"], ordered=False, name="medium")
tm.assert_index_equal(result.columns, exp_columns)
tm.assert_categorical_equal(result.columns.values, exp_columns.values)
result = gcat["A"] + gcat["B"]
expected = Series([6, 4], index=Index(["X", "Y"], name="artist"))
tm.assert_series_equal(result, expected)
def test_bins_unequal_len():
# GH3011
series = Series([np.nan, np.nan, 1, 1, 2, 2, 3, 3, 4, 4])
bins = pd.cut(series.dropna().values, 4)
# len(bins) != len(series) here
msg = r"Length of grouper \(8\) and axis \(10\) must be same length"
with pytest.raises(ValueError, match=msg):
series.groupby(bins).mean()
def test_as_index():
# GH13204
df = DataFrame(
{
"cat": Categorical([1, 2, 2], [1, 2, 3]),
"A": [10, 11, 11],
"B": [101, 102, 103],
}
)
result = df.groupby(["cat", "A"], as_index=False, observed=True).sum()
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 11],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
tm.assert_frame_equal(result, expected)
# function grouper
f = lambda r: df.loc[r, "A"]
result = df.groupby(["cat", f], as_index=False, observed=True).sum()
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 22],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
tm.assert_frame_equal(result, expected)
# another not in-axis grouper (conflicting names in index)
s = Series(["a", "b", "b"], name="cat")
result = df.groupby(["cat", s], as_index=False, observed=True).sum()
tm.assert_frame_equal(result, expected)
# is original index dropped?
group_columns = ["cat", "A"]
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 11],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
for name in [None, "X", "B"]:
df.index = Index(list("abc"), name=name)
result = df.groupby(group_columns, as_index=False, observed=True).sum()
tm.assert_frame_equal(result, expected)
def test_preserve_categories():
# GH-13179
categories = list("abc")
# ordered=True
df = DataFrame({"A": Categorical(list("ba"), categories=categories, ordered=True)})
index = CategoricalIndex(categories, categories, ordered=True, name="A")
tm.assert_index_equal(
df.groupby("A", sort=True, observed=False).first().index, index
)
tm.assert_index_equal(
df.groupby("A", sort=False, observed=False).first().index, index
)
# ordered=False
df = DataFrame({"A": Categorical(list("ba"), categories=categories, ordered=False)})
sort_index = CategoricalIndex(categories, categories, ordered=False, name="A")
nosort_index = CategoricalIndex(list("bac"), list("bac"), ordered=False, name="A")
tm.assert_index_equal(
df.groupby("A", sort=True, observed=False).first().index, sort_index
)
tm.assert_index_equal(
df.groupby("A", sort=False, observed=False).first().index, nosort_index
)
def test_preserve_categorical_dtype():
# GH13743, GH13854
df = DataFrame(
{
"A": [1, 2, 1, 1, 2],
"B": [10, 16, 22, 28, 34],
"C1": Categorical(list("abaab"), categories=list("bac"), ordered=False),
"C2": Categorical(list("abaab"), categories=list("bac"), ordered=True),
}
)
# single grouper
exp_full = DataFrame(
{
"A": [2.0, 1.0, np.nan],
"B": [25.0, 20.0, np.nan],
"C1": Categorical(list("bac"), categories=list("bac"), ordered=False),
"C2": Categorical(list("bac"), categories=list("bac"), ordered=True),
}
)
for col in ["C1", "C2"]:
result1 = df.groupby(by=col, as_index=False, observed=False).mean()
result2 = df.groupby(by=col, as_index=True, observed=False).mean().reset_index()
expected = exp_full.reindex(columns=result1.columns)
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
@pytest.mark.parametrize(
"func, values",
[
("first", ["second", "first"]),
("last", ["fourth", "third"]),
("min", ["fourth", "first"]),
("max", ["second", "third"]),
],
)
def test_preserve_on_ordered_ops(func, values):
# gh-18502
# preserve the categoricals on ops
c = Categorical(["first", "second", "third", "fourth"], ordered=True)
df = DataFrame({"payload": [-1, -2, -1, -2], "col": c})
g = df.groupby("payload")
result = getattr(g, func)()
expected = DataFrame(
{"payload": [-2, -1], "col": Series(values, dtype=c.dtype)}
).set_index("payload")
tm.assert_frame_equal(result, expected)
def test_categorical_no_compress():
data = Series(np.random.randn(9))
codes = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
cats = Categorical.from_codes(codes, [0, 1, 2], ordered=True)
result = data.groupby(cats, observed=False).mean()
exp = data.groupby(codes, observed=False).mean()
exp.index = CategoricalIndex(
exp.index, categories=cats.categories, ordered=cats.ordered
)
tm.assert_series_equal(result, exp)
codes = np.array([0, 0, 0, 1, 1, 1, 3, 3, 3])
cats = Categorical.from_codes(codes, [0, 1, 2, 3], ordered=True)
result = data.groupby(cats, observed=False).mean()
exp = data.groupby(codes, observed=False).mean().reindex(cats.categories)
exp.index = CategoricalIndex(
exp.index, categories=cats.categories, ordered=cats.ordered
)
tm.assert_series_equal(result, exp)
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"],
ordered=True,
)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
result = data.groupby("b", observed=False).mean()
result = result["a"].values
exp = np.array([1, 2, 4, np.nan])
tm.assert_numpy_array_equal(result, exp)
def test_groupby_empty_with_category():
# GH-9614
# test fix for when group by on None resulted in
# coercion of dtype categorical -> float
df = DataFrame({"A": [None] * 3, "B": Categorical(["train", "train", "test"])})
result = df.groupby("A").first()["B"]
expected = Series(
Categorical([], categories=["test", "train"]),
index=Series([], dtype="object", name="A"),
name="B",
)
tm.assert_series_equal(result, expected)
def test_sort():
# https://stackoverflow.com/questions/23814368/sorting-pandas-
# categorical-labels-after-groupby
# This should result in a properly sorted Series so that the plot
# has a sorted x axis
# self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
df = DataFrame({"value": np.random.randint(0, 10000, 100)})
labels = [f"{i} - {i+499}" for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=["value"], ascending=True)
df["value_group"] = pd.cut(
df.value, range(0, 10500, 500), right=False, labels=cat_labels
)
res = df.groupby(["value_group"], observed=False)["value_group"].count()
exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]
exp.index = CategoricalIndex(exp.index, name=exp.index.name)
tm.assert_series_equal(res, exp)
def test_sort2():
# dataframe groupby sort was being ignored # GH 8868
df = DataFrame(
[
["(7.5, 10]", 10, 10],
["(7.5, 10]", 8, 20],
["(2.5, 5]", 5, 30],
["(5, 7.5]", 6, 40],
["(2.5, 5]", 4, 50],
["(0, 2.5]", 1, 60],
["(5, 7.5]", 7, 70],
],
columns=["range", "foo", "bar"],
)
df["range"] = Categorical(df["range"], ordered=True)
index = CategoricalIndex(
["(0, 2.5]", "(2.5, 5]", "(5, 7.5]", "(7.5, 10]"], name="range", ordered=True
)
expected_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"], index=index
)
col = "range"
result_sort = df.groupby(col, sort=True, observed=False).first()
tm.assert_frame_equal(result_sort, expected_sort)
# when categories is ordered, group is ordered by category's order
expected_sort = result_sort
result_sort = df.groupby(col, sort=False, observed=False).first()
tm.assert_frame_equal(result_sort, expected_sort)
df["range"] = Categorical(df["range"], ordered=False)
index = CategoricalIndex(
["(0, 2.5]", "(2.5, 5]", "(5, 7.5]", "(7.5, 10]"], name="range"
)
expected_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"], index=index
)
index = CategoricalIndex(
["(7.5, 10]", "(2.5, 5]", "(5, 7.5]", "(0, 2.5]"],
categories=["(7.5, 10]", "(2.5, 5]", "(5, 7.5]", "(0, 2.5]"],
name="range",
)
expected_nosort = DataFrame(
[[10, 10], [5, 30], [6, 40], [1, 60]], index=index, columns=["foo", "bar"]
)
col = "range"
# this is an unordered categorical, but we allow this ####
result_sort = df.groupby(col, sort=True, observed=False).first()
tm.assert_frame_equal(result_sort, expected_sort)
result_nosort = df.groupby(col, sort=False, observed=False).first()
tm.assert_frame_equal(result_nosort, expected_nosort)
def test_sort_datetimelike():
# GH10505
# use same data as test_groupby_sort_categorical, which category is
# corresponding to datetime.month
df = DataFrame(
{
"dt": [
datetime(2011, 7, 1),
datetime(2011, 7, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 2, 1),
datetime(2011, 1, 1),
datetime(2011, 5, 1),
],
"foo": [10, 8, 5, 6, 4, 1, 7],
"bar": [10, 20, 30, 40, 50, 60, 70],
},
columns=["dt", "foo", "bar"],
)
# ordered=True
df["dt"] = Categorical(df["dt"], ordered=True)
index = [
datetime(2011, 1, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 7, 1),
]
result_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"]
)
result_sort.index = CategoricalIndex(index, name="dt", ordered=True)
index = [
datetime(2011, 7, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 1, 1),
]
result_nosort = DataFrame(
[[10, 10], [5, 30], [6, 40], [1, 60]], columns=["foo", "bar"]
)
result_nosort.index = CategoricalIndex(
index, categories=index, name="dt", ordered=True
)
col = "dt"
tm.assert_frame_equal(
result_sort, df.groupby(col, sort=True, observed=False).first()
)
# when categories is ordered, group is ordered by category's order
tm.assert_frame_equal(
result_sort, df.groupby(col, sort=False, observed=False).first()
)
# ordered = False
df["dt"] = Categorical(df["dt"], ordered=False)
index = [
datetime(2011, 1, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 7, 1),
]
result_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"]
)
result_sort.index = CategoricalIndex(index, name="dt")
index = [
datetime(2011, 7, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 1, 1),
]
result_nosort = DataFrame(
[[10, 10], [5, 30], [6, 40], [1, 60]], columns=["foo", "bar"]
)
result_nosort.index = CategoricalIndex(index, categories=index, name="dt")
col = "dt"
tm.assert_frame_equal(
result_sort, df.groupby(col, sort=True, observed=False).first()
)
tm.assert_frame_equal(
result_nosort, df.groupby(col, sort=False, observed=False).first()
)
def test_empty_sum():
# https://github.com/pandas-dev/pandas/issues/18678
df = DataFrame(
{"A": Categorical(["a", "a", "b"], categories=["a", "b", "c"]), "B": [1, 2, 1]}
)
expected_idx = CategoricalIndex(["a", "b", "c"], name="A")
# 0 by default
result = df.groupby("A", observed=False).B.sum()
expected = Series([3, 1, 0], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count=0
result = df.groupby("A", observed=False).B.sum(min_count=0)
expected = Series([3, 1, 0], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count=1
result = df.groupby("A", observed=False).B.sum(min_count=1)
expected = Series([3, 1, np.nan], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count>1
result = df.groupby("A", observed=False).B.sum(min_count=2)
expected = Series([3, np.nan, np.nan], expected_idx, name="B")
tm.assert_series_equal(result, expected)
def test_empty_prod():
# https://github.com/pandas-dev/pandas/issues/18678
df = DataFrame(
{"A": Categorical(["a", "a", "b"], categories=["a", "b", "c"]), "B": [1, 2, 1]}
)
expected_idx = CategoricalIndex(["a", "b", "c"], name="A")
# 1 by default
result = df.groupby("A", observed=False).B.prod()
    expected = Series([2, 1, 1], expected_idx, name="B")
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
DWD-Pilotstation software source code file
by <NAME>. Non-commercial use only.
'''
import numpy as np
import pandas as pd
import xarray as xr
import re
import datetime
import itertools as it
import operator as op
import warnings
# import packackes used for plotting quicklooks
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.colors as mcolors
import matplotlib.cm as cm
from matplotlib.ticker import MultipleLocator
from pathlib import Path
# import
from hpl2netCDF_client.hpl_files.hpl_files import hpl_files
from hpl2netCDF_client.config.config import config
from scipy.linalg import diagsvd, svdvals
### functions used for plotting
def cmap_discretize(cmap, N):
"""Return a discrete colormap from the continuous colormap cmap.
cmap: colormap instance, eg. cm.jet.
N: number of colors.
"""
if type(cmap) == str:
cmap = plt.get_cmap(cmap)
colors_i = np.concatenate((np.linspace(0, 1., N), (0.,0.,0.,0.)))
colors_rgba = cmap(colors_i)
indices = np.linspace(0, 1., N+1)
cdict = {}
for ki, key in enumerate(('red','green','blue')):
cdict[key] = [(indices[i], colors_rgba[i-1,ki], colors_rgba[i,ki]) for i in range(N+1)]
# Return colormap object.
return mcolors.LinearSegmentedColormap(cmap.name + "_%d"%N, cdict, 1024)
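# A brief, hypothetical usage sketch (not part of the original plotting code):
# an 8-step discrete colormap for quicklooks, e.g.
#   cmap8 = cmap_discretize('viridis', 8)   # or cmap_discretize(cm.jet, 8)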
### functions for the retrieval
def build_Amatrix(azimuth_vec,elevation_vec):
return np.einsum('ij -> ji',
np.vstack(
[
np.sin((np.pi/180)*(azimuth_vec))*np.sin((np.pi/180)*(90-elevation_vec))
,np.cos((np.pi/180)*(azimuth_vec))*np.sin((np.pi/180)*(90-elevation_vec))
,np.cos((np.pi/180)*(90-elevation_vec))
])
)
#Note: numpy's lstsq-function uses singular value decomposition already
def VAD_retrieval(azimuth_vec,elevation_vec,Vr):
# u, s, vh = np.linalg.svd(build_Amatrix(azimuth_vec,elevation_vec), full_matrices=True)
# A = build_Amatrix(azimuth_vec,elevation_vec)
# return vh.transpose() @ np.linalg.pinv(diagsvd(s,u.shape[0],vh.shape[0])) @ u.transpose() @ Vr
return np.linalg.lstsq(build_Amatrix(azimuth_vec,elevation_vec), Vr, rcond=-1)
def uvw_2_spd(uvw,uvw_unc):
if (np.isfinite(uvw[0]) * np.isfinite(uvw[1])) & (~np.isnan(uvw[0]) * ~np.isnan(uvw[1])):
speed = np.sqrt((uvw[0])**2.+(uvw[1])**2.)
else:
speed = np.nan
if speed > 0:
df_du = uvw[0] * 1/speed
df_dv = uvw[1] * 1/speed
error = np.sqrt((df_du*uvw_unc[0])**2 + (df_dv*uvw_unc[1])**2)
else:
error = np.nan
return {'speed': speed, 'error': error}
def uvw_2_dir(uvw,uvw_unc):
if (np.isfinite(uvw[0]) * np.isfinite(uvw[1])) & (~np.isnan(uvw[0]) * ~np.isnan(uvw[1])):
wdir = np.arctan2(uvw[0],uvw[1])*180/np.pi + 180
else:
wdir = np.nan
if np.isfinite(wdir):
error = (180/np.pi)*np.sqrt((uvw[0]*uvw_unc[0])**2 + (uvw[1]*uvw_unc[1])**2)/(uvw[0]**2 + uvw[1]**2)
else:
error = np.nan
return {'wdir': wdir, 'error': error}
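# Illustrative sketch only (not part of the original retrieval chain): build synthetic
# radial velocities from a known wind vector, invert them with VAD_retrieval and convert
# the result to wind speed and direction. All numerical values below are made up.
def _example_vad_retrieval():
    azimuth = np.arange(0., 360., 30.)                  # one synthetic PPI scan, deg
    elevation = np.full(azimuth.shape, 75.)             # constant elevation, deg
    uvw_true = np.array([3., -2., 0.1])                 # assumed wind components u, v, w in m/s
    vr = build_Amatrix(azimuth, elevation) @ uvw_true   # forward model: A * (u, v, w)
    uvw_est = VAD_retrieval(azimuth, elevation, vr)[0]  # least-squares estimate of (u, v, w)
    no_unc = np.zeros(3)                                # no uncertainties propagated in this sketch
    wspd = uvw_2_spd(uvw_est, no_unc)['speed']
    wdir = uvw_2_dir(uvw_est, no_unc)['wdir']
    return uvw_est, wspd, wdir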
def calc_sigma_single(SNR_dB,Mpts,nsmpl,BW,delta_v):
'calculates the instrument uncertainty: SNR in dB!'
# SNR_dB = np.ma.masked_values(SNR_dB, np.nan)
# SNR_dB = np.ma.masked_invalid(SNR_dB)
SNR= 10**(SNR_dB/10)
bb = np.sqrt(2.*np.pi)*(delta_v/BW)
alpha = SNR/bb
Np = Mpts*nsmpl*SNR
# a1 = (2.*np.sqrt(np.sqrt(np.pi)/alpha)).filled(np.nan)
# a1 = 2.*np.sqrt( np.divide(np.sqrt(np.pi), alpha
# , out=np.full((alpha.shape), np.nan)
# , where=alpha!=0)
# )
a1 = 2.*(np.sqrt(np.ma.divide(np.sqrt(np.pi), alpha)))#.filled(np.nan)
a2 = (1+0.16*alpha)#.filled(np.nan)
a3 = np.ma.divide(delta_v, np.sqrt(Np))#.filled(np.nan) ##here, Cramer Rao lower bound!
SNR= SNR#.filled(np.nan)
sigma = np.ma.masked_where( SNR_dB > -5
, (a1*a2*a3).filled(np.nan)
).filled(a3.filled(np.nan))
# sigma= np.where(~np.isnan(SNR)
# ,np.where(SNR_dB <= -5., (a1*a2*a3), a3)
# ,np.nan)
return sigma
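# A hedged calling example for calc_sigma_single (all instrument settings below are
# hypothetical placeholders, not taken from a real system configuration):
#   snr_db = np.array([-25., -15., -5., 0.])
#   sigma = calc_sigma_single(snr_db, Mpts=1024, nsmpl=10000, BW=50., delta_v=0.0382)
# The returned sigma is the per-gate uncertainty in m/s; the Cramer-Rao lower bound is
# used where SNR exceeds -5 dB and the full expression is used below that.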
def log10_inf(x):
result = np.zeros(x.shape)
result[x>0] = np.log10(x[x>0])
result[x<0] = -float('Inf')
return result
# def in_dB(x):
# return np.real(10*log10_inf(np.float64(x)))
def consensus_mean(Vr,SNR,CNS_range,CNS_percentage,SNR_threshold):
if SNR_threshold < 0:
SNR_threshold= 10**(SNR_threshold/10)
with np.errstate(divide='ignore', invalid='ignore'):
Vr_X= np.expand_dims(Vr, axis=0)
AjdM = (abs(np.einsum('ij... -> ji...',Vr_X)-Vr_X)<CNS_range).astype(int)
SUMlt= np.sum(AjdM, axis=0)
X= np.sum( np.einsum('il...,lj... -> ij...',AjdM
, np.where( np.sum(SNR>SNR_threshold, axis=0)/SNR.shape[0] >= CNS_percentage/100
, np.apply_along_axis(np.diag, 0,(SUMlt/np.sum(SNR>SNR_threshold, axis=0) >= CNS_percentage/100).astype(int))
,0))#[:,:,kk]
, axis=0)#[:,kk]
W= np.where(X>0,X/np.sum(X, axis=0),np.nan)
mask= np.isnan(W)
Wm= np.ma.masked_where(mask,W)
Xm= np.ma.masked_where(mask,Vr)
OutCNS=Xm*Wm
MEAN= OutCNS.sum(axis=0).filled(np.nan)
diff= Vr- MEAN
mask_m= abs(diff)<3
Vr_m = np.ma.masked_where(~mask_m,Vr)
# Vr_m.mean(axis=0).filled(np.nan)
IDX= mask_m
UNC= (Vr_m.max(axis=0)-Vr_m.min(axis=0)).filled(np.nan)/2
return MEAN, IDX, UNC
def consensus_median(Vr,SNR,CNS_range,CNS_percentage,SNR_threshold):
if SNR_threshold < 0:
SNR_threshold= 10**(SNR_threshold/10)
with np.errstate(divide='ignore', invalid='ignore'):
Vr_X= np.expand_dims(Vr, axis=0)
AjdM = (abs(np.einsum('ij... -> ji...',Vr_X)-Vr_X)<CNS_range).astype(int)
SUMlt= np.sum(AjdM, axis=0)
X= np.sum(np.einsum('il...,lj... -> ij...',AjdM
,np.where(np.sum(SNR>SNR_threshold, axis=0)/SNR.shape[0] >= CNS_percentage/100
,np.apply_along_axis(np.diag, 0,(SUMlt/np.sum(SNR>SNR_threshold, axis=0) >= CNS_percentage/100).astype(int))
,0))#[:,:,kk]
, axis=0)#[:,kk]
W= np.where(X>0,X/np.sum(X, axis=0),np.nan)
mask= np.isnan(W)
Wm= np.ma.masked_where(mask,W)
Xm= np.ma.masked_where(mask,Vr)
OutCNS=Xm*Wm
MEAN= OutCNS.sum(axis=0).filled(np.nan)
diff= Vr- MEAN
diff= np.ma.masked_values(diff, np.nan)
mask_m= (abs(diff)<3)*(~np.isnan(diff))
Vr_m= np.ma.masked_where(~mask_m,Vr)
MEAN= np.ma.median(Vr_m, axis =0).filled(np.nan)
IDX= ~np.isnan(W)
UNC= (Vr_m.max(axis=0)-Vr_m.min(axis=0)).filled(np.nan)/2
return MEAN, IDX, UNC
###############################################################################################
# functions used to identify single cycles
###############################################################################################
def process(lst,mon):
# Guard clause against empty lists
if len(lst) < 1:
return lst
# use an object here to work around closure limitations
state = type('State', (object,), dict(prev=lst[0], n=0))
def grouper_proc(x):
if mon==1:
if x < state.prev:
state.n += 1
elif mon==-1:
if x > state.prev:
state.n += 1
state.prev = x
return state.n
return { k: list(g) for k, g in it.groupby(lst, grouper_proc) }
def get_cycles(lst,mon):
ll= 0
res= {}
for key, lst in process(lst,int(np.median(np.sign(np.diff(np.array(lst)))))).items():
# print(key,np.arange(ll,ll+len(lst)),lst)
id_tmp= np.arange(ll,ll+len(lst))
ll+= len(lst)
res.update( { key:{'indices': list(id_tmp), 'values': lst} } )
return res
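# Illustrative sketch (synthetic azimuth values): split a repeating scan pattern into
# individual cycles. Each wrap of the monotonically increasing azimuth starts a new cycle.
#   az = [0, 60, 120, 180, 240, 300, 0, 60, 120, 180, 240, 300]
#   get_cycles(az, 1)
#   # -> {0: {'indices': [0, ..., 5], 'values': [0, 60, ..., 300]},
#   #     1: {'indices': [6, ..., 11], 'values': [0, 60, ..., 300]}}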
###############################################################################################
def grouper(iterable, n, fillvalue=None):
'''Collect data into fixed-length chunks or blocks'''
# grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx"
args = [iter(iterable)] * n
return it.zip_longest(*args, fillvalue=fillvalue)
# def calc_node_degree(Vr,CNS_range):
# '''takes masked array as input'''
# f_abs_pairdiff = lambda x,y: op.abs(op.sub(x,y))<CNS_range
# with np.errstate(invalid='ignore'):
# return np.array(list(grouper(it.starmap(f_abs_pairdiff,((it.permutations(Vr.filled(np.nan),2)))),Vr.shape[0]-1))).sum(axis=1)
def calc_node_degree(Vr,CNS_range,B, metric='l1norm'):
'''takes masked array as input'''
if metric == 'l1norm':
f_abs_pairdiff = lambda x,y: op.abs(op.sub(x,y))<CNS_range
if metric == 'l1norm_aa':
f_abs_pairdiff = lambda x,y: op.sub(B,op.abs(op.sub(op.abs(op.sub(x,y)),B)))<CNS_range
with np.errstate(invalid='ignore'):
return np.array(list(grouper(it.starmap(f_abs_pairdiff,((it.permutations(Vr.filled(np.nan),2)))),Vr.shape[0]-1))).sum(axis=1)
def diff_aa(x,y,c):
'''calculate aliasing independent differences'''
return (c-abs(abs(x-y)-c))
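# Worked example with a hypothetical folding velocity c = 19 m/s: two radial velocities
# separated by one folding interval are treated as close,
#   diff_aa(18., -18., 19.) -> 19 - abs(abs(18 - (-18)) - 19) = 19 - 17 = 2
# while nearby unfolded values keep their plain distance, e.g. diff_aa(2., 1., 19.) = 1.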
# def consensus(Vr,SNR,CNS_range,CNS_percentage,SNR_threshold):
def consensus(Vr,SNR,BETA,CNS_range,CNS_percentage,SNR_threshold,B):
'''
    consensus(Vr, SNR, BETA, CNS_range, CNS_percentage, SNR_threshold, B)
Calculate consensus average:
--> row-wise, calculate the arithmetic mean using only the members of the most likely cluster
, i.e. the cluster with the maximum number of edges, if the total number of valid clusters is greater than a specified limit.
Parameters
----------
Vr(time,height) : array_like (intended for using numpy array)
n-dimensional array representing a signal.
    SNR(time,height) : array_like (intended for using numpy array)
        n-dimensional array of the same dimension as Vr representing the signal to noise ratio.
    BETA(time,height) : array_like (intended for using numpy array)
        n-dimensional array of the same dimension as Vr; only bins with BETA > 0 are used when SNR_threshold is non-zero.
CNS_range(scalar) : scalar value, i.e. 0-dimensional
scalar value giving the radius for neighboring values in Vr.
CNS_percentage(scalar) : scalar value, i.e. 0-dimensional
        scalar value stating the minimum percentage for the relative number of valid clusters compared to the total number of clusters.
SNR_threshold(scalar) : scalar value, i.e. 0-dimensional
        scalar value giving the lower-bound threshold for the signal to noise ratio.
    B(scalar) : scalar value, i.e. 0-dimensional
        scalar value giving the maximum admissible magnitude of Vr; radial velocities with |Vr| > B are discarded.
    order : {'Vr', 'SNR', 'BETA', 'CNS_range', 'CNS_percentage', 'SNR_threshold', 'B'}
Returns
-------
(MEAN, IDX, UNC) --> [numpy array, boolean array, numpy array]
MEAN - consensus average of array, ...
IDX - index of values used for the consensus, ...
UNC - standard deviation of centered around the consensus average of array, ...
...for each row
Dependencies
------------
functions : check_if_db(x), in_mag(snr), filter_by_snr(vr,snr,snr_threshold)
Notes
-----
All array inputs must have the same dimensions, namely (time,height).
If the input SNR is already given in dB, do NOT filter the input SNR in advance for missing values, because filtering will be done during the calculation
Translation between dB and magnitude can be done with the functions "in_dB(x)" and "in_mag(x)".
    This function implicitly uses machine epsilon (np.float32) for the numerical value of 0, see "filter_by_snr(vr,snr,snr_threshold)".
'''
condi_0 = SNR > 0
if SNR_threshold == 0:
condi_snr = condi_0
else:
condi_snr = (10*np.log10(SNR.astype(np.complex)).real > SNR_threshold) & (BETA > 0)
Vr_m = np.ma.masked_where( ~condi_snr, Vr)
condi_vr = (abs(Vr_m.filled(-999.)) <= B)
Vr_m = np.ma.masked_where( ~condi_vr, Vr_m)
    ### calculate the number of points within the consensus range
## easy-to-understand way
# SUMlt= 1 + np.sum(
# np.einsum( 'ij...,ik...-> ij...'
# , (abs(np.einsum('ij... -> ji...', Vr_m[None,...]) - Vr_m[None,...]) < CNS_range).filled(False).astype(int)
# , np.apply_along_axis(np.diag, 0, condi_vr).astype(int))
# , axis=0) - (condi_vr).astype(int)
## performance strong way using iterators
SUMlt= 1 + calc_node_degree(Vr_m, CNS_range, B, metric='l1norm')
Vr_maxim= np.ma.masked_where( ~((100*np.max(SUMlt, axis=0)/CNS_percentage >= condi_vr.sum(axis=0)) & (condi_vr.sum(axis=0) >= Vr.shape[0]/100*60.))
# ~((100*np.max(SUMlt, axis=0)/condi_vr.sum(axis=0) >= CNS_percentage) & (100*condi_vr.sum(axis=0)/Vr.shape[0] > 60.))
, Vr_m[-(np.argmax(np.flipud(SUMlt),axis=0)+1), np.arange(0,SUMlt.shape[1])]
# , Vr_m[np.argmax(SUMlt,axis=0), np.arange(0,SUMlt.shape[1])]
)
mask_m= abs(Vr_m.filled(999.) - Vr_maxim.filled(-999.)) < CNS_range
Vr_m= np.ma.masked_where(~(mask_m), Vr_m.filled(-999.))
MEAN= Vr_m.sum(axis=0).filled(np.nan)/np.max(SUMlt, axis=0)
IDX= mask_m
UNC= np.nanstd(Vr_m-MEAN.T, axis=0)
UNC[np.isnan(MEAN)]= np.nan
### memory and time efficient option
# SUMlt= 1 + calc_node_degree(Vr_m, CNS_range, B, metric='l1norm')
### this code is more efficient, but less intuitive and accounts for one-time velocity folding
#SUMlt= 1 + calc_node_degree(Vr_m, CNS_range, B, metric='l1norm_aa')
# Vr_maxim= np.ma.masked_where( ~((100*np.max(SUMlt, axis=0)/condi_vr.sum(axis=0) >= CNS_percentage) & (100*condi_vr.sum(axis=0)/Vr.shape[0] > 60.))
# , Vr_m[-(np.argmax(np.flipud(SUMlt),axis=0)+1), np.arange(0,SUMlt.shape[1])]
# # , Vr_m[np.argmax(SUMlt,axis=0), np.arange(0,SUMlt.shape[1])]
# )
# mask_m= diff_aa(Vr_m, V_max, B) < 3
# Vr_m = np.ma.masked_where((mask_m), Vr).filled(Vr-np.sign(Vr-V_max)*2*B*np.heaviside(abs(Vr-V_max)-B, 1))
# Vr_m = np.ma.masked_where(~(mask_m), Vr_m)
# MEAN= Vr_m.mean(axis=0).filled(np.nan)
# IDX= mask_m
# UNC= np.nanstd(Vr_m-MEAN.T, axis=0)
# UNC[np.isnan(MEAN)]= np.nan
return np.round(MEAN, 4), IDX, UNC
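# Hedged usage sketch (added for illustration, not part of the original module): one plausible
# call of consensus() on synthetic (time, height) data; the array shapes, Nyquist velocity B
# and all thresholds below are assumptions, not operational settings.
def _example_consensus_usage():
    np.random.seed(0)
    vr = np.random.uniform(-5., 5., (60, 10))    # radial velocity, shape (time, height)
    snr = np.random.uniform(0.5, 5., (60, 10))   # signal-to-noise ratio in magnitude
    beta = np.ones_like(vr)                      # backscatter; only its sign is checked here
    mean, idx, unc = consensus(vr, snr, beta, 3., 60., 0., 19.)
    return mean, idx, unc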
def check_if_db(x):
'''
check_if_db(X)
Static method for checking if the input is in dB
Parameters
----------
x : array or scalar
representing the signal to noise of a lidar signal.
Returns
-------
bool
stating wether the input "likely" in dB.
Notes
-----
The method is only tested empirically and therefore not absolute.
'''
return np.any(x<-1)
def filter_by_snr(x,snr,snr_threshold):
'''
filter_by_snr(X,SNR,SNR_threshold)
Masking an n-dimensional array (X) according to a given signal to noise ratio (SNR) and specified threshold (SNR_threshold).
Parameters
----------
x : array_like (intended for using numpy array)
n-dimensional array representing a signal.
snr : array_like (intended for using numpy array)
n-dimensional array of the same dimension as x representing the signal to noise ratio.
snr_threshold : scalar value, i.e. 0-dimensional
scalar value giving the lower bounded threshold of the signal to noise threshold.
order : {'x', 'snr', 'snr_threshold'}
Returns
-------
masked_array, i.e. [data, mask]
Masked numpy array to be used in further processing.
Dependencies
------------
functions : check_if_db(x), in_mag(snr)
Notes
-----
If the input SNR is already given in dB, do NOT filter the input SNR in advance for missing values.
Translation between dB and magnitude can be done with the functions "in_dB(x)" and "in_mag(x)".
This functions uses machine epsilon (np.float16) for the numerical value of 0.
'''
if check_if_db(snr)==True:
print('SNR interpreted as dB')
print(snr.min(),snr.max())
snr= in_mag(snr)
if check_if_db(snr_threshold)==True:
print('SNR-threshold interpreted as dB')
snr_threshold= in_mag(snr_threshold)
snr_threshold+= np.finfo(np.float32).eps
return np.ma.masked_where(~(snr>snr_threshold), x)
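# Hedged usage sketch (added for illustration, not part of the original module): mask all
# velocity samples whose SNR (given here in magnitude) is not above an assumed threshold of 1.
def _example_filter_by_snr():
    vr = np.array([1.2, -0.4, 3.1])
    snr = np.array([0.5, 2.0, 1.5])
    return filter_by_snr(vr, snr, 1.0)  # first element gets masked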
def in_db(x):
'''
in_db(X)
Calculates dB values of a given input (X). The intended input is the signal to noise ratio of a Doppler lidar.
Parameters
----------
x : array_like (intended for using numpy array) OR numerical scalar
n-dimensional array
Returns
-------
X in dB
Dependencies
------------
functions : check_if_db(x)
Notes
-----
If the input X is already given in dB, X is returned without further processing.
Please, do NOT filter the input in advance for missing values.
This functions uses machine epsilon (np.float32) for the numerical value of 0.
'''
if check_if_db(x)==True:
print('Input already in dB')
return x
else:
epsilon_val= np.finfo(np.float32).eps
if np.ma.size(x)==0:
print('0-dimensional input!')
else:
if np.ma.size(x)>1:
x[x<=0]= epsilon_val
return 10*np.log10(np.ma.masked_where((x<= epsilon_val), x)).filled(10*np.log10(epsilon_val))
else:
if x<=0:
x= epsilon_val
return 10*np.log10(np.ma.masked_where((x<= epsilon_val), x)).filled(10*np.log10(epsilon_val))
def in_mag(x):
'''
in_mag(X)
Calculates the magnitude values of a given dB input (X). The intended input is the signal to noise ratio of a Doppler lidar.
Parameters
----------
x : array_like (intended for using numpy array) OR numerical scalar
n-dimensional array
Returns
-------
X in magnitude
Dependencies
------------
functions : check_if_db(x)
Notes
-----
If the input X is already given in magnitde, X is returned without further processing.
Please, do NOT filter the input in advance for missing values.
This functions uses machine epsilon (np.float32) for the numerical value of 0.
'''
if check_if_db(x)==False:
print('Input already in magnitude')
return x
else:
epsilon_val= np.finfo(np.float32).eps
if np.ma.size(x)==0:
print('0-dimensional input!')
else:
if np.ma.size(x)>1:
res= 10**(x/10)
res[res<epsilon_val]= epsilon_val
return res
else:
res= 10**(x/10)
if res<=epsilon_val:
res= epsilon_val
return res
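# Hedged usage sketch (added for illustration, not part of the original module): converting an
# SNR array from magnitude to dB and back recovers the original values up to floating point error.
def _example_db_mag_roundtrip():
    snr_mag = np.array([0.5, 1.0, 2.0, 10.0])
    snr_db = in_db(snr_mag.copy())   # copy, since in_db may overwrite non-positive entries in place
    return in_mag(snr_db)            # ~ array([0.5, 1.0, 2.0, 10.0])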
def CN_est(X):
Fill_Val = 0
X_f = X.filled(Fill_Val)
if np.all(X_f == 0):
return np.inf
else:
max_val = svdvals(X_f).max()
min_val = svdvals(X_f).min()
if min_val == 0:
return np.inf
else:
return max_val/min_val
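# Hedged usage sketch (added for illustration, not part of the original module): condition
# number of a small, well-conditioned masked design matrix; a very large value would flag an
# ill-posed retrieval.
def _example_cn_est():
    A = np.ma.masked_invalid(np.array([[1., 0.], [0., 2.], [1., 1.]]))
    return CN_est(A)  # roughly 1.8 for this matrix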
def check_num_dir(n_rays,calc_idx,azimuth,idx_valid):
h, be = np.histogram(np.mod(azimuth[calc_idx[idx_valid]],360), bins=2*n_rays, range=(0, 360))
counts = np.sum(np.r_[h[-1], h[:-1]].reshape(-1, 2), axis=1) # rotate and sum
edges = np.r_[np.r_[be[-2], be[:-2]][::2], be[-2]] # rotate and skip
kk_idx= counts >= 3
return kk_idx, np.arange(0,360,360//n_rays), edges
def find_num_dir(n_rays,calc_idx,azimuth,idx_valid):
if np.all(check_num_dir(n_rays,calc_idx,azimuth,idx_valid)[0]):
return np.all(check_num_dir(n_rays,calc_idx,azimuth,idx_valid)[0]), n_rays, check_num_dir(n_rays,calc_idx,azimuth,idx_valid)[1], check_num_dir(n_rays,calc_idx,azimuth,idx_valid)[2]
elif ~np.all(check_num_dir(n_rays,calc_idx,azimuth,idx_valid)[0]):
if n_rays > 4:
print('number of directions too high...trying ' + str(n_rays//2) + ' instead of ' + str(n_rays))
return find_num_dir(n_rays//2,calc_idx,azimuth,idx_valid)
elif n_rays < 4:
print('number of directions too low...trying ' + str(4) + ' instead')
return find_num_dir(4,calc_idx,azimuth,idx_valid)
else:
print('not enough valid directions!-->skip non-convergent time windows' )
return np.all(check_num_dir(n_rays,calc_idx,azimuth,idx_valid)[0]), n_rays, check_num_dir(n_rays,calc_idx,azimuth,idx_valid)[1], check_num_dir(n_rays,calc_idx,azimuth,idx_valid)[2]
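# Hedged usage sketch (added for illustration, not part of the original module): for a synthetic
# scan with azimuths every 60 degrees repeated three times, six directions already satisfy the
# "at least three beams per direction" requirement, so no halving is needed.
def _example_find_num_dir():
    azimuth = np.tile(np.arange(0., 360., 60.), 3)   # 18 beams over 6 directions
    calc_idx = np.arange(azimuth.size)               # use every beam of this time window
    idx_valid = np.arange(calc_idx.size)             # ...and treat all of them as valid
    ok, n_rays, centers, edges = find_num_dir(6, calc_idx, azimuth, idx_valid)
    return ok, n_rays                                # -> (True, 6)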
### the actual processing is done in this class
class hpl2netCDFClient(object):
def __init__(self, config_dir, cmd, date2proc):
self.config_dir = config_dir
self.cmd= cmd
self.date2proc= date2proc
def display_config_dir(self):
print('config-file taken from ' + self.config_dir)
def display_configDict(self):
confDict= config.gen_confDict(url= self.config_dir)
print(confDict)
def dailylvl1(self):
date_chosen = self.date2proc
confDict= config.gen_confDict(url= self.config_dir)
hpl_list= hpl_files.make_file_list(date_chosen, confDict, url=confDict['PROC_PATH'])
if not hpl_list.name:
print('no files found')
else:
print('combining files to daily lvl1...')
print(' ...')
## look at the previous and following day for potential files
# and add to hpl_list
print('looking at the previous day')
hpl_listm1 = hpl_files.make_file_list(date_chosen + datetime.timedelta(minutes=-30), confDict, url=confDict['PROC_PATH'])
print('looking at the following day')
hpl_listp1 = hpl_files.make_file_list(date_chosen + datetime.timedelta(days=+1, minutes=30), confDict, url=confDict['PROC_PATH'])
namelist = hpl_list.name
timelist = hpl_list.time
#print('check 1')
if len(hpl_listm1.time) > 0:
if date_chosen - hpl_listm1.time[-1] <= datetime.timedelta(minutes=30):
namelist = [hpl_listm1.name[-1]] + namelist
timelist = [hpl_listm1.time[-1]] + timelist
print('adding last file of previous day before')
#print('check 2')
if len(hpl_listp1.time) > 0:
if hpl_listp1.time[0] - date_chosen <= datetime.timedelta(days=1, minutes=30):
namelist = namelist + [hpl_listp1.name[0]]
timelist = timelist + [hpl_listp1.time[0]]
print('adding first file of following day after')
hpl_list = hpl_files(namelist, timelist)
# print('check 3')
# read_idx= hpl_files.reader_idx(hpl_list,confDict,chunks=False)
nc_name= hpl_files.combine_lvl1(hpl_list, confDict, date_chosen)
print(nc_name)
ds_tmp= xr.open_dataset(nc_name)
print(ds_tmp.info)
ds_tmp.close()
def dailylvl2(self):
date_chosen = self.date2proc
confDict= config.gen_confDict(url= self.config_dir)
path= Path(confDict['NC_L1_PATH'] + '/'
+ date_chosen.strftime("%Y") + '/'
+ date_chosen.strftime("%Y%m")
)
search_pattern = '**/' + confDict['NC_L1_BASENAME'] + '*' + date_chosen.strftime("%Y%m%d")+ '*.nc'
mylist= list(path.glob(search_pattern))
if not mylist:
raise FileNotFoundError(f"Could not find file matching pattern: {search_pattern}")
print(mylist[0])
if len(mylist)>1:
print('!!!multiple files found!!!, only first is processed!')
try:
ds_tmp= xr.open_dataset(mylist[0])
except Exception:
ds_tmp = None
print('no such file exists: ' + path.name + '... .nc')
if not ds_tmp:
print('unable to continue processing!')
else:
print('processing lvl1 to lvl2...')
## do processiong!!
# read lidar parameters
n_rays= int(confDict['NUMBER_OF_DIRECTIONS'])
# number of gates
n_gates= int(confDict['NUMBER_OF_GATES'])
# number of pulses used in the data point aquisition
n= ds_tmp.prf.data
# number of points per range gate
M= ds_tmp.nsmpl.data
# half of detector bandwidth in velocity space
B= ds_tmp.nqv.data
# filter Stares within scan
elevation= 90-ds_tmp.zenith.data
azimuth= ds_tmp.azi.data[elevation < 89] % 360
time_ds = ds_tmp.time.data[elevation < 89]
dv= ds_tmp.dv.data[elevation < 89]
snr= ds_tmp.intensity.data[elevation < 89]-1
beta= ds_tmp.beta.data[elevation < 89]
height= ds_tmp.range.data*np.sin(np.nanmedian(elevation[elevation < 89])*np.pi/180)
width= ds_tmp.range.data*2*np.cos(np.nanmedian(elevation[elevation < 89])*np.pi/180)
height_bnds= ds_tmp.range_bnds.data
height_bnds[:,0]= np.sin(np.nanmedian(elevation[elevation < 89])*np.pi/180)*(height_bnds[:,0])
height_bnds[:,1]= np.sin(np.nanmedian(elevation[elevation < 89])*np.pi/180)*(height_bnds[:,1])
# define time chunks
## Look for UTC_OFFSET in config
if 'UTC_OFFSET' in confDict:
time_offset = np.timedelta64(int(confDict['UTC_OFFSET']), 'h')
time_delta = int(confDict['UTC_OFFSET'])
else:
time_offset = np.timedelta64(0, 'h')
time_delta = 0
time_vec= np.arange(date_chosen - datetime.timedelta(hours=time_delta)
,date_chosen+datetime.timedelta(days = 1) - datetime.timedelta(hours=time_delta)
+datetime.timedelta(minutes= int(confDict['AVG_MIN']))
,datetime.timedelta(minutes= int(confDict['AVG_MIN'])))
calc_idx= [np.where((ii <= time_ds)*(time_ds < iip1))
for ii,iip1 in zip(time_vec[0:-1],time_vec[1::])]
time_start= np.array([int(pd.to_datetime(time_ds[t[0][-1]]).replace(tzinfo=datetime.timezone.utc).timestamp())
if len(t[0]) != 0
else int(pd.to_datetime(time_vec[ii+1]).replace(tzinfo=datetime.timezone.utc).timestamp())
for ii,t in enumerate(calc_idx)
])
time_bnds= np.array([[ int(pd.to_datetime(time_ds[t[0][0]])
import requests
import pandas as pd
import util_functions as uf
import geopandas as gpd
from shapely.geometry import Point, Polygon
import itertools
from geopy.distance import vincenty
import os
def extract_json(json_id):
# Loop through each feature in GeoJson and pull our metadata and polygon
url = "https://opendata.arcgis.com/datasets/{}.geojson".format(json_id)
resp = requests.get(url).json()
# Define empty list for concat
feature_df_list = []
for enum, feature in enumerate(resp['features']):
# Pull out metadata
feature_df = pd.DataFrame(feature['properties'], index=[enum])
# Convert Polygon geometry to geodataframe
geometry_df = gpd.GeoDataFrame(feature['geometry'])
# Convert geometry to polygon and add back to metadata dataframe
feature_df['polygon'] = Polygon(geometry_df['coordinates'].iloc[0])
feature_df_list.append(feature_df)
# Combine each Cluster into master dataframe
combined_df = pd.concat(feature_df_list, axis=0)
return combined_df
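# Hedged usage sketch (added for illustration, not part of the original script): pull one
# Open Data DC GeoJSON layer into a DataFrame of feature metadata plus shapely Polygons.
# A real dataset id is required; the default below is a made-up placeholder.
def example_extract_json(json_id="0123456789abcdef0123456789abcdef_0"):
    clusters_df = extract_json(json_id)
    return clusters_df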
def get_aws_station_info():
# Bring in station information and assign cluster to each
stations_df = pd.read_sql("""SELECT cabi_stations_temp.*, cabi_system.code AS region_code
FROM cabi_stations_temp
LEFT JOIN cabi_system
ON cabi_stations_temp.region_id = cabi_system.region_id""", con=conn)
import numpy as np
import pandas as pd
from numba import njit
import pytest
import os
from collections import namedtuple
from itertools import product, combinations
from vectorbt import settings
from vectorbt.utils import checks, config, decorators, math, array, random, enum, data, params
from tests.utils import hash
seed = 42
# ############# config.py ############# #
class TestConfig:
def test_config(self):
conf = config.Config({'a': 0, 'b': {'c': 1}}, frozen=False)
conf['b']['d'] = 2
conf = config.Config({'a': 0, 'b': {'c': 1}}, frozen=True)
conf['a'] = 2
with pytest.raises(Exception) as e_info:
conf['d'] = 2
with pytest.raises(Exception) as e_info:
conf.update(d=2)
conf.update(d=2, force_update=True)
assert conf['d'] == 2
conf = config.Config({'a': 0, 'b': {'c': 1}}, read_only=True)
with pytest.raises(Exception) as e_info:
conf['a'] = 2
with pytest.raises(Exception) as e_info:
del conf['a']
with pytest.raises(Exception) as e_info:
conf.pop('a')
with pytest.raises(Exception) as e_info:
conf.popitem()
with pytest.raises(Exception) as e_info:
conf.clear()
with pytest.raises(Exception) as e_info:
conf.update(a=2)
assert isinstance(conf.merge_with(dict(b=dict(d=2))), config.Config)
assert conf.merge_with(dict(b=dict(d=2)), read_only=True).read_only
assert conf.merge_with(dict(b=dict(d=2)))['b']['d'] == 2
conf = config.Config({'a': 0, 'b': {'c': [1, 2]}})
conf['a'] = 1
conf['b']['c'].append(3)
conf['b']['d'] = 2
assert conf == {'a': 1, 'b': {'c': [1, 2, 3], 'd': 2}}
conf.reset()
assert conf == {'a': 0, 'b': {'c': [1, 2]}}
def test_merge_dicts(self):
assert config.merge_dicts({'a': 1}, {'b': 2}) == {'a': 1, 'b': 2}
assert config.merge_dicts({'a': 1}, {'a': 2}) == {'a': 2}
assert config.merge_dicts({'a': {'b': 2}}, {'a': {'c': 3}}) == {'a': {'b': 2, 'c': 3}}
assert config.merge_dicts({'a': {'b': 2}}, {'a': {'b': 3}}) == {'a': {'b': 3}}
def test_configured(self):
class H(config.Configured):
def __init__(self, a, b=2, **kwargs):
super().__init__(a=a, b=b, **kwargs)
assert H(1).config == {'a': 1, 'b': 2}
assert H(1).copy(b=3).config == {'a': 1, 'b': 3}
assert H(1).copy(c=4).config == {'a': 1, 'b': 2, 'c': 4}
assert H(pd.Series([1, 2, 3])) == H(pd.Series([1, 2, 3]))
assert H(pd.Series([1, 2, 3])) != H(pd.Series([1, 2, 4]))
assert H(pd.DataFrame([1, 2, 3])) == H(pd.DataFrame([1, 2, 3]))
assert H(pd.DataFrame([1, 2, 3])) != H(pd.DataFrame([1, 2, 4]))
assert H(pd.Index([1, 2, 3])) == H(pd.Index([1, 2, 3]))
assert H(pd.Index([1, 2, 3])) != H(pd.Index([1, 2, 4]))
assert H(np.array([1, 2, 3])) == H(np.array([1, 2, 3]))
assert H(np.array([1, 2, 3])) != H(np.array([1, 2, 4]))
assert H(None) == H(None)
assert H(None) != H(10.)
# ############# decorators.py ############# #
class TestDecorators:
def test_class_or_instancemethod(self):
class G:
@decorators.class_or_instancemethod
def g(self_or_cls):
if isinstance(self_or_cls, type):
return True # class
return False # instance
assert G.g()
assert not G().g()
def test_custom_property(self):
class G:
@decorators.custom_property(some='key')
def cache_me(self): return np.random.uniform()
assert 'some' in G.cache_me.kwargs
assert G.cache_me.kwargs['some'] == 'key'
def test_custom_method(self):
class G:
@decorators.custom_method(some='key')
def cache_me(self): return np.random.uniform()
assert 'some' in G.cache_me.kwargs
assert G.cache_me.kwargs['some'] == 'key'
def test_cached_property(self):
np.random.seed(seed)
class G:
@decorators.cached_property
def cache_me(self): return np.random.uniform()
g = G()
cached_number = g.cache_me
assert g.cache_me == cached_number
class G:
@decorators.cached_property(hello="world", hello2="world2")
def cache_me(self): return np.random.uniform()
assert 'hello' in G.cache_me.kwargs
assert G.cache_me.kwargs['hello'] == 'world'
g = G()
g2 = G()
class G3(G):
pass
g3 = G3()
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
# clear_cache method
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
G.cache_me.clear_cache(g)
assert g.cache_me != cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
# test blacklist
# instance + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append((g, 'cache_me'))
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('cache_me')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# instance
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append(g)
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# class + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append((G, 'cache_me'))
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# class
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append(G)
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# class name + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('G.cache_me')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('G')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# improper class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('g')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# kwargs
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append({'hello': 'world'})
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append({'hello': 'world', 'hello2': 'world2'})
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append({'hello': 'world', 'hello2': 'world2', 'hello3': 'world3'})
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# disabled globally
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# test whitelist
# instance + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append((g, 'cache_me'))
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('cache_me')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# instance
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append(g)
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# class + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append((G, 'cache_me'))
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# class
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append(G)
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# class name + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('G.cache_me')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('G')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# improper class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('g')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# kwargs
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append({'hello': 'world'})
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append({'hello': 'world', 'hello2': 'world2'})
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append({'hello': 'world', 'hello2': 'world2', 'hello3': 'world3'})
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
def test_cached_method(self):
np.random.seed(seed)
class G:
@decorators.cached_method
def cache_me(self, b=10): return np.random.uniform()
g = G()
cached_number = g.cache_me
assert g.cache_me == cached_number
class G:
@decorators.cached_method(hello="world", hello2="world2")
def cache_me(self, b=10): return np.random.uniform()
assert 'hello' in G.cache_me.kwargs
assert G.cache_me.kwargs['hello'] == 'world'
g = G()
g2 = G()
class G3(G):
pass
g3 = G3()
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
# clear_cache method
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
G.cache_me.clear_cache(g)
assert g.cache_me() != cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
# test blacklist
# function
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append(g.cache_me)
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# instance + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append((g, 'cache_me'))
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('cache_me')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# instance
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append(g)
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# class + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append((G, 'cache_me'))
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# class
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append(G)
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# class name + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('G.cache_me')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('G')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# improper class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('g')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# kwargs
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append({'hello': 'world'})
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append({'hello': 'world', 'hello2': 'world2'})
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append({'hello': 'world', 'hello2': 'world2', 'hello3': 'world3'})
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# disabled globally
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# test whitelist
# function
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append(g.cache_me)
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# instance + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append((g, 'cache_me'))
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('cache_me')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# instance
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append(g)
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# class + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append((G, 'cache_me'))
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# class
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append(G)
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# class name + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('G.cache_me')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('G')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# improper class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('g')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# kwargs
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append({'hello': 'world'})
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append({'hello': 'world', 'hello2': 'world2'})
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append({'hello': 'world', 'hello2': 'world2', 'hello3': 'world3'})
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# disabled by non-hashable args
G.cache_me.clear_cache(g)
cached_number = g.cache_me(b=np.zeros(1))
assert g.cache_me(b=np.zeros(1)) != cached_number
def test_traverse_attr_kwargs(self):
class A:
@decorators.custom_property(some_key=0)
def a(self): pass
class B:
@decorators.cached_property(some_key=0, child_cls=A)
def a(self): pass
@decorators.custom_method(some_key=1)
def b(self): pass
class C:
@decorators.cached_method(some_key=0, child_cls=B)
def b(self): pass
@decorators.custom_property(some_key=1)
def c(self): pass
assert hash(str(decorators.traverse_attr_kwargs(C))) == 16728515581653529580
assert hash(str(decorators.traverse_attr_kwargs(C, key='some_key'))) == 16728515581653529580
assert hash(str(decorators.traverse_attr_kwargs(C, key='some_key', value=1))) == 703070484833749378
assert hash(str(decorators.traverse_attr_kwargs(C, key='some_key', value=(0, 1)))) == 16728515581653529580
# ############# checks.py ############# #
class TestChecks:
def test_is_pandas(self):
assert not checks.is_pandas(0)
assert not checks.is_pandas(np.array([0]))
assert checks.is_pandas(pd.Series([1, 2, 3]))
assert checks.is_pandas(pd.DataFrame([1, 2, 3]))
def test_is_series(self):
assert not checks.is_series(0)
assert not checks.is_series(np.array([0]))
assert checks.is_series(pd.Series([1, 2, 3]))
assert not checks.is_series(pd.DataFrame([1, 2, 3]))
def test_is_frame(self):
assert not checks.is_frame(0)
assert not checks.is_frame(np.array([0]))
assert not checks.is_frame(pd.Series([1, 2, 3]))
assert checks.is_frame(pd.DataFrame([1, 2, 3]))
def test_is_array(self):
assert not checks.is_array(0)
assert checks.is_array(np.array([0]))
assert checks.is_array(pd.Series([1, 2, 3]))
assert checks.is_array(pd.DataFrame([1, 2, 3]))
def test_is_numba_func(self):
def test_func(x):
return x
@njit
def test_func_nb(x):
return x
assert not checks.is_numba_func(test_func)
assert checks.is_numba_func(test_func_nb)
def test_is_hashable(self):
assert checks.is_hashable(2)
assert not checks.is_hashable(np.asarray(2))
def test_is_index_equal(self):
assert checks.is_index_equal(
pd.Index([0]),
pd.Index([0])
)
assert not checks.is_index_equal(
pd.Index([0]),
pd.Index([1])
)
assert not checks.is_index_equal(
pd.Index([0], name='name'),
pd.Index([0])
)
assert checks.is_index_equal(
pd.Index([0], name='name'),
pd.Index([0])
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn import tree
# get titanic training & test file
titanic_train = pd.read_csv("input/train.csv")
"""Creates the QualityControlDiagnosticSuite and specific data-based subclasses."""
from abc import abstractmethod
from typing import List, Tuple, Union
from datetime import time, datetime
import pytz
import h5pyd
import numpy as np
import pandas as pd
import dateutil
import matplotlib.pyplot as plt
from pyproj import Proj
from dateutil import tz
from operational_analysis import logging, logged_method_call
from operational_analysis.toolkits import timeseries
Number = Union[int, float]
logger = logging.getLogger(__name__)
def _read_data(data: Union[pd.DataFrame, str]) -> pd.DataFrame:
"""Takes the `DataFrame` or file path and returns a `DataFrame`
Args:
data(:obj: `Union[pd.DataFrame, str]`): The actual data or a path to the csv data.
Returns
(:obj: `pd.DataFrame`): The DataFrame object.
"""
if isinstance(data, pd.DataFrame):
return data
return pd.read_csv(data)
def _remove_tz(df: pd.DataFrame, t_local_column: str) -> Tuple[np.ndarray, np.ndarray]:
"""Identify the non-timestamp elements in the DataFrame timestamp column and return
a truth array for filtering the values and the timezone-naive timestamps.
This function should be used after all data has been converted to timestamps, and will
therefore only be checking for `float` data as invalid because this is the standard
fault data-type in the conversion to datetime data.
Args:
df (:obj:`pandas.DataFrame`): The DataFrame of interest.
t_local_column (:obj:`str`): The name of the timestamp column.
Returns:
:obj:`numpy.ndarray`: Truth array that can be used to filter the timestamps and subsequent values.
:obj:`numpy.ndarray`: Array of timezone-naive python `datetime` objects.
"""
arr = np.array(
[
[True, pd.to_datetime(el).tz_localize(None).to_pydatetime()]
if not isinstance(el, float)
else [False, np.nan]
for ix, el in enumerate(df.loc[:, t_local_column])
]
)
ix_filter = arr[:, 0].astype(bool)
time_stamps = arr[:, 1]
return ix_filter, time_stamps
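# Hedged usage sketch (added for illustration, not part of the original module): strip timezone
# information from a localized timestamp column; the column name, frequency and timezone below
# are assumptions that mirror the defaults used elsewhere in this module.
def _example_remove_tz():
    df = pd.DataFrame({
        "datetime_localized": pd.date_range("2020-06-01", periods=3, freq="10T",
                                            tz="America/Denver")
    })
    ix_filter, naive_stamps = _remove_tz(df, "datetime_localized")
    return naive_stamps[ix_filter]   # three timezone-naive datetime objects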
def _get_time_window(df, ix, hour_window, time_col, local_time_col, utc_time_col):
"""Retrieves the time window in a DataFrame with likely confusing
implementation of timezones.
Args:
df (:obj:`pandas.DataFrame`): The DataFrame of interest.
ix (:obj:`pandas._libs.tslibs.timestamps.Timestamp`]): The starting
Timestamp on which to base the time window.
hour_window (:obj:`pandas._libs.tslibs.timedeltas.Timedelta`): The
length of the window, in hours.
time_col (:obj:`str`): The original input datetime column.
local_time_col (:obj:`str`): The local timezone resolved datetime column.
utc_time_col (:obj:`str`): The UTC resolved datetime column.
Returns:
(:obj:`pandas.DataFrame`): The filtered DataFrame object
"""
if ix.tz is None:
col = time_col
elif str(ix.tz) == "UTC":
col = utc_time_col
else:
col = local_time_col
start = np.where(df[col] == ix - hour_window)[0][0]
end = np.where(df[col] == ix + hour_window)[0][0]
return df.iloc[start:end]
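# Hedged usage sketch (added for illustration, not part of the original module): slice out a
# +/- 1 hour window around a timezone-naive timestamp; because the timestamp is naive, the raw
# "datetime" column is the one used for matching. Column names and timezone are assumptions.
def _example_get_time_window():
    times = pd.date_range("2020-06-01", periods=24, freq="10T")
    df = pd.DataFrame({
        "datetime": times,
        "datetime_localized": times.tz_localize("America/Denver"),
        "datetime_utc": times.tz_localize("America/Denver").tz_convert("UTC"),
    })
    ix = pd.Timestamp("2020-06-01 02:00")
    return _get_time_window(df, ix, pd.Timedelta(hours=1),
                            "datetime", "datetime_localized", "datetime_utc")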
class QualityControlDiagnosticSuite:
"""This class defines key analytical procedures in a quality check process for turbine data.
After analyzing the data for missing and duplicate timestamps, timezones, Daylight Savings Time
corrections, and extrema values, the user can make informed decisions about how to handle the data.
"""
@logged_method_call
def __init__(
self,
data: Union[pd.DataFrame, str],
ws_field: str = "wmet_wdspd_avg",
power_field: str = "wtur_W_avg",
time_field: str = "datetime",
id_field: str = None,
freq: str = "10T",
lat_lon: Tuple[Number, Number] = (0, 0),
local_tz: str = "UTC",
timezone_aware: bool = False,
):
"""
Initialize QCAuto object with data and parameters.
Args:
data(:obj: `Union[pd.DataFrame, str]`): The actual data or a path to the csv data.
ws_field(:obj: 'String'): String name of the windspeed field to df
power_field(:obj: 'String'): String name of the power field to df
time_field(:obj: 'String'): String name of the time field to df
id_field(:obj: 'String'): String name of the id field to df
freq(:obj: 'String'): String representation of the resolution for the time field to df
lat_lon(:obj: 'tuple'): latitude and longitude of farm represented as a tuple; this is
purely informational.
local_tz(:obj: 'String'): The `pytz`-compatible timezone for the input `time_field`, by
default UTC. This should be in the format of "Country/City" or "Region/City" such as
"America/Denver" or "Europe/Paris".
timezone_aware(:obj: `bool`): If True, this indicates the `time_field` column has timezone
information embedded, and if False, then there is no timezone information, by default False.
"""
logger.info("Initializing QC_Automation Object")
self._df = _read_data(data)
self._ws = ws_field
self._w = power_field
self._t = time_field
self._t_utc = f"{time_field}_utc"
self._t_local = f"{time_field}_localized"
self._id = id_field
self._freq = freq
self._lat_lon = lat_lon
self._local_tz = local_tz
self._local_ptz = pytz.timezone(local_tz)
self._tz_aware = timezone_aware
self._offset = "utc_offset"
self._dst = "is_dst"
self._non_dst_offset = self._local_ptz.localize(datetime(2021, 1, 1)).utcoffset()
if self._id is None:
self._id = "ID"
self._df["ID"] = "Data"
self._convert_datetime_column()
def _determine_offset_dst(self, df: pd.DataFrame) -> None:
"""Creates a column of "utc_offset" and "is_dst".
Args:
df(:obj:`pd.DataFrame`): The dataframe object to manipulate.
Returns:
(:obj:`pd.DataFrame`): The updated dataframe with "utc_offset" and "is_dst" columns created.
"""
dt = df.copy().tz_convert(self._local_tz)
dt_col = dt.index.to_pydatetime()
# Determine the Daylight Savings Time status and UTC offset
dt[self._offset] = [el.utcoffset() for el in dt_col]
dt[self._dst] = (dt[self._offset] != self._non_dst_offset).astype(bool)
# Convert back to UTC
dt = dt.tz_convert("UTC")
return dt
def _convert_datetime_column(self) -> None:
"""Converts the passed timestamp data to a pandas-encoded Datetime, and creates a
corresponding localized and UTC timestamp using the `time_field` column name with either
"localized" or "utc", respectively. The `_df` object then uses the local timezone
timestamp for its index.
"""
# Convert the timestamps to datetime.datetime objects
dt_col = self._df[self._t].values
# Check for raw timestamp inputs or pre-formatted
if isinstance(dt_col[0], str):
dt_col = [dateutil.parser.parse(el) for el in dt_col]
# Read the timestamps as UTC, then convert to the local timezone if the data are
# timezone-aware, otherwise localize the timestamp to the local timezone
if self._tz_aware:
pd_dt_col = pd.to_datetime(dt_col, utc=True).tz_convert(self._local_tz)
self._df[self._t_local] = pd_dt_col
else:
pd_dt_col = pd.to_datetime(dt_col)
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno
from sklearn.feature_selection import RFECV
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.preprocessing import LabelEncoder, StandardScaler, MinMaxScaler
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, recall_score, precision_score, classification_report
from sklearn.cluster import MeanShift, estimate_bandwidth
from imblearn.over_sampling import SMOTE
import time
from datetime import datetime
import sqlite3 as sql
import warnings
warnings.filterwarnings("ignore")
def reduce_mem_usage(df, verbose=True):
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
start_mem = df.memory_usage().sum() / 1024**2
for col in df.columns:
col_type = df[col].dtypes
if col_type in numerics:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
end_mem = df.memory_usage().sum() / 1024**2
if verbose: print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(end_mem, 100 * (start_mem - end_mem) / start_mem))
return df
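# Hedged usage sketch (added for illustration, not part of the original script): downcast a
# small synthetic frame; the column names are made up for the example.
def _example_reduce_mem_usage():
    demo = pd.DataFrame({"small_int": np.arange(100, dtype=np.int64),
                         "small_float": np.linspace(0., 1., 100)})
    return reduce_mem_usage(demo)   # "small_int" -> int8, "small_float" -> float16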
data = pd.read_excel("data.xlsx")
df = data.copy()
df.replace("no_match", np.nan, inplace=True)
df["has_paid"].replace([True, False], [1,0], inplace=True)
df = df[df.columns.to_list()[2:]+[df.columns.to_list()[0]]+[df.columns.to_list()[1]]]
corr = df.corr()
cr = corr.copy()
top_corr_columns = []
#Determine best correlate columns over 0.1
top_corr_columns = cr.loc[:, 'default'][:-1]
best_accurate_columns = top_corr_columns[abs(top_corr_columns) > 0.1].sort_values(ascending=False)
before_fillna = best_accurate_columns
df['account_worst_status_0_3m'].fillna(value=0, inplace=True) # Valid statuses are 1-2-3-4; NA values are assigned 0.
df['account_worst_status_12_24m'].fillna(value=0, inplace=True) # Valid statuses are 1-2-3-4; NA values are assigned 0.
df['account_worst_status_3_6m'].fillna(value=0, inplace=True) # Valid statuses are 1-2-3-4; NA values are assigned 0.
df['account_worst_status_6_12m'].fillna(value=0, inplace=True) # Valid statuses are 1-2-3-4; NA values are assigned 0.
df["account_status"].fillna(value=0, inplace=True) # Valid statuses are 1-2-3-4; NA values are assigned 0.
df["avg_payment_span_0_12m"].fillna(value=df["avg_payment_span_0_12m"].mean(), inplace=True) # Absolute correlation with default stays below 0.1 after filling NAs.
df["num_active_div_by_paid_inv_0_12m"].fillna(value=0, inplace=True) # Absolute correlation with default stays below 0.1 after filling NAs with 0.
df["account_days_in_dc_12_24m"].fillna(value=0, inplace=True) #+
df["account_days_in_rem_12_24m"].fillna(value=0, inplace=True) #+
df["account_days_in_term_12_24m"].fillna(value=0, inplace=True) #+
df["name_in_email"].fillna(value=df["name_in_email"].value_counts().index[0], inplace=True) #+
df["num_arch_written_off_0_12m"].fillna(value=0, inplace=True) #+
df["num_arch_written_off_12_24m"].fillna(value=0, inplace=True) #+
cols_to_delete = ["avg_payment_span_0_3m", "max_paid_inv_0_24m", "num_arch_ok_12_24m",
"status_2nd_last_archived_0_24m", "status_3rd_last_archived_0_24m",
"status_max_archived_0_24_months", "status_max_archived_0_12_months",
"account_incoming_debt_vs_paid_0_24m", "worst_status_active_inv",
"num_arch_written_off_0_12m", "num_arch_written_off_12_24m"]
df.drop(columns=cols_to_delete, axis=1, inplace=True)
sorted(df.columns.to_list())
corr = df.corr()
cr = corr.copy()
top_corr_columns = []
#Determine best correlate columns over 0.05
top_corr_columns = cr.loc[:, 'default'][:-1]
best_accurate_columns = top_corr_columns[abs(top_corr_columns) > 0.05].sort_values(ascending=False)
after_fillna = best_accurate_columns
after_fillna
df_prepared = pd.read_csv("../data/prepared_data.csv")
df_prepared = reduce_mem_usage(df_prepared)
df_prepared = pd.merge(df_prepared, df[["uuid", "age", "merchant_category", "merchant_group", "name_in_email", "has_paid"]], how="left", on="uuid")
bins_methods = [ "auto", "fd", "doane", "scott", "stone", "rice", "sturges", "sqrt"]
# https://stackoverflow.com/a/18364570
def get_columns_bins(column_name):
all_bins = []
for method in bins_methods:
start = datetime.now()
hist, bin_edges = np.histogram(column_name,bins=method)
all_bins.append(bin_edges)
print("Method : {:<7} - Running Time : {:<5} - Number of bins : {:<5} - Head : {} - Tail : {}".format(method,str(datetime.now()-start), len(bin_edges), bin_edges[:3], bin_edges[-3:-1]))
return all_bins
# https://stackoverflow.com/a/18364570
def get_clustering_bins(s, quantile=0.3, n_samples=None):
series = s.dropna().values.reshape(-1, 1)
bandwidth = estimate_bandwidth(series, quantile=quantile, n_samples=n_samples)
clustering = MeanShift(bandwidth=bandwidth, bin_seeding=True).fit(series)
d = pd.DataFrame(columns=['data_column', 'label_column'])
d['data_column'] = series.reshape(-1)
d['label_column'] = clustering.labels_
sorted_vals = d.groupby('label_column')['data_column'].max().sort_values().values
bins = np.insert(sorted_vals, [0] , [series.min()-1])
bins[-1] = bins[-1] + 1
return bins, range(bins.size-1)
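# Hedged usage sketch (added for illustration, not part of the original notebook): MeanShift-
# derived bin edges for a synthetic, bimodal age-like distribution; the quantile and n_samples
# values are assumptions.
def _example_get_clustering_bins():
    np.random.seed(0)
    ages = pd.Series(np.concatenate([np.random.normal(30, 3, 500),
                                     np.random.normal(60, 4, 500)]))
    bins, labels = get_clustering_bins(ages, quantile=0.3, n_samples=200)
    return pd.cut(ages, bins).value_counts()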
age_bins = []
age_bins = get_columns_bins(df_prepared.age)
age_bin,label = get_clustering_bins(pd.Series(age_bins[0]), quantile=0.2, n_samples=10)
df_prepared.age.hist(bins=age_bin)
age_bin
len(age_bin) , df_prepared.age.value_counts(bins=age_bin)
df_prepared['age_category'] = pd.cut(df_prepared.age, age_bin).cat.codes
df_prepared.head(5).append(df_prepared.tail(5))
test = df_prepared.copy()
merchant_cat_others = list(df_prepared["merchant_category"].value_counts()[df_prepared["merchant_category"].value_counts() < 800].index)
df_prepared["merchant_category"] = df_prepared["merchant_category"].apply(lambda x: "Other" if x in merchant_cat_others else x)
merchant_dict = {'Entertainment':1, 'Leisure, Sport & Hobby':2, 'Clothing & Shoes':4, 'Health & Beauty':6, 'Jewelry & Accessories':7,
'Food & Beverage':9, 'Children Products':11, 'Home & Garden':13, 'Electronics':15, 'Automotive Products':17,
'Intangible products':19, 'Erotic Materials':20}
df_prepared["merchant_group"] = df_prepared["merchant_group"].replace(merchant_dict.keys(), merchant_dict.values())
df_prepared = pd.concat([df_prepared, pd.get_dummies(df_prepared["name_in_email"],prefix="in_email_")], axis=1)
df_prepared.drop(columns=["name_in_email", "age"], axis=1, inplace=True)
le_merchant_category = LabelEncoder()
df_prepared["merchant_category"] = le_merchant_category.fit_transform(df_prepared["merchant_category"])
df_default_null = df_prepared[pd.isnull(df_prepared["default"])].reset_index(drop=True)
df_analyze = df_prepared.dropna().reset_index(drop=True)
X = df_analyze.drop(columns=["uuid", "default"])
y = df_analyze["default"]
os = SMOTE(random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
columns = X_train.columns
os_data_X,os_data_y=os.fit_sample(X_train, y_train)
os_data_X = pd.DataFrame(data=os_data_X,columns=columns )
os_data_y= pd.DataFrame(data=os_data_y,columns=['default'])
# we can Check the numbers of our data
#print("Length of oversampled data is ",len(os_data_X))
#print("Number of no subscription in oversampled data",len(os_data_y[os_data_y['default']==0]))
#print("Number of subscription",len(os_data_y[os_data_y['default']==1]))
#print("Proportion of no subscription data in oversampled data is ",len(os_data_y[os_data_y['default']==0])/len(os_data_X))
#print("Proportion of subscription data in oversampled data is ",len(os_data_y[os_data_y['default']==1])/len(os_data_X))
# # Random Forest
start = time.time()
n_estimators = [200, 700]
max_depth = [5, 8]
min_samples_split = [10, 100]
min_samples_leaf = [5, 10]
hyper_random = {"n_estimators":n_estimators,
"max_depth":max_depth,
"min_samples_split":min_samples_split,
"min_samples_leaf":min_samples_leaf}
clf_rf_tuned = GridSearchCV(RandomForestClassifier(), hyper_random,
cv = 5, verbose = 1,
n_jobs = 4)
clf_rf_tuned.fit(os_data_X, os_data_y)
best_params_random = clf_rf_tuned.best_params_
print(best_params_random)
CV_clf_rf = RandomForestClassifier(max_depth=best_params_random["max_depth"],
min_samples_leaf=best_params_random["min_samples_leaf"],
min_samples_split=best_params_random["min_samples_split"],
n_estimators= best_params_random["n_estimators"])
CV_clf_rf.fit(os_data_X, os_data_y)
y_test_predict_proba_random = CV_clf_rf.predict_proba(X_test)[:, 1]
yhat_random = CV_clf_rf.predict(X_test)
fraction_of_positives, mean_predicted_value = calibration_curve(y_test, y_test_predict_proba_random, n_bins=10)
end = time.time()
hours, rem = divmod(end-start, 3600)
minutes, seconds = divmod(rem, 60)
print("\nÇalışma süresi: "+"{:0>2}:{:0>2}:{:05.2f}".format(int(hours),int(minutes),seconds))
start = time.time()
# Create a corrected classifier.
clf_sigmoid = CalibratedClassifierCV(CV_clf_rf, cv=10, method='sigmoid')
clf_sigmoid.fit(os_data_X, os_data_y)
y_test_predict_proba_random_calibrated = clf_sigmoid.predict_proba(X_test)[:, 1]
yhat_calibrated_random = clf_sigmoid.predict(X_test)
fraction_of_positives, mean_predicted_value = calibration_curve(y_test, y_test_predict_proba_random_calibrated, n_bins=10)
end = time.time()
hours, rem = divmod(end-start, 3600)
minutes, seconds = divmod(rem, 60)
print("\nProcess: "+"{:0>2}:{:0>2}:{:05.2f}".format(int(hours),int(minutes),seconds))
print(classification_report(y_test, yhat_random))
# ## Calibrated Random Forest Results
print(classification_report(y_test, yhat_calibrated_random))
# # Gaussian Naive Bayes
start = time.time()
# Uncalibrated
clf_nb = GaussianNB()
clf_nb.fit(os_data_X, os_data_y)
y_test_predict_proba_nb = clf_nb.predict_proba(X_test)[:, 1]
yhat_nb = clf_nb.predict(X_test)
fraction_of_positives_nb, mean_predicted_value_nb = calibration_curve(y_test, y_test_predict_proba_nb, n_bins=10)
# Calibrated
clf_sigmoid_nb = CalibratedClassifierCV(clf_nb, cv=10, method='isotonic')
clf_sigmoid_nb.fit(os_data_X, os_data_y)
y_test_predict_proba_nb_calib = clf_sigmoid_nb.predict_proba(X_test)[:, 1]
yhat_calibrated_nb = clf_sigmoid_nb.predict(X_test)
fraction_of_positives_nb_calib, mean_predicted_value_nb_calib = calibration_curve(y_test, y_test_predict_proba_nb_calib, n_bins=10)
# Calibrated, Platt
clf_sigmoid_nb_calib_sig = CalibratedClassifierCV(clf_nb, cv=10, method='sigmoid')
clf_sigmoid_nb_calib_sig.fit(os_data_X, os_data_y)
y_test_predict_proba_nb_calib_platt = clf_sigmoid_nb_calib_sig.predict_proba(X_test)[:, 1]
yhat_calibrated_platt = clf_sigmoid_nb_calib_sig.predict(X_test)
fraction_of_positives_nb_calib_platt, mean_predicted_value_nb_calib_platt = calibration_curve(y_test, y_test_predict_proba_nb_calib_platt, n_bins=10)
end = time.time()
hours, rem = divmod(end-start, 3600)
minutes, seconds = divmod(rem, 60)
print("\nProcess time: "+"{:0>2}:{:0>2}:{:05.2f}".format(int(hours),int(minutes),seconds))
# ## Gaussian Naive Bayes Results
print(classification_report(y_test, yhat_nb))
# ## Calibrated Gaussian Naive Bayes Results (Isotonic)
print(classification_report(y_test, yhat_calibrated_nb))
# ## Calibrated Gaussian Naive Bayes Results (Sigmoid)
print(classification_report(y_test, yhat_calibrated_platt))
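# The calibration_curve outputs computed above (the fraction_of_positives / mean_predicted_value
# pairs) are never plotted in this excerpt. A minimal reliability-diagram sketch -- the matplotlib
# import and the helper name below are additions, not part of the original pipeline:
import matplotlib.pyplot as plt
def plot_reliability(frac_pos, mean_pred, label):
    """Plot one reliability diagram from calibration_curve outputs."""
    plt.plot([0, 1], [0, 1], linestyle="--", label="perfectly calibrated")
    plt.plot(mean_pred, frac_pos, marker="o", label=label)
    plt.xlabel("Mean predicted probability")
    plt.ylabel("Fraction of positives")
    plt.legend()
    plt.show()
# e.g. plot_reliability(fraction_of_positives_nb_calib, mean_predicted_value_nb_calib, "NB (isotonic)")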
df_default_null_keep = df_default_null[["uuid", "default"]]
df_default_null.drop(columns=["uuid", "default"], axis=1, inplace=True)
# Random Forest
y_predict_proba = CV_clf_rf.predict_proba(df_default_null)[:, 1]
yhat_predict = CV_clf_rf.predict(df_default_null)
# Calibrated Random Forest
y_predict_proba_crf = clf_sigmoid.predict_proba(df_default_null)[:, 1]
yhat_predict_crf = clf_sigmoid.predict(df_default_null)
#NB
y_predict_nb = clf_nb.predict_proba(df_default_null)[:, 1]
yhat_predict_nb = clf_nb.predict(df_default_null)
# Isotonic
y_predict_nb_isotonic = clf_sigmoid_nb.predict_proba(df_default_null)[:, 1]
yhat_predict_isotonic = clf_sigmoid_nb.predict(df_default_null)
# Sigmoid
y_predict_nb_sigmoid = clf_sigmoid_nb_calib_sig.predict_proba(df_default_null)[:, 1]
yhat_predict_sigmoid = clf_sigmoid_nb_calib_sig.predict(df_default_null)
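# Note: unlike the train/test blocks further down, this concat does not reset indices; it relies on
# df_default_null_keep already having a default RangeIndex so the prediction Series align row-by-row.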
nan_df_rf_nb = pd.concat([df_default_null_keep,
pd.Series(y_predict_proba, name="Random Forest Probability"),
pd.Series(y_predict_proba_crf, name="Calibrated Random Forest Probability"),
pd.Series(y_predict_nb, name="Naive Bayes"),
pd.Series(y_predict_nb_isotonic, name="Calibrated Naive Bayes (Isotonic)"),
pd.Series(y_predict_nb_sigmoid, name="Calibrated Naive Bayes (Sigmoid)")], axis=1)
nan_df_rf_nb
df_default_train = df_analyze[df_analyze.index.isin(X_train.index.to_list())]
df_default_train.sort_index(inplace=True)
X_train.sort_index(inplace=True)
# Random Forest
y_predict_proba_train = CV_clf_rf.predict_proba(X_train)[:, 1]
yhat_predict_train = CV_clf_rf.predict(X_train)
# Calibrated Random Forest
y_predict_proba_crf_train = clf_sigmoid.predict_proba(X_train)[:, 1]
yhat_predict_crf_train = clf_sigmoid.predict(X_train)
#NB
y_predict_nb_train = clf_nb.predict_proba(X_train)[:, 1]
yhat_predict_nb_train = clf_nb.predict(X_train)
# Isotonic
y_predict_nb_isotonic_train = clf_sigmoid_nb.predict_proba(X_train)[:, 1]
yhat_predict_isotonic_train = clf_sigmoid_nb.predict(X_train)
# Sigmoid
y_predict_nb_sigmoid_train = clf_sigmoid_nb_calib_sig.predict_proba(X_train)[:, 1]
yhat_predict_sigmoid_train = clf_sigmoid_nb_calib_sig.predict(X_train)
train_df_rf_nb = pd.concat([df_default_train[["uuid", "default"]].reset_index(drop=True),
pd.Series(y_predict_proba_train, name="Random Forest Probability").reset_index(drop=True),
pd.Series(y_predict_proba_crf_train, name="Calibrated Random Forest Probability").reset_index(drop=True),
pd.Series(y_predict_nb_train, name="Naive Bayes").reset_index(drop=True),
pd.Series(y_predict_nb_isotonic_train, name="Calibrated Naive Bayes (Isotonic)").reset_index(drop=True),
pd.Series(y_predict_nb_sigmoid_train, name="Calibrated Naive Bayes (Sigmoid)").reset_index(drop=True)], axis=1)
# # Test Results
#df_analyze_train
#df_default_train = df_analyze[X_train.index]["uuid", "default"]
df_default_test = df_analyze[df_analyze.index.isin(X_test.index.to_list())]
df_default_test.sort_index(inplace=True)
X_test.sort_index(inplace=True)
# Random Forest
y_predict_proba_test = CV_clf_rf.predict_proba(X_test)[:, 1]
yhat_predict_test = CV_clf_rf.predict(X_test)
# Calibrated Random Forest
y_predict_proba_crf_test = clf_sigmoid.predict_proba(X_test)[:, 1]
yhat_predict_crf_test = clf_sigmoid.predict(X_test)
#NB
y_predict_nb_test = clf_nb.predict_proba(X_test)[:, 1]
yhat_predict_nb_test = clf_nb.predict(X_test)
# Isotonic
y_predict_nb_isotonic_test = clf_sigmoid_nb.predict_proba(X_test)[:, 1]
yhat_predict_isotonic_test = clf_sigmoid_nb.predict(X_test)
# Sigmoid
y_predict_nb_sigmoid_test = clf_sigmoid_nb_calib_sig.predict_proba(X_test)[:, 1]
yhat_predict_sigmoid_test = clf_sigmoid_nb_calib_sig.predict(X_test)
test_df_rf_nb = pd.concat([df_default_test[["uuid", "default"]].reset_index(drop=True),
pd.Series(y_predict_proba_test, name="Random Forest Probability").reset_index(drop=True),
pd.Series(y_predict_proba_crf_test, name="Calibrated Random Forest Probability").reset_index(drop=True),
pd.Series(y_predict_nb_test, name="Naive Bayes").reset_index(drop=True),
| pd.Series(y_predict_nb_isotonic_test, name="Calibrated Naive Bayes (Isotonic)") | pandas.Series |
import os
import sys
import pandas
# row 120 is the end of the level-7 questionnaire
# INSERT INTO Kysimus (kysimus_tekst, kysimusteplokk_id) VALUES ("<question text>", <question-block FK>)
# INSERT INTO KysimustePlokk (kysimusteplokk_nimi, kysimustik_id) VALUES ("<question-block name>", <questionnaire FK>)
# INSERT INTO Kysimustik (kysimustik_pealkiri) VALUES ("<questionnaire title>")
kysimustikPath = "/home/mait/Documents/Programming/opetaja-prof-arengu-mudel/server/kysimustik/KS_tegevusnaitajad_tasemeti.xlsx"
outputPath = os.path.join(
sys.path[0], "kysimustik/ks_tegevusnaitajad_tase7.txt")
pealkiri = "KS tegevusnäitajad ja tagasiside Tase 7"
data = pandas.read_excel(
kysimustikPath, sheet_name="KS tegevusnäitajad ja tagasisid")
plokkCounter = 0
kysimuseCounter = 1
f = open(outputPath, "a")
f.write(
'INSERT INTO Kysimustik (kysimustik_pealkiri) VALUES ("{0}");\n'.format(pealkiri))
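# Walk the sheet row by row: column 'Unnamed: 1' is assumed to hold question-block titles and
# 'Unnamed: 2' the question texts (this reflects the workbook layout the script expects).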
for i in range(0, 120):
if not pandas.isnull(data['Unnamed: 1'][i]) and i > 1:
row = 'INSERT INTO KysimustePlokk (kysimusteplokk_nimi, kysimustik_id) VALUES ("{0}", 1);\n'.format(
data['Unnamed: 1'][i])
f.write(row)
plokkCounter += 1
if not pandas.isnull(data['Unnamed: 2'][i]) and i > 3:
row = 'INSERT INTO Kysimus (kysimus_tekst, kysimusteplokk_id) VALUES ("{0}", {1});\n'.format(
data['Unnamed: 2'][i], plokkCounter)
f.write(row)
if not | pandas.isnull(data['Unnamed: 5'][i]) | pandas.isnull |
import datetime
from time import sleep
import pandas as pd
from loguru import logger
import ofanalysis.const as const
import ofanalysis.utility as ut
import tushare as ts
class TSDataUpdate:
def __init__(self, ts_pro_token:str):
self.__pro = ts.pro_api(ts_pro_token)
self.__today = datetime.date.today()
def retrieve_all(self):
self.retrieve_stock_basic()
self.retrieve_stock_daily_basic()
self.retrieve_stock_daily()
self.retrieve_fund_basic()
self.retrieve_fund_nav()
self.retrieve_fund_share()
self.retrieve_fund_manager()
self.retrieve_fund_portfolio()
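# Illustrative usage sketch (the token below is a placeholder, not a real credential):
#   updater = TSDataUpdate('<tushare-pro-token>')
#   updater.retrieve_all()        # refresh every collection
#   updater.retrieve_fund_nav()   # or update a single collection incrementally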
def retrieve_stock_basic(self):
logger.info('Full refresh of stock basic info (stock_basic)')
# read data in pages
df_stock_basic = pd.DataFrame()
i = 0
while True: # read data page by page
df_batch_result = self.__pro.stock_basic(**{
"ts_code": "",
"name": "",
"exchange": "",
"market": "",
"is_hs": "",
"list_status": "",
"limit": const.EACH_TIME_ITEM,
"offset": i
}, fields=[
"ts_code",
"symbol",
"name",
"area",
"industry",
"market",
"list_date",
"is_hs",
"delist_date",
"list_status",
"curr_type",
"exchange",
"cnspell",
"enname",
"fullname"
])
if len(df_batch_result) == 0:
break
df_stock_basic = pd.concat([df_stock_basic, df_batch_result], ignore_index=True)
i += const.EACH_TIME_ITEM
ut.db_del_dict_from_mongodb( # full (non-incremental) update: clear existing data first
mongo_db_name=const.MONGODB_DB_TUSHARE,
col_name=const.MONGODB_COL_TS_STOCK_BASIC,
query_dict={}
)
ut.db_save_dict_to_mongodb(
mongo_db_name=const.MONGODB_DB_TUSHARE,
col_name=const.MONGODB_COL_TS_STOCK_BASIC,
target_dict=df_stock_basic.to_dict(orient='records')
)
def retrieve_stock_daily_basic(self):
check_field = 'trade_date' # field used to detect incremental updates
logger.info('Updating daily stock indicators (stock_daily_basic)')
existed_records = ut.db_get_distinct_from_mongodb(
mongo_db_name=const.MONGODB_DB_TUSHARE,
col_name=const.MONGODB_COL_TS_STOCK_DAILY_BASIC,
field=check_field
)
if len(existed_records) == 0: # empty collection
trade_cal_start_date = '20000101'
else:
existed_records.sort(reverse=True) # sort descending
trade_cal_start_date = pd.to_datetime(existed_records[-1]) + datetime.timedelta(days=1)
trade_cal_start_date = trade_cal_start_date.strftime('%Y%m%d')
trade_cal_list = ut.get_trade_cal_from_ts(ts_pro_token=self.__pro, start_date=trade_cal_start_date)
for date in [x for x in trade_cal_list if x not in existed_records]:
logger.info('Updating stock_daily_basic: data for %s' % date)
df_daily = pd.DataFrame()
i = 0
while True: # read data page by page
for _ in range(const.RETRY_TIMES): # retry on failure
try:
df_batch_daily = self.__pro.daily_basic(**{
"ts_code": "",
"trade_date": date,
"start_date": "",
"end_date": "",
"limit": const.EACH_TIME_ITEM,
"offset": i
}, fields=[
"ts_code",
"trade_date",
"close",
"turnover_rate",
"turnover_rate_f",
"volume_ratio",
"pe",
"pe_ttm",
"pb",
"ps",
"ps_ttm",
"dv_ratio",
"dv_ttm",
"total_share",
"float_share",
"free_share",
"total_mv",
"circ_mv"
])
except:
sleep(1)
else:
break
if len(df_batch_daily) == 0:
break
df_daily = pd.concat([df_daily, df_batch_daily], ignore_index=True)
i += const.EACH_TIME_ITEM
if len(df_daily) == 0:
logger.info('Date: %s, stock_daily_basic returned no data' % date)
continue
ut.db_save_dict_to_mongodb(
mongo_db_name=const.MONGODB_DB_TUSHARE,
col_name=const.MONGODB_COL_TS_STOCK_DAILY_BASIC,
target_dict=df_daily.to_dict(orient='records')
)
def retrieve_stock_daily(self):
check_field = 'trade_date' # field used to detect incremental updates
logger.info('Updating daily stock quotes (stock_daily)')
existed_records = ut.db_get_distinct_from_mongodb(
mongo_db_name=const.MONGODB_DB_TUSHARE,
col_name=const.MONGODB_COL_TS_STOCK_DAILY,
field=check_field
)
if len(existed_records) == 0: # empty collection
trade_cal_start_date = '20000101'
else:
existed_records.sort(reverse=True) # sort descending
trade_cal_start_date = pd.to_datetime(existed_records[-1]) + datetime.timedelta(days=1)
trade_cal_start_date = trade_cal_start_date.strftime('%Y%m%d')
trade_cal_list = ut.get_trade_cal_from_ts(ts_pro_token=self.__pro, start_date=trade_cal_start_date)
for date in [x for x in trade_cal_list if x not in existed_records]:
logger.info('Updating stock_daily: data for %s' % date)
df_daily = pd.DataFrame()
i = 0
while True: # read data page by page
for _ in range(const.RETRY_TIMES): # retry on failure
try:
df_batch_daily = self.__pro.daily(**{
"ts_code": "",
"trade_date": date,
"start_date": "",
"end_date": "",
"offset": i,
"limit": const.EACH_TIME_ITEM
}, fields=[
"ts_code",
"trade_date",
"open",
"high",
"low",
"close",
"pre_close",
"change",
"pct_chg",
"vol",
"amount"
])
except:
sleep(1)
else:
break
if len(df_batch_daily) == 0:
break
df_daily = pd.concat([df_daily, df_batch_daily], ignore_index=True)
i += const.EACH_TIME_ITEM
if len(df_daily) == 0:
logger.info('Date: %s, stock_daily returned no data' % date)
continue
ut.db_save_dict_to_mongodb(
mongo_db_name=const.MONGODB_DB_TUSHARE,
col_name=const.MONGODB_COL_TS_STOCK_DAILY,
target_dict=df_daily.to_dict(orient='records')
)
def retrieve_fund_basic(self):
logger.info('Full refresh of fund basic info (fund_basic)')
df_all_fund = pd.DataFrame()
i = 0
while True: # read data page by page
df_batch_result = self.__pro.fund_basic(**{
"ts_code": "",
"market": "",
"update_flag": "",
"offset": i,
"limit": const.EACH_TIME_ITEM,
"status": ""
}, fields=[
"ts_code",
"name",
"management",
"custodian",
"fund_type",
"found_date",
"due_date",
"list_date",
"issue_date",
"delist_date",
"issue_amount",
"m_fee",
"c_fee",
"duration_year",
"p_value",
"min_amount",
"exp_return",
"benchmark",
"status",
"invest_type",
"type",
"trustee",
"purc_startdate",
"redm_startdate",
"market"
])
if len(df_batch_result) == 0:
break
df_all_fund = pd.concat([df_all_fund, df_batch_result], ignore_index=True)
i += const.EACH_TIME_ITEM
sleep(8)
ut.db_del_dict_from_mongodb( # full (non-incremental) update: clear existing data first
mongo_db_name=const.MONGODB_DB_TUSHARE,
col_name=const.MONGODB_COL_TS_FUND_BASIC,
query_dict={}
)
ut.db_save_dict_to_mongodb(
mongo_db_name=const.MONGODB_DB_TUSHARE,
col_name=const.MONGODB_COL_TS_FUND_BASIC,
target_dict=df_all_fund.to_dict(orient='records')
)
def retrieve_fund_nav(self):
check_field = 'nav_date' # field used to detect incremental updates
logger.info('Updating fund NAV data (fund_nav)')
existed_records = ut.db_get_distinct_from_mongodb(
mongo_db_name=const.MONGODB_DB_TUSHARE,
col_name=const.MONGODB_COL_TS_FUND_NAV,
field=check_field
)
if len(existed_records) == 0: # empty collection
trade_cal_start_date = '20000101'
else:
existed_records.sort(reverse=True) # sort descending
trade_cal_start_date = pd.to_datetime(existed_records[-1]) + datetime.timedelta(days=1)
trade_cal_start_date = trade_cal_start_date.strftime('%Y%m%d')
trade_cal_list = ut.get_trade_cal_from_ts(ts_pro_token=self.__pro, start_date=trade_cal_start_date)
for date in [x for x in trade_cal_list if x not in existed_records]:
logger.info('Updating fund_nav: data for %s' % date)
df_daily = pd.DataFrame()
i = 0
while True: # read data page by page
for _ in range(const.RETRY_TIMES): # retry on failure
try:
df_batch_daily = self.__pro.fund_nav(**{
"ts_code": "",
"nav_date": date,
"offset": i,
"limit": const.EACH_TIME_ITEM,
"market": "",
"start_date": "",
"end_date": ""
}, fields=[
"ts_code",
"ann_date",
"nav_date",
"unit_nav",
"accum_nav",
"accum_div",
"net_asset",
"total_netasset",
"adj_nav",
"update_flag"
])
except:
sleep(1)
else:
break
if len(df_batch_daily) == 0:
break
df_daily = pd.concat([df_daily, df_batch_daily], ignore_index=True)
i += const.EACH_TIME_ITEM
if len(df_daily) == 0:
logger.info('Date: %s, fund_nav returned no data' % date)
continue
ut.db_save_dict_to_mongodb(
mongo_db_name=const.MONGODB_DB_TUSHARE,
col_name=const.MONGODB_COL_TS_FUND_NAV,
target_dict=df_daily.to_dict(orient='records')
)
def retrieve_fund_share(self):
check_field = 'trade_date' # field used to detect incremental updates
logger.info('Updating fund share data (fund_share)')
existed_records = ut.db_get_distinct_from_mongodb(
mongo_db_name=const.MONGODB_DB_TUSHARE,
col_name=const.MONGODB_COL_TS_FUND_SHARE,
field=check_field
)
if len(existed_records) == 0: # empty collection
trade_cal_start_date = '20000101'
else:
existed_records.sort(reverse=True) # sort descending
trade_cal_start_date = pd.to_datetime(existed_records[-1]) + datetime.timedelta(days=1)
trade_cal_start_date = trade_cal_start_date.strftime('%Y%m%d')
trade_cal_list = ut.get_trade_cal_from_ts(ts_pro_token=self.__pro, start_date=trade_cal_start_date)
for date in [x for x in trade_cal_list if x not in existed_records]:
logger.info('Updating fund_share: data for %s' % date)
df_daily = pd.DataFrame()
i = 0
while True: # read data page by page
for _ in range(const.RETRY_TIMES): # retry on failure
try:
df_batch_daily = self.__pro.fund_share(**{
"ts_code": "",
"trade_date": date,
"start_date": "",
"end_date": "",
"market": "",
"fund_type": "",
"limit": const.EACH_TIME_ITEM,
"offset": i
}, fields=[
"ts_code",
"trade_date",
"fd_share",
"fund_type",
"market"
])
except:
sleep(1)
else:
break
if len(df_batch_daily) == 0:
break
df_daily = pd.concat([df_daily, df_batch_daily], ignore_index=True)
i += const.EACH_TIME_ITEM
if len(df_daily) == 0:
logger.info('Date: %s, fund_share returned no data' % date)
continue
ut.db_save_dict_to_mongodb(
mongo_db_name=const.MONGODB_DB_TUSHARE,
col_name=const.MONGODB_COL_TS_FUND_SHARE,
target_dict=df_daily.to_dict(orient='records')
)
def retrieve_fund_manager(self):
logger.info('Full refresh of fund manager data (fund_manager)')
df_result = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 7 15:50:55 2020
@author: Emmett
"""
import nltk
nltk.download('stopwords')
nltk.download('wordnet')
import LDA_Sampler
import string
import copy
import pandas as pd
import numpy as np
import keras.backend as K
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
import kerastuner as kt
import IPython
from keras import regularizers
from keras.models import Model
from numpy import linalg as LA
from nltk.corpus import stopwords
from scipy.special import gammaln
from keras.models import Sequential
from scipy.sparse import csr_matrix
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.feature_extraction.text import TfidfVectorizer
from keras.layers import Dense, Activation, Embedding, LSTM
from nltk.corpus import stopwords
stoplist = stopwords.words('english')
make_singularRoot = nltk.stem.WordNetLemmatizer()
remove_ws = nltk.tokenize.WhitespaceTokenizer()
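# Note: preprocess() expects a pandas Series of documents; its parameter name 'pd' shadows the
# pandas module alias inside the function body.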
def preprocess(pd):
pd = pd.str.lower()
pd = pd.str.replace('[{}]'.format(string.punctuation), ' ')
pd = pd.apply(lambda x: [make_singularRoot.lemmatize(w) for w in remove_ws.tokenize(x)])
pd = | pd.apply(lambda x: [item for item in x if item not in stoplist]) | pandas.apply |
#!/usr/bin/python
"""code to analyze the outputs of our 2d tuning model
"""
import matplotlib as mpl
# we do this because sometimes we run this without an X-server, and this backend doesn't need
# one. We set warn=False because the notebook uses a different backend and will spout out a big
# warning to that effect; that's unnecessarily alarming, so we hide it.
mpl.use('svg', warn=False)
import pandas as pd
import numpy as np
import torch
import re
import functools
import os
import argparse
import glob
import itertools
import warnings
from sklearn import metrics
from torch.utils import data as torchdata
from . import model as sfp_model
from tqdm import tqdm
def load_LogGaussianDonut(save_path_stem):
"""this loads and returns the actual model, given the saved parameters, for analysis
"""
# generally want to use cpu
device = torch.device("cpu")
# we try and infer model type from the path name, which we can do assuming we used the
# Snakefile to generate saved model.
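# (the last three underscore-separated tokens of the stem encode <ori_type>_<ecc_type>_<vary_amps>)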
vary_amps = save_path_stem.split('_')[-1]
ecc_type = save_path_stem.split('_')[-2]
ori_type = save_path_stem.split('_')[-3]
model = sfp_model.LogGaussianDonut(ori_type, ecc_type, vary_amps)
model.load_state_dict(torch.load(save_path_stem + '_model.pt', map_location=device.type))
model.eval()
model.to(device)
return model
def load_single_model(save_path_stem, load_results_df=True):
"""load in the model, loss df, and model df found at the save_path_stem
we also send the model to the appropriate device
"""
try:
if load_results_df:
results_df = pd.read_csv(save_path_stem + '_results_df.csv')
else:
results_df = pd.read_csv(save_path_stem + '_results_df.csv', nrows=1)
except FileNotFoundError as e:
if load_results_df:
raise e
results_df = None
loss_df = pd.read_csv(save_path_stem + '_loss.csv')
model_history_df = pd.read_csv(save_path_stem + "_model_history.csv")
if 'test_subset' not in loss_df.columns or 'test_subset' not in model_history_df.columns:
# unclear why this happens, it's really strange
test_subset = re.findall(r'_(c[\d,]+)_', save_path_stem)[0]
if not test_subset.startswith('c'):
raise Exception("Can't grab test_subset from path %s, found %s!" %
(save_path_stem, test_subset))
# this will give it the same spacing as the original version
test_subset = ', '.join(test_subset[1:].split(','))
if "test_subset" not in loss_df.columns:
loss_df['test_subset'] = test_subset
if "test_subset" not in model_history_df.columns:
model_history_df['test_subset'] = test_subset
model = load_LogGaussianDonut(save_path_stem)
return model, loss_df, results_df, model_history_df
def combine_models(base_path_template, load_results_df=True, groupaverage=False):
"""load in many models and combine into dataframes
returns: model_df, loss_df, results_df
base_path_template: path template where we should find the results. should contain no string
formatting symbols (e.g., "{0}" or "%s") but should contain at least one '*' because we will
use glob to find them (and therefore should point to an actual file when passed to glob, one
of: the loss df, model df, or model paramters).
load_results_df: boolean. Whether to load the results_df or not. Set False if your results_df
are too big and you're worried about having them all in memory. In this case, the returned
results_df will be None.
groupaverage : boolean. Whether to grab the individual subject fits
or the sub-groupaverage subject (which is a bootstrapped average
subject)
"""
models = []
loss_df = []
results_df = []
model_history_df = []
path_stems = []
for p in glob.glob(base_path_template):
if groupaverage and 'sub-groupaverage' not in p:
continue
if not groupaverage and 'sub-groupaverage' in p:
continue
path_stem = (p.replace('_loss.csv', '').replace('_model.pt', '')
.replace('_results_df.csv', '').replace('_model_history.csv', '')
.replace('_preds.pt', ''))
# we do this to make sure we're not loading in the outputs of a model twice (by finding
# both its loss.csv and its results_df.csv, for example)
if path_stem in path_stems:
continue
# based on how these are saved, we can make some assumptions and grab extra info from their
# paths
metadata = {}
if 'tuning_2d_simulated' in path_stem:
metadata['modeling_goal'] = path_stem.split(os.sep)[-2]
elif 'tuning_2d_model' in path_stem:
metadata['subject'] = path_stem.split(os.sep)[-3]
if not groupaverage:
metadata['session'] = path_stem.split(os.sep)[-2]
else:
session_dir = path_stem.split(os.sep)[-2]
metadata['session'] = session_dir.split('_')[0]
metadata['groupaverage_seed'] = session_dir.split('_')[-1]
metadata['modeling_goal'] = path_stem.split(os.sep)[-4]
metadata['mat_type'] = path_stem.split(os.sep)[-5]
metadata['atlas_type'] = path_stem.split(os.sep)[-6]
metadata['task'] = re.search('_(task-[a-z0-9]+)_', path_stem).groups()[0]
metadata['indicator'] = str((metadata['subject'], metadata['session'], metadata['task'])).replace("'", "")
path_stems.append(path_stem)
model, loss, results, model_history = load_single_model(path_stem,
load_results_df=load_results_df)
for k, v in metadata.items():
if results is not None:
results[k] = v
loss[k] = v
model_history[k] = v
results_df.append(results)
loss_df.append(loss)
model_history_df.append(model_history)
tmp = loss.head(1)
tmp = tmp.drop(columns=['epoch_num', 'batch_num', 'loss'])
tmp['model'] = model
for name, val in model.named_parameters():
tmper = tmp.copy()
tmper['model_parameter'] = name
tmper['fit_value'] = val.cpu().detach().numpy()
if results is not None:
if 'true_model_%s' % name in results.columns:
tmper['true_value'] = results['true_model_%s' % name].unique()[0]
models.append(tmper)
loss_df = pd.concat(loss_df).reset_index(drop=True)
model_history_df = pd.concat(model_history_df).reset_index(drop=True)
if load_results_df:
results_df = pd.concat(results_df).reset_index(drop=True).drop(columns='index')
else:
results_df = None
models = pd.concat(models)
return models, loss_df, results_df, model_history_df
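# Illustrative call -- base_path_template is any glob string (containing '*') that matches the
# saved *_loss.csv / *_model.pt outputs; the exact directory layout is project-specific:
#   models, loss_df, _, history = combine_models(base_path_template, load_results_df=False)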
def _finish_feature_df(df, reference_frame='absolute'):
"""helper function to clean up the feature dataframes
This helper function cleans up the feature dataframes so that they
can be more easily used for plotting with feature_df_plot and
feature_df_polar_plot functions. It performs the following actions:
1. Adds reference_frame as column.
2. Converts retinotopic angles to human-readable labels (only if
default retinotopic angles used).
3. Adds "Stimulus type" as column, giving human-readable labels
based on "Orientation" columns.
Parameters
----------
df : pd.DataFrame
The feature dataframe to finish up
reference_frame : {'absolute, 'relative'}
The reference frame of df
Returns
-------
df : pd.DataFrame
The cleaned up dataframe
"""
if isinstance(df, list):
df = pd.concat(df).reset_index(drop=True)
df['reference_frame'] = reference_frame
angle_ref = np.linspace(0, np.pi, 4, endpoint=False)
angle_labels = ['0', r'$\frac{\pi}{4}$', r'$\frac{\pi}{2}$', r'$\frac{3\pi}{4}$']
# this may seem backwards (as defined in first_level_analysis.py, forward
# spirals/diagonals are those where w_r/w_x = w_a/w_y, reverse are where
# they're negatives of each other), but this is the correct mapping, which
# you can see by playing around with sfp.figures.input_schematic (and by
# seeing that the predictions for the obliques in one reference frame match
# correctly with the cardinals in the other, e.g., the prediction for
# forward diagonal should be the same as angular at 45 degrees, in the top
# right quadrant)
rel_labels = ['radial', 'reverse spiral', 'angular', 'forward spiral']
abs_labels = ['vertical', 'reverse diagonal', 'horizontal', 'forward diagonal']
if np.array_equiv(angle_ref, df["Retinotopic angle (rad)"].unique()):
df["Retinotopic angle (rad)"] = df["Retinotopic angle (rad)"].map(dict((k, v) for k, v in
zip(angle_ref,
angle_labels)))
if reference_frame == 'relative':
df["Stimulus type"] = df["Orientation (rad)"].map(dict((k, v) for k, v in
zip(angle_ref, rel_labels)))
elif reference_frame == 'absolute':
df["Stimulus type"] = df["Orientation (rad)"].map(dict((k, v) for k, v in
zip(angle_ref, abs_labels)))
return df
def create_preferred_period_df(model, reference_frame='absolute',
retinotopic_angle=np.linspace(0, np.pi, 4, endpoint=False),
orientation=np.linspace(0, np.pi, 4, endpoint=False),
eccentricity=np.linspace(0, 11, 11)):
"""Create dataframe summarizing preferred period as function of eccentricity
Generally, you should not call this function directly, but use
create_feature_df. Differences from that function: this functions
requires the initialized model and only creates the info for a
single model, while create_feature_df uses the models dataframe to
initialize models itself, and combines the outputs across multiple
scanning sessions.
This function creates a dataframe summarizing the specified model's
preferred period as a function of eccentricity, for multiple
stimulus orientations (in either absolute or relative reference
frames) and retinotopic angles. This dataframe is then used for
creating plots to summarize the model.
You can also use this function to create the information necessary
to plot preferred period as a function of retinotopic angle at a
specific eccentricity. You can do that by reducing the number of
eccentricities (e.g., eccentricity=[5]) and increasing the number of
retinotopic angles (e.g., np.linspace(0, 2*np.pi, 49)).
Unless you have something specific in mind, you can trust the
default options for retinotopic_angle, orientation, and
eccentricity.
Parameters
----------
model : sfp.model.LogGaussianDonut
a single, initialized model, which we will summarize.
reference_frame : {"absolute", "relative"}, optional
Whether we use the absolute or relative reference frame in the
feature dataframe; that is whether we consider conventional
gratings (absolute, orientation is relative to
vertical/horizontal), or our log-polar gratings (relative,
orientation is relative to fovea).
retinotopic_angle : np.array, optional
Array specifying which retinotopic angles to find the preferred
period for. If you don't care about retinotopic angle and just
want to summarize the model's overall features, you should use
the default (which includes all angles where the model can have
different preferences, based on its parametrization) and then
average over them.
orientation : np.array, optional
Array specifying which stimulus orientations to find the
preferred period for. Note that the meaning of these
orientations will differ depending on the value of
reference_frame; you should most likely plot and interpret the
output based on the "Stimulus type" column instead (which
include strings like 'vertical'/'horizontal' or
'radial'/'angular'). However, this mapping can only happen if
the values in orientation line up with our stimuli (0, pi/4,
pi/2, 3*pi/2), and thus it's especially recommended that you use
the default value for this argument. If you don't care about
orientation and just want to summarize the model's overall
features, you should use the default (which includes all
orientations where the model can have different preferences,
based on its parametrization) and then average over them.
eccentricity : np.array, optional
Array specifying which eccentricities to find the preferred
period for. The default values span the range of measurements
for our experiment, but you can certainly go higher if you
wish. Note that, for creating the plot of preferred period as a
function of eccentricity, the model's predictions will always be
linear and so you most likely only need 2 points. More are
included because you may want to examine the preferred period at
specific eccentricities
Returns
-------
preferred_period_df : pd.DataFrame
Dataframe containing preferred period of the model, to use with
sfp.plotting.feature_df_plot for plotting preferred period as a
function of eccentricity.
"""
df = []
for o in orientation:
if reference_frame == 'absolute':
tmp = model.preferred_period(eccentricity, retinotopic_angle, o)
elif reference_frame == 'relative':
tmp = model.preferred_period(eccentricity, retinotopic_angle, rel_sf_angle=o)
tmp = pd.DataFrame(tmp.detach().numpy(), index=retinotopic_angle, columns=eccentricity)
tmp = tmp.reset_index().rename(columns={'index': 'Retinotopic angle (rad)'})
tmp['Orientation (rad)'] = o
df.append(pd.melt(tmp, ['Retinotopic angle (rad)', 'Orientation (rad)'],
var_name='Eccentricity (deg)', value_name='Preferred period (deg)'))
return _finish_feature_df(df, reference_frame)
def create_preferred_period_contour_df(model, reference_frame='absolute',
retinotopic_angle=np.linspace(0, 2*np.pi, 49),
orientation=np.linspace(0, np.pi, 4, endpoint=False),
period_target=[.5, 1, 1.5], ):
"""Create dataframe summarizing preferred period as function of retinotopic angle
Generally, you should not call this function directly, but use
create_feature_df. Differences from that function: this functions
requires the initialized model and only creates the info for a
single model, while create_feature_df uses the models dataframe to
initialize models itself, and combines the outputs across multiple
scanning sessions.
This function creates a dataframe summarizing the specified model's
preferred period as a function of retinotopic angle, for multiple
stimulus orientations (in either absolute or relative reference
frames) and target periods. That is, it contains information showing
at what eccentricity the model's preferred period is, e.g., 1 for a
range of retinotopic angles and stimulus orientation. This dataframe
is then used for creating plots to summarize the model.
So this function creates information to plot iso-preferred period
lines. If you want to plot preferred period as a function of
retinotopic angle for a specific eccentricity, you can do that with
create_preferred_period_df, by reducing the number of eccentricities
(e.g., eccentricity=[5]) and increasing the number of retinotopic
angles (e.g., np.linspace(0, 2*np.pi, 49))
Unless you have something specific in mind, you can trust the
default options for retinotopic_angle, orientation, and
period_target
Parameters
----------
model : sfp.model.LogGaussianDonut
a single, initialized model, which we will summarize.
reference_frame : {"absolute", "relative"}, optional
Whether we use the absolute or relative reference frame in the
feature dataframe; that is whether we consider conventional
gratings (absolute, orientation is relative to
vertical/horizontal), or our log-polar gratings (relative,
orientation is relative to fovea).
retinotopic_angle : np.array, optional
Array specifying which retinotopic angles to find the preferred
period for. Note that the sampling of retinotopic angle is much
finer than for create_preferred_period_df (and goes all the way
to 2*pi), because this is what we will use as the dependent
variable in our plots
orientation : np.array, optional
Array specifying which stimulus orientations to find the
preferred period for. Note that the meaning of these
orientations will differ depending on the value of
reference_frame; you should most likely plot and interpret the
output based on the "Stimulus type" column instead (which
include strings like 'vertical'/'horizontal' or
'radial'/'angular'). However, this mapping can only happen if
the values in orientation line up with our stimuli (0, pi/4,
pi/2, 3*pi/2), and thus it's especially recommended that you use
the default value for this argument. If you don't care about
orientation and just want to summarize the model's overall
features, you should use the default (which includes all
orientations where the model can have different preferences,
based on its parametrization) and then average over them.
period_target : np.array, optional
Array specifying which the target periods for the model. The
intended use of this dataframe is to plot contour plots showing
at what eccentricity the model will have a specified preferred
period (for a range of angles and orientations), and this
argument specifies those periods.
Returns
-------
preferred_period_contour_df : pd.DataFrame
Dataframe containing preferred period of the model, to use with
sfp.plotting.feature_df_polar_plot for plotting preferred period
as a function of retinotopic angle.
"""
df = []
for p in period_target:
if reference_frame == 'absolute':
tmp = model.preferred_period_contour(p, retinotopic_angle, orientation)
elif reference_frame == 'relative':
tmp = model.preferred_period_contour(p, retinotopic_angle, rel_sf_angle=orientation)
tmp = pd.DataFrame(tmp.detach().numpy(), index=retinotopic_angle, columns=orientation)
tmp = tmp.reset_index().rename(columns={'index': 'Retinotopic angle (rad)'})
tmp['Preferred period (deg)'] = p
df.append(pd.melt(tmp, ['Retinotopic angle (rad)', 'Preferred period (deg)'],
var_name='Orientation (rad)', value_name='Eccentricity (deg)'))
return _finish_feature_df(df, reference_frame)
def create_max_amplitude_df(model, reference_frame='absolute',
retinotopic_angle=np.linspace(0, 2*np.pi, 49),
orientation=np.linspace(0, np.pi, 4, endpoint=False)):
"""Create dataframe summarizing max amplitude as function of retinotopic angle
Generally, you should not call this function directly, but use
create_feature_df. Differences from that function: this functions
requires the initialized model and only creates the info for a
single model, while create_feature_df uses the models dataframe to
initialize models itself, and combines the outputs across multiple
scanning sessions.
This function creates a dataframe summarizing the specified model's
maximum amplitude as a function of retinotopic angle, for multiple
stimulus orientations (in either absolute or relative reference
frames). This dataframe is then used for creating plots to summarize
the model.
Unless you have something specific in mind, you can trust the
default options for retinotopic_angle and orientation.
Parameters
----------
model : sfp.model.LogGaussianDonut
a single, initialized model, which we will summarize.
reference_frame : {"absolute", "relative"}, optional
Whether we use the absolute or relative reference frame in the
feature dataframe; that is whether we consider conventional
gratings (absolute, orientation is relative to
vertical/horizontal), or our log-polar gratings (relative,
orientation is relative to fovea).
retinotopic_angle : np.array, optional
Array specifying which retinotopic angles to compute the max
amplitude for. Note that the sampling of retinotopic angle is much
finer than for create_preferred_period_df (and goes all the way
to 2*pi), because this is what we will use as the dependent
variable in our plots
orientation : np.array, optional
Array specifying which stimulus orientations to compute the max
amplitude for. Note that the meaning of these
orientations will differ depending on the value of
reference_frame; you should most likely plot and interpret the
output based on the "Stimulus type" column instead (which
include strings like 'vertical'/'horizontal' or
'radial'/'angular'). However, this mapping can only happen if
the values in orientation line up with our stimuli (0, pi/4,
pi/2, 3*pi/2), and thus it's especially recommended that you use
the default value for this argument. If you don't care about
orientation and just want to summarize the model's overall
features, you should use the default (which includes all
orientations where the model can have different preferences,
based on its parametrization) and then average over them.
Returns
-------
max_amplitude_df : pd.DataFrame
Dataframe containing maximum amplitude of the model, to use with
sfp.plotting.feature_df_polar_plot for plotting max amplitude as
a function of retinotopic angle.
"""
if reference_frame == 'absolute':
tmp = model.max_amplitude(retinotopic_angle, orientation).detach().numpy()
elif reference_frame == 'relative':
tmp = model.max_amplitude(retinotopic_angle, rel_sf_angle=orientation).detach().numpy()
tmp = pd.DataFrame(tmp, index=retinotopic_angle, columns=orientation)
tmp = tmp.reset_index().rename(columns={'index': 'Retinotopic angle (rad)'})
df = pd.melt(tmp, ['Retinotopic angle (rad)'], var_name='Orientation (rad)',
value_name='Max amplitude')
return _finish_feature_df(df, reference_frame)
def create_feature_df(models, feature_type='preferred_period', reference_frame='absolute',
gb_cols=['subject', 'bootstrap_num'], **kwargs):
"""Create dataframe to summarize the predictions made by our models
The point of this dataframe is to generate plots (using
plotting.feature_df_plot and plotting.feature_df_polar_plot) to
easily visualize what the parameters of our model mean, either for
demonstrative purposes or with the parameters fit to actual data.
This is used to create a feature data frame that combines info
across multiple models, using the columns indicated in gb_cols to
separate them, and serves as a wrapper around three other functions:
create_preferred_period_df, create_preferred_period_contour_df, and
create_max_amplitude_df (based on the value of the feature_type
arg). We loop through the unique subsets of the data given by
models.groupby(gb_cols) in the models dataframe and instantiate a
model for each one (thus, each subset must only have one associated
model). We then create dataframes summarizing the relevant features,
add the identifying information, and, concatenate.
The intended use of these dataframes is to create plots showing the
models' predictions for (using bootstraps to get confidence
intervals to show variability across subjects):
1. preferred period as a function of eccentricity:
```
pref_period = create_feature_df(models, feature_type='preferred_period')
sfp.plotting.feature_df_plot(pref_period)
# average over retinotopic angle
sfp.plotting.feature_df_plot(pref_period, col=None,
pre_boot_gb_func=np.mean)
```
2. preferred period as a function of a function of retinotopic
angle of stimulus orientation at a given eccentricity:
```
pref_period = create_feature_df(models, feature_type='preferred_period',
eccentricity=[5],
retinotopic_angle=np.linspace(0, 2*np.pi, 49))
sfp.plotting.feature_df_polar_plot(pref_period, col='Eccentricity (deg)',
r='Preferred period (deg)')
```
3. iso-preferred period lines as a function of retinotopic angle and
stimulus orientation (i.e., at what eccentricity do you have a
preferred period of 1 for this angle and orientation):
```
pref_period_contour = create_feature_df(models,
feature_type='preferred_period_contour')
sfp.plotting.feature_df_polar_plot(pref_period_contour)
```
4. max amplitude as a function of retinotopic angle and stimulus
orientation:
```
max_amp = create_feature_df(models, feature_type='max_amplitude')
sfp.plotting.feature_df_polar_plot(max_amp, col=None, r='Max amplitude')
```
Parameters
----------
models : pd.DataFrame
dataframe summarizing model fits across many subjects / sessions
(as created by analyze_model.combine_models function). Must
contain the columns indicated in gb_cols and a row for each of
the model's 11 parameters
feature_type : {"preferred_period", "preferred_period_contour", "max_amplitude"}, optional
Which feature dataframe to create. Determines which function we
call, from create_preferred_period_df,
create_preferred_period_contour_df, and create_max_amplitude_df
reference_frame : {"absolute", "relative"}, optional
Whether we use the absolute or relative reference frame in the
feature dataframe; that is whether we consider conventional
gratings (absolute, orientation is relative to
vertical/horizontal), or our log-polar gratings (relative,
orientation is relative to fovea).
gb_cols : list, optional
list of strs indicating columns in the df to groupby. when we
groupby these columns, each subset should give a single
model. Thus, this should be something like ['subject'],
['subject', 'bootstrap_num'], or ['subject', 'session']
(depending on the contents of your df)
kwargs : {retinotopic_angle, orientation, eccentricity, period_target}
passed to the various create_*_df functions. See their
docstrings for more info. if not set, use the defaults.
Returns
-------
feature_df : pd.DataFrame
Dataframe containing specified feature info
"""
df = []
for n, g in models.groupby(gb_cols):
m = sfp_model.LogGaussianDonut.init_from_df(g)
if feature_type == 'preferred_period':
df.append(create_preferred_period_df(m, reference_frame, **kwargs))
elif feature_type == 'preferred_period_contour':
df.append(create_preferred_period_contour_df(m, reference_frame, **kwargs))
elif feature_type == 'max_amplitude':
df.append(create_max_amplitude_df(m, reference_frame, **kwargs))
# in this case, gb_cols is a list with one element, so n will
# just be a single element (probably a str). In order for the
# following dict(zip(gb_cols, n)) call to work correctly, both
# have to be lists with the same length, so this ensures that.
if len(gb_cols) == 1:
n = [n]
df[-1] = df[-1].assign(**dict(zip(gb_cols, n)))
return pd.concat(df).reset_index(drop=True)
def collect_final_loss(paths):
"""collect up the loss files, add some metadata, and concat
We loop through the paths, loading each in, grab the last epoch, add
some metadata by parsing the path, and then concatenate and return
the resulting df
Note that the assumed use here is collecting the loss.csv files
created by the different folds of cross-validation, but we don't
explicitly check for that and so this maybe useful in other contexts
Parameters
----------
paths : list
list of strs giving the paths to the loss files. we attempt to
parse these strings to find the subject, session, and task, and
will raise an Exception if we can't do so
Returns
-------
df : pd.DataFrame
the collected loss
"""
df = []
print(f"Loading in {len(paths)} total paths")
pbar = tqdm(range(len(paths)))
for i in pbar:
p = paths[i]
regexes = [r'(sub-[a-z0-9]+)', r'(ses-[a-z0-9]+)', r'(task-[a-z0-9]+)']
regex_names = ['subject', 'session', 'task']
if 'sub-groupaverage' in p:
regex_names.append('groupaverage_seed')
regexes.append(r'(_s[0-9]+)')
pbar.set_postfix(path=os.path.split(p)[-1])
tmp = pd.read_csv(p)
last_epoch = tmp.epoch_num.unique().max()
tmp = tmp.query("epoch_num == @last_epoch")
for n, regex in zip(regex_names, regexes):
res = re.findall(regex, p)
if len(set(res)) != 1:
raise Exception(f"Unable to infer {n} from path {p}!")
tmp[n] = res[0]
df.append(tmp)
return pd.concat(df)
def _calc_loss(preds, targets, loss_func, average=True):
"""Compute loss from preds and targets.
Parameters
----------
preds : torch.tensor
The torch tensor containing the predictions
targets : torch.tensor
The torch tensor containing the targets
loss_func : str
The loss function to compute. One of: {'weighted_normed_loss',
'crosscorrelation', 'normed_loss', 'explained_variance_score',
'cosine_distance', 'cosine_distance_scaled'}.
average : bool, optional
If True, we average the cv loss so we have only one value. If False, we
return one value per voxel.
Returns
-------
loss : array or float
The loss, either overall or per voxel.
"""
if loss_func == 'crosscorrelation':
# targets[..., 0] contains the actual targets, targets[..., 1]
# contains the precision, unimportant right here
corr = np.corrcoef(targets[..., 0].cpu().detach().numpy(),
preds.cpu().detach().numpy())
cv_loss = corr[0, 1]
if not average:
raise Exception("crosscorrelation must be averaged!")
elif 'normed_loss' in loss_func:
if loss_func.startswith('weighted'):
weighted = True
else:
weighted = False
cv_loss = sfp_model.weighted_normed_loss(preds, targets, weighted=weighted,
average=average)
if not average:
cv_loss = cv_loss.cpu().detach().numpy().mean(1)
else:
cv_loss = cv_loss.item()
elif loss_func == 'explained_variance_score':
# targets[..., 0] contains the actual targets, targets[..., 1]
# contains the precision, unimportant right here
cv_loss = metrics.explained_variance_score(targets[..., 0].cpu().detach().numpy(),
preds.cpu().detach().numpy(),
multioutput='uniform_average')
if not average:
raise Exception("explained variance score must be averaged!")
elif loss_func.startswith('cosine_distance'):
cv_loss = metrics.pairwise.cosine_distances(targets[..., 0].cpu().detach().numpy(),
preds.cpu().detach().numpy())
# for some reason, this returns a matrix of distances, giving the
# distance between each sample in X and Y. in our case, that means the
# distance between the targets of each voxel and the prediction of each
# voxel. We just want the diagonal, which is the distance between
# voxel's target and its own predictions
cv_loss = np.diag(cv_loss)
if loss_func.endswith('_scaled'):
# see paper / notebook for derivation, but I determined that
# our normed loss (without precision-weighting) is 2/n times
# the cosine distance (where n is the number of classes)
cv_loss = cv_loss * (2/preds.shape[-1])
if average:
cv_loss = cv_loss.mean()
return cv_loss
def calc_cv_error(loss_files, dataset_path, wildcards, outputs,
df_filter_string='drop_voxels_with_any_negative_amplitudes,drop_voxels_near_border'):
"""Calculate cross-validated loss and save as new dataframe
We use 12-fold cross-validation to determine the mode that best fits
the data for each scanning session. To do that, we fit the model to
a subset of the data (the subset contains all responses to 44 out of
the 48 stimulus classes, none to the other 4). When fitting the
cross-validation models, we follow the same procedure we use when
fitting all the data, but we need to use something else for
evaluation: we get each cross-validation model's predictions for the
4 classes it *didn't* see, concatenating together these predictions
for all 12 of them, then compare this against the full dataset (all
voxels, all stimuli). We then create a dataframe containing this
loss, as well as other identifying information, and save it at the
specified path.
We also save out the predictions and targets tensors.
The arguments for this function are a bit strange because it's
expressly meant to be called by a snakemake rule and not directly
from a python interpreter (it gets called by the rules calc_cv_error
and calc_simulated_cv_error)
Parameters
----------
loss_files : list
list of strings giving the paths to loss files for the
cross-validation models. each one contains, among other things
the specific test subset for this model, and there should be an
associated model.pt file saved in the same folder.
dataset_path : str
The path to the first_level_analysis dataframe, saved as a csv,
which contains the actual data our model was fit to predict
wildcards : dict
dictionary of wildcards, information used to identify this model
(e.g., subject, session, crossvalidation seed, model type,
etc). Automatically put together by snakemake
outputs : list
list containing two strings, the paths to save the loss dataframe (as a
csv) and the predictions / targets tensors (as a pt)
df_filter_string : str or None, optional
a str specifying how to filter the voxels in the dataset. see
the docstrings for sfp.model.FirstLevelDataset and
sfp.model.construct_df_filter for more details. If None, we
won't filter. Should probably use the default, which is what all
models are trained using.
"""
device = torch.device("cpu")
if df_filter_string:
df_filter = sfp_model.construct_df_filter(df_filter_string)
else:
df_filter = None
ds = sfp_model.FirstLevelDataset(dataset_path, device=device, df_filter=df_filter)
dl = torchdata.DataLoader(ds, len(ds))
features, targets = next(iter(dl))
preds = torch.empty(targets.shape[:2], dtype=targets.dtype)
for path in loss_files:
m, l, _, _ = load_single_model(path.replace('_loss.csv', ''), False)
test_subset = l.test_subset.unique()
test_subset = [int(i) for i in test_subset[0].split(',')]
pred = m(features[:, test_subset, :])
preds[:, test_subset] = pred
torch.save({'predictions': preds, 'targets': targets}, outputs[1])
data = dict(wildcards)
data.pop('model_type')
data['loss_func'] = []
data['cv_loss'] = []
for loss_func in ['weighted_normed_loss', 'crosscorrelation', 'normed_loss',
'explained_variance_score', 'cosine_distance',
'cosine_distance_scaled']:
cv_loss = _calc_loss(preds, targets, loss_func, True)
data['loss_func'].append(loss_func)
data['cv_loss'].append(cv_loss)
data['dataset_df_path'] = dataset_path
data['fit_model_type'] = l.fit_model_type.unique()[0]
if 'true_model_type' in l.columns:
data['true_model_type'] = l.true_model_type.unique()[0]
cv_loss_csv = pd.DataFrame(data)
cv_loss_csv.to_csv(outputs[0], index=False)
def gather_results(base_path, outputs, metadata, cv_loss_files=None, groupaverage=False):
"""Combine model dataframes
We fit a huge number of models as part of this analysis pipeline. In
order to make examining their collective results easier, we need to
combine them in a meaningful way, throwing away the unnecessary
info. This function uses the combine_models function to load in the
models, loss, and models_history dataframes (not the results one,
which is the largest). We then use df.groupby(metadata) and some
well-placed funtions to summarize the information and then save them
out.
This was written to be called by snakemake rules, not from a python
interpreter directly
Parameters
----------
base_path : str
path template where we should find the results. Should contain
no string formatting symbols, but should contain at least one
'*' because we will use glob to find them. We do not search for
them recursively, so you will need multiple '*'s if you want to
combine dataframes contained in different folders
outputs : list
list of 5 or 6 strs giving the paths to save models,
grouped_loss, timing_df, diff_df, model_history, and
(optionally) cv_loss to.
metadata : list
list of strs giving the columns in the individual models, loss,
and model_history dataframes that we will groupby in order to
summarize them.
cv_loss_files : list, optional
either None or list of cross-validated loss dataframes (as
cretated by calc_cv_error). If not None, outputs must contain 6
strs. because of how these dataframes were constructed, we
simply concatenate them, doing none fo the fancy groupby we do
for the other dataframes
groupaverage : bool, optional
whether to grab the individual subject fits or the
sub-groupaverage subject (which is a bootstrapped average
subject)
"""
models, loss_df, _, model_history = combine_models(base_path, False, groupaverage)
timing_df = loss_df.groupby(metadata + ['epoch_num']).time.max().reset_index()
grouped_loss = loss_df.groupby(metadata + ['epoch_num', 'time']).loss.mean().reset_index()
grouped_loss = grouped_loss.groupby(metadata).last().reset_index()
final_model_history = model_history.groupby(metadata + ['parameter']).last().reset_index().rename(columns={'parameter': 'model_parameter'})
models = pd.merge(models, final_model_history[metadata + ['model_parameter', 'hessian']])
models = models.fillna(0)
diff_df = loss_df.groupby(metadata + ['epoch_num'])[['loss', 'time']].mean().reset_index()
diff_df['loss_diff'] = diff_df.groupby(metadata)['loss'].diff()
diff_df['time_diff'] = diff_df.groupby(metadata)['time'].diff()
model_history['value_diff'] = model_history.groupby(metadata + ['parameter'])['value'].diff()
models.to_csv(outputs[0], index=False)
grouped_loss.to_csv(outputs[1], index=False)
timing_df.to_csv(outputs[2], index=False)
diff_df.to_csv(outputs[3], index=False)
model_history.to_csv(outputs[4], index=False)
if cv_loss_files is not None:
cv_loss = []
for path in cv_loss_files:
cv_loss.append(pd.read_csv(path))
cv_loss = pd.concat(cv_loss)
cv_loss.to_csv(outputs[-1], index=False)
def combine_summarized_results(base_template, outputs, cv_loss_flag=True):
"""Combine model dataframes (second-order)
This function combined model dataframes that have already been
combined (that is, are the outputs of gather_results). As such, we
don't do anything more than load them all in and concatenate them
(no groupby, adding new columns, or anything else).
This was written to be called by snakemake rules, not from a python
interpeter directly
Parameters
----------
base_template : list
list of strs, each of which is a path template where we should
find the results. Unlike gather_results's base_path, this
shouldn't contain any '*' (nor any string formatting symbols),
but should just be the path to a single _all_models.csv, with
that removed (see snakemake rule summarize_gathered_resutls,
params.base_template for an example). For each p in
base_template, we'll load in: p+'_all_models.csv',
p+'_all_loss.csv', p+'_all_timing.csv', and (if cv_loss_flag is
True) p+'_all_cv_loss.csv'.
outputs : list
list of 3 or 4 strs, the paths to save out our combined models,
grouped_loss, timing, and (if cv_loss_flag is True) cv_loss
dataframes
cv_loss_flag : bool, optional
whether we load in and save out the cross-validated loss
dataframes
"""
models = []
grouped_loss_df = []
timing_df = []
cv_loss = []
for p in base_template:
models.append(pd.read_csv(p+'_all_models.csv'))
grouped_loss_df.append(pd.read_csv(p+'_all_loss.csv'))
timing_df.append(pd.read_csv(p+'_all_timing.csv'))
if cv_loss_flag:
cv_loss.append(pd.read_csv(p+'_all_cv_loss.csv'))
models = | pd.concat(models, sort=False) | pandas.concat |
import datetime
import inspect
import numpy.testing as npt
import os.path
import pandas as pd
import pandas.util.testing as pdt
import sys
from tabulate import tabulate
import unittest
# #find parent directory and import model
# parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# sys.path.append(parentddir)
from ..screenip_exe import Screenip
test = {}
class TestScreenip(unittest.TestCase):
"""
Unit tests for screenip.
"""
print("screenip unittests conducted at " + str(datetime.datetime.today()))
def setUp(self):
"""
Setup routine for screenip unittest.
:return:
"""
pass
# screenip2 = screenip_model.screenip(0, pd_obj_inputs, pd_obj_exp_out)
# setup the test as needed
# e.g. pandas to open screenip qaqc csv
# Read qaqc csv and create pandas DataFrames for inputs and expected outputs
def tearDown(self):
"""
Teardown routine for screenip unittest.
:return:
"""
pass
# teardown called after each test
# e.g. maybe write test results to some text file
def create_screenip_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty screenip object
screenip_empty = Screenip(df_empty, df_empty)
return screenip_empty
def test_screenip_unit_fw_bird(self):
"""
unittest for function screenip.fw_bird:
:return:
"""
expected_results = | pd.Series([0.0162, 0.0162, 0.0162], dtype='float') | pandas.Series |
import pandas as pd
from fairlens.metrics.correlation import distance_cn_correlation, distance_nn_correlation
from fairlens.sensitive.correlation import find_column_correlation, find_sensitive_correlations
pair_race = "race", "Ethnicity"
pair_age = "age", "Age"
pair_marital = "marital", "Family Status"
pair_gender = "gender", "Gender"
pair_nationality = "nationality", "Nationality"
def test_correlation():
col_names = ["gender", "random", "score"]
data = [
["male", 10, 60],
["female", 10, 80],
["male", 10, 60],
["female", 10, 80],
["male", 9, 59],
["female", 11, 80],
["male", 12, 61],
["female", 10, 83],
]
df = | pd.DataFrame(data, columns=col_names) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 6 13:01:32 2019
@author: abibeka
Purpose: Compare VISSIM Volumes with collected volumes. Calc GEH
"""
#0.0 Housekeeping. Clear variable space
from IPython import get_ipython #run magic commands
ipython = get_ipython()
ipython.magic("reset -f")
ipython = get_ipython()
#1 Import Required Packages
#*********************************************************************************
import os
import pandas as pd
import numpy as np
import subprocess
os.chdir(r'C:\Users\abibeka\OneDrive - Kittelson & Associates, Inc\Documents\12th-Street-TransitWay\Results')
#os.chdir(r'H:\20\20548 - Arlington County Engineering On-Call\009 - 12th Street Transitway Extension\vissim\Results')
os.getcwd()
# Read the Mapping Data
#*********************************************************************************
x1 = pd.ExcelFile('ResultsNameMap.xlsx')
x1.sheet_names
NodeMap = x1.parse('NodeMap')
DirMap = x1.parse('DirMap')
# Read VISSIM File
#*********************************************************************************
def ProcessVissimVolumes(file, NodeMap = NodeMap, DirMap = DirMap, SimRun = "AVG"):
'''
file: Node Evaluation file
Returns: Cleaned resuls
Read the Node-Evaluation File
Ignore the ExistingPMfi. Can be used all AM and PM existing Node Evaluation
'''
ExistingPMDat=pd.read_csv(file,sep =';',skiprows=1,comment="*")
ExistingPMDat.columns
#Use Avg values only
ExistingPMDat = ExistingPMDat[(ExistingPMDat['$MOVEMENTEVALUATION:SIMRUN'] ==SimRun)]
#Only use motorized links
Mask_Not4or5 = ~((ExistingPMDat['MOVEMENT\TOLINK\LINKBEHAVTYPE']==4) | (ExistingPMDat['MOVEMENT\TOLINK\LINKBEHAVTYPE']==5))
ExistingPMDat = ExistingPMDat[Mask_Not4or5]
ExistingPMDat.rename(columns={'MOVEMENT':'Mvmt'},inplace=True)
ExPMMvMDat = ExistingPMDat.copy() #Something from previous code. Not needed here
ExPMMvMDat.loc[:,"HourInt"] = 'Nan'
ExPMMvMDat.loc[:,'HourInt'] = ExPMMvMDat.TIMEINT
ExPMMvMDat.HourInt = pd.Categorical(ExPMMvMDat.HourInt,['900-4500']) # Something from previous code. Not needed here
ExPMMvMDat.rename(columns= {'VEHS(ALL)':'VissimVol','MOVEMENT\DIRECTION':'VissimDir'},inplace=True)
new = ExPMMvMDat.Mvmt.str.split(':',expand=True)
ExPMMvMDat['Intersection'] = (new[0].str.split('-',n=1,expand=True)[0]).astype('int')
ExPMMvMDat = ExPMMvMDat[['Intersection','VissimDir','VissimVol']]
#Handle duplicate rows
ExPMMvMDat = ExPMMvMDat.groupby(['Intersection','VissimDir']).agg({'VissimVol':'sum'}).reset_index()
ExPMMvMDat = ExPMMvMDat.merge(DirMap, left_on = 'VissimDir',right_on = 'VissimDir',how = 'left')
#--------------------------------------------------------------------------------------------------
ExPMMvMDat = pd.merge(ExPMMvMDat,NodeMap,left_on=['Intersection'],
right_on=["NodeNum"], how = 'left')
ExPMMvMDat.CardinalDir = | pd.Categorical(ExPMMvMDat.CardinalDir,[
'EBL','EBT','EBR','WBL','WBT','WBR','NBL','NBT','NBR','SBL','SBT','SBR']) | pandas.Categorical |
import pandas as pd
from pandas import (
DataFrame,
Index,
Series,
Timestamp,
date_range,
)
import pandas._testing as tm
class TestDatetimeIndex:
def test_indexing_with_datetime_tz(self):
# GH#8260
# support datetime64 with tz
idx = Index(date_range("20130101", periods=3, tz="US/Eastern"), name="foo")
dr = date_range("20130110", periods=3)
df = DataFrame({"A": idx, "B": dr})
df["C"] = idx
df.iloc[1, 1] = pd.NaT
df.iloc[1, 2] = pd.NaT
expected = Series(
[Timestamp("2013-01-02 00:00:00-0500", tz="US/Eastern"), pd.NaT, pd.NaT],
index=list("ABC"),
dtype="object",
name=1,
)
# indexing
result = df.iloc[1]
tm.assert_series_equal(result, expected)
result = df.loc[1]
tm.assert_series_equal(result, expected)
def test_indexing_fast_xs(self):
# indexing - fast_xs
df = DataFrame({"a": date_range("2014-01-01", periods=10, tz="UTC")})
result = df.iloc[5]
expected = Series(
[Timestamp("2014-01-06 00:00:00+0000", tz="UTC")], index=["a"], name=5
)
| tm.assert_series_equal(result, expected) | pandas._testing.assert_series_equal |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# pylint: disable=E1101
"""
Created on Saturday, March 14 15:23 2020
@author: khayes847
"""
import itertools
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from sklearn.preprocessing import OneHotEncoder
from sklearn.cluster import AgglomerativeClustering
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import accuracy_score, f1_score, confusion_matrix
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
def kmeans_score(data, n_groups):
"""
This function will perform KMeans clustering on the included dataset,
using the n_groups numbers as the number of clusters. It will then
find and return the predicted clusters' Silhouette Score.
Parameters:
data: The dataset in question.
n_groups (int): The number of clusters.
Returns:
score (float): The Silhouette Score for the clustered dataset.
"""
k_means = KMeans(n_clusters=n_groups, random_state=42)
k_means.fit(data)
labels = k_means.labels_
score = float(silhouette_score(data, labels))
return score
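
# Example usage (illustrative only; any small numeric feature matrix works):
#   toy = np.random.rand(100, 4)
#   kmeans_score(toy, n_groups=3)   # returns the silhouette score as a float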
def agg_score(data, n_groups, score=True):
"""
Performs Agglomerative Hierarchical Clustering on data using
the specified number of components. If "Score" is selected,
returns the Silhouette Score. Otherwise, produces the cluster labels,
and adds them to the original dataset. For convenience, the function
also performs the data cleaning steps that don't require the log, outlier-
capping, or scaling transformations.
Parameters:
data: The dataset in question.
n_groups (int): The number of clusters.
score (bool): Whether the function will return the Silhouette
Score. If 'True', the function will return the Silhouette
Score. If 'False', the function will add the clustered labels
to the dataset, then save and return the dataset.
Returns:
score_val (float): The Silhouette Score for the clustered dataset.
target: The target labels as a pandas dataframe.
"""
agg_comp = AgglomerativeClustering(n_clusters=n_groups)
agg_comp.fit(data)
labels = agg_comp.labels_
if score:
score_val = float(silhouette_score(data, labels))
return score_val
data = pd.read_csv("data/shoppers.csv")
# Combining datasets
target = | pd.DataFrame(labels, columns=['Target']) | pandas.DataFrame |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
from itertools import product, starmap
from numpy import nan, inf
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, bdate_range,
NaT, date_range, timedelta_range,
_np_version_under1p8)
from pandas.tseries.index import Timestamp
from pandas.tseries.tdi import Timedelta
import pandas.core.nanops as nanops
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesOperators(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid='ignore'):
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
def test_op_method(self):
def check(series, other, check_reverse=False):
simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
if not compat.PY3:
simple_ops.append('div')
for opname in simple_ops:
op = getattr(Series, opname)
                if opname == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
check(self.ts, self.ts * 2)
check(self.ts, self.ts[::2])
check(self.ts, 5, check_reverse=True)
check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
def test_neg(self):
assert_series_equal(-self.series, -1 * self.series)
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
def test_div(self):
with np.errstate(all='ignore'):
# no longer do integer div for any ops, but deal with the 0's
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] / p['second']
expected = Series(
p['first'].values.astype(float) / p['second'].values,
dtype='float64')
expected.iloc[0:3] = np.inf
assert_series_equal(result, expected)
result = p['first'] / 0
expected = Series(np.inf, index=p.index, name='first')
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] / p['second']
expected = Series(p['first'].values / p['second'].values)
assert_series_equal(result, expected)
p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
result = p['first'] / p['second']
assert_series_equal(result, p['first'].astype('float64'),
check_names=False)
self.assertTrue(result.name is None)
self.assertFalse(np.array_equal(result, p['second'] / p['first']))
# inf signing
s = Series([np.nan, 1., -1.])
result = s / 0
expected = Series([np.nan, np.inf, -np.inf])
assert_series_equal(result, expected)
# float/integer issue
# GH 7785
p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)})
expected = Series([-0.01, -np.inf])
result = p['second'].div(p['first'])
assert_series_equal(result, expected, check_names=False)
result = p['second'] / p['first']
assert_series_equal(result, expected)
# GH 9144
s = Series([-1, 0, 1])
result = 0 / s
expected = Series([0.0, nan, 0.0])
assert_series_equal(result, expected)
result = s / 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
result = s // 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
def test_operators(self):
def _check_op(series, other, op, pos_only=False,
check_dtype=True):
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_series_equal(cython_or_numpy, python,
check_dtype=check_dtype)
def check(series, other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']
for opname in simple_ops:
_check_op(series, other, getattr(operator, opname))
_check_op(series, other, operator.pow, pos_only=True)
_check_op(series, other, lambda x, y: operator.add(y, x))
_check_op(series, other, lambda x, y: operator.sub(y, x))
_check_op(series, other, lambda x, y: operator.truediv(y, x))
_check_op(series, other, lambda x, y: operator.floordiv(y, x))
_check_op(series, other, lambda x, y: operator.mul(y, x))
_check_op(series, other, lambda x, y: operator.pow(y, x),
pos_only=True)
_check_op(series, other, lambda x, y: operator.mod(y, x))
check(self.ts, self.ts * 2)
check(self.ts, self.ts * 0)
check(self.ts, self.ts[::2])
check(self.ts, 5)
def check_comparators(series, other, check_dtype=True):
_check_op(series, other, operator.gt, check_dtype=check_dtype)
_check_op(series, other, operator.ge, check_dtype=check_dtype)
_check_op(series, other, operator.eq, check_dtype=check_dtype)
_check_op(series, other, operator.lt, check_dtype=check_dtype)
_check_op(series, other, operator.le, check_dtype=check_dtype)
check_comparators(self.ts, 5)
check_comparators(self.ts, self.ts + 1, check_dtype=False)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
def test_operators_timedelta64(self):
# invalid ops
self.assertRaises(Exception, self.objSeries.__add__, 1)
self.assertRaises(Exception, self.objSeries.__add__,
np.array(1, dtype=np.int64))
self.assertRaises(Exception, self.objSeries.__sub__, 1)
self.assertRaises(Exception, self.objSeries.__sub__,
np.array(1, dtype=np.int64))
        # series ops
v1 = date_range('2012-1-1', periods=3, freq='D')
v2 = date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
assert_series_equal(rs, xp)
self.assertEqual(rs.dtype, 'timedelta64[ns]')
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
# series on the rhs
result = df['A'] - df['A'].shift()
self.assertEqual(result.dtype, 'timedelta64[ns]')
result = df['A'] + td
self.assertEqual(result.dtype, 'M8[ns]')
# scalar Timestamp on rhs
maxa = df['A'].max()
tm.assertIsInstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
self.assertEqual(resultb.dtype, 'timedelta64[ns]')
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
assert_series_equal(result, expected)
self.assertEqual(result.dtype, 'm8[ns]')
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
self.assertEqual(resulta.dtype, 'm8[ns]')
# roundtrip
resultb = resulta + d
assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(resultb, df['A'])
self.assertEqual(resultb.dtype, 'M8[ns]')
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(df['A'], resultb)
self.assertEqual(resultb.dtype, 'M8[ns]')
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
self.assertEqual(rs[2], value)
def test_operator_series_comparison_zerorank(self):
# GH 13006
result = np.float64(0) > pd.Series([1, 2, 3])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
result = pd.Series([1, 2, 3]) < np.float64(0)
expected = pd.Series([1, 2, 3]) < 0.0
self.assert_series_equal(result, expected)
        result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2])
        expected = 0.0 > pd.Series([0, 1, 2])
self.assert_series_equal(result, expected)
def test_timedeltas_with_DateOffset(self):
# GH 4532
# operate with pd.offsets
s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
result = s + pd.offsets.Second(5)
result2 = pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:01:05'), Timestamp(
'20130101 9:02:05')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s - pd.offsets.Second(5)
result2 = -pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:00:55'), Timestamp(
'20130101 9:01:55')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series([Timestamp('20130101 9:06:00.005'), Timestamp(
'20130101 9:07:00.005')])
assert_series_equal(result, expected)
# operate with np.timedelta64 correctly
result = s + np.timedelta64(1, 's')
result2 = np.timedelta64(1, 's') + s
expected = Series([Timestamp('20130101 9:01:01'), Timestamp(
'20130101 9:02:01')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + np.timedelta64(5, 'ms')
result2 = np.timedelta64(5, 'ms') + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
s + op(5)
op(5) + s
def test_timedelta_series_ops(self):
# GH11925
s = Series(timedelta_range('1 day', periods=3))
ts = Timestamp('2012-01-01')
expected = Series(date_range('2012-01-02', periods=3))
assert_series_equal(ts + s, expected)
assert_series_equal(s + ts, expected)
expected2 = Series(date_range('2011-12-31', periods=3, freq='-1D'))
assert_series_equal(ts - s, expected2)
assert_series_equal(ts + (-s), expected2)
def test_timedelta64_operations_with_DateOffset(self):
# GH 10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3), timedelta(
minutes=5, seconds=6), timedelta(hours=2, minutes=5, seconds=3)])
assert_series_equal(result, expected)
result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
expected = Series([timedelta(minutes=6, seconds=15)] * 3)
assert_series_equal(result, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
td + op(5)
op(5) + td
td - op(5)
op(5) - td
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
def test_timedelta64_operations_with_integers(self):
# GH 4521
# divide/multiply by integers
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
s2 = Series([2, 3, 4])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
result = s1 / 2
expected = Series(s1.values.astype(np.int64) / 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) * s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
for dtype in ['int32', 'int16', 'uint32', 'uint64', 'uint32', 'uint16',
'uint8']:
s2 = Series([20, 30, 40], dtype=dtype)
expected = Series(
s1.values.astype(np.int64) * s2.astype(np.int64),
dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
result = s1 * 2
expected = Series(s1.values.astype(np.int64) * 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
result = s1 * -1
expected = Series(s1.values.astype(np.int64) * -1, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
# invalid ops
assert_series_equal(s1 / s2.astype(float),
Series([Timedelta('2 days 22:48:00'), Timedelta(
'1 days 23:12:00'), Timedelta('NaT')]))
assert_series_equal(s1 / 2.0,
Series([Timedelta('29 days 12:00:00'), Timedelta(
'29 days 12:00:00'), Timedelta('NaT')]))
for op in ['__add__', '__sub__']:
sop = getattr(s1, op, None)
if sop is not None:
self.assertRaises(TypeError, sop, 1)
self.assertRaises(TypeError, sop, s2.values)
def test_timedelta64_conversions(self):
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
for m in [1, 3, 10]:
for unit in ['D', 'h', 'm', 's', 'ms', 'us', 'ns']:
# op
expected = s1.apply(lambda x: x / np.timedelta64(m, unit))
result = s1 / np.timedelta64(m, unit)
assert_series_equal(result, expected)
if m == 1 and unit != 'ns':
# astype
result = s1.astype("timedelta64[{0}]".format(unit))
assert_series_equal(result, expected)
# reverse op
expected = s1.apply(
lambda x: Timedelta(np.timedelta64(m, unit)) / x)
result = np.timedelta64(m, unit) / s1
# astype
s = Series(date_range('20130101', periods=3))
result = s.astype(object)
self.assertIsInstance(result.iloc[0], datetime)
self.assertTrue(result.dtype == np.object_)
result = s1.astype(object)
self.assertIsInstance(result.iloc[0], timedelta)
self.assertTrue(result.dtype == np.object_)
def test_timedelta64_equal_timedelta_supported_ops(self):
ser = Series([Timestamp('20130301'), Timestamp('20130228 23:00:00'),
Timestamp('20130228 22:00:00'), Timestamp(
'20130228 21:00:00')])
intervals = 'D', 'h', 'm', 's', 'us'
# TODO: unused
# npy16_mappings = {'D': 24 * 60 * 60 * 1000000,
# 'h': 60 * 60 * 1000000,
# 'm': 60 * 1000000,
# 's': 1000000,
# 'us': 1}
def timedelta64(*args):
return sum(starmap(np.timedelta64, zip(args, intervals)))
for op, d, h, m, s, us in product([operator.add, operator.sub],
*([range(2)] * 5)):
nptd = timedelta64(d, h, m, s, us)
pytd = timedelta(days=d, hours=h, minutes=m, seconds=s,
microseconds=us)
lhs = op(ser, nptd)
rhs = op(ser, pytd)
try:
assert_series_equal(lhs, rhs)
except:
raise AssertionError(
"invalid comparsion [op->{0},d->{1},h->{2},m->{3},"
"s->{4},us->{5}]\n{6}\n{7}\n".format(op, d, h, m, s,
us, lhs, rhs))
def test_operators_datetimelike(self):
def run_ops(ops, get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
for op_str in ops:
op = getattr(get_ser, op_str, None)
with tm.assertRaisesRegexp(TypeError, 'operate'):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td2 = timedelta(minutes=5, seconds=4)
ops = ['__mul__', '__floordiv__', '__pow__', '__rmul__',
'__rfloordiv__', '__rpow__']
run_ops(ops, td1, td2)
td1 + td2
td2 + td1
td1 - td2
td2 - td1
td1 / td2
td2 / td1
# ## datetime64 ###
dt1 = Series([Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')])
dt1.iloc[2] = np.nan
dt2 = Series([Timestamp('20111231'), Timestamp('20120102'),
Timestamp('20120104')])
ops = ['__add__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__radd__', '__rmul__', '__rfloordiv__',
'__rtruediv__', '__rdiv__', '__rpow__']
run_ops(ops, dt1, dt2)
dt1 - dt2
dt2 - dt1
        # ## datetime64 with timedelta ###
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
run_ops(ops, dt1, td1)
dt1 + td1
td1 + dt1
dt1 - td1
# TODO: Decide if this ought to work.
# td1 - dt1
        # ## timedelta with datetime64 ###
ops = ['__sub__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__rmul__', '__rfloordiv__', '__rtruediv__',
'__rdiv__', '__rpow__']
run_ops(ops, td1, dt1)
td1 + dt1
dt1 + td1
# 8260, 10763
# datetime64 with tz
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
tz = 'US/Eastern'
dt1 = Series(date_range('2000-01-01 09:00:00', periods=5,
tz=tz), name='foo')
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(timedelta_range('1 days 1 min', periods=5, freq='H'))
td2 = td1.copy()
td2.iloc[1] = np.nan
run_ops(ops, dt1, td1)
result = dt1 + td1[0]
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 + td2[0]
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
# odd numpy behavior with scalar timedeltas
if not _np_version_under1p8:
result = td1[0] + dt1
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = td2[0] + dt2
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt1 - td1[0]
exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td1[0] - dt1)
result = dt2 - td2[0]
exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td2[0] - dt2)
result = dt1 + td1
exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 + td2
exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt1 - td1
exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 - td2
exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td1 - dt1)
self.assertRaises(TypeError, lambda: td2 - dt2)
def test_sub_single_tz(self):
# GH12290
s1 = Series([pd.Timestamp('2016-02-10', tz='America/Sao_Paulo')])
s2 = Series([pd.Timestamp('2016-02-08', tz='America/Sao_Paulo')])
result = s1 - s2
expected = Series([Timedelta('2days')])
assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta('-2days')])
assert_series_equal(result, expected)
def test_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta('1s')])
datetime_series = Series([NaT, Timestamp('19900315')])
nat_series_dtype_timedelta = Series(
[NaT, NaT], dtype='timedelta64[ns]')
nat_series_dtype_timestamp = Series([NaT, NaT], dtype='datetime64[ns]')
single_nat_dtype_datetime = Series([NaT], dtype='datetime64[ns]')
single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')
# subtraction
assert_series_equal(timedelta_series - NaT, nat_series_dtype_timedelta)
assert_series_equal(-NaT + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series - single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(datetime_series - NaT, nat_series_dtype_timestamp)
assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
assert_series_equal(datetime_series - single_nat_dtype_datetime,
nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
-single_nat_dtype_datetime + datetime_series
assert_series_equal(datetime_series - single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(-single_nat_dtype_timedelta + datetime_series,
nat_series_dtype_timestamp)
# without a Series wrapping the NaT, it is ambiguous
# whether it is a datetime64 or timedelta64
# defaults to interpreting it as timedelta64
assert_series_equal(nat_series_dtype_timestamp - NaT,
nat_series_dtype_timestamp)
assert_series_equal(-NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp -
single_nat_dtype_datetime,
nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
-single_nat_dtype_datetime + nat_series_dtype_timestamp
assert_series_equal(nat_series_dtype_timestamp -
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(-single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
with tm.assertRaises(TypeError):
timedelta_series - single_nat_dtype_datetime
# addition
assert_series_equal(nat_series_dtype_timestamp + NaT,
nat_series_dtype_timestamp)
assert_series_equal(NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp +
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series + NaT, nat_series_dtype_timedelta)
assert_series_equal(NaT + timedelta_series, nat_series_dtype_timedelta)
assert_series_equal(timedelta_series + single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timestamp + NaT,
nat_series_dtype_timestamp)
assert_series_equal(NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp +
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_datetime,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_datetime +
nat_series_dtype_timedelta,
nat_series_dtype_timestamp)
# multiplication
assert_series_equal(nat_series_dtype_timedelta * 1.0,
nat_series_dtype_timedelta)
assert_series_equal(1.0 * nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series * 1, timedelta_series)
assert_series_equal(1 * timedelta_series, timedelta_series)
assert_series_equal(timedelta_series * 1.5,
Series([NaT, Timedelta('1.5s')]))
assert_series_equal(1.5 * timedelta_series,
Series([NaT, Timedelta('1.5s')]))
assert_series_equal(timedelta_series * nan, nat_series_dtype_timedelta)
assert_series_equal(nan * timedelta_series, nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
datetime_series * 1
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp * 1
with tm.assertRaises(TypeError):
datetime_series * 1.0
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp * 1.0
# division
assert_series_equal(timedelta_series / 2,
Series([NaT, Timedelta('0.5s')]))
assert_series_equal(timedelta_series / 2.0,
Series([NaT, Timedelta('0.5s')]))
assert_series_equal(timedelta_series / nan, nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp / 1.0
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp / 1
def test_ops_datetimelike_align(self):
# GH 7500
# datetimelike ops need to align
dt = Series(date_range('2012-1-1', periods=3, freq='D'))
dt.iloc[2] = np.nan
dt2 = dt[::-1]
expected = Series([timedelta(0), timedelta(0), pd.NaT])
# name is reset
result = dt2 - dt
assert_series_equal(result, expected)
expected = Series(expected, name=0)
result = (dt2.to_frame() - dt.to_frame())[0]
assert_series_equal(result, expected)
def test_object_comparisons(self):
s = Series(['a', 'b', np.nan, 'c', 'a'])
result = s == 'a'
expected = Series([True, False, False, False, True])
assert_series_equal(result, expected)
result = s < 'a'
expected = Series([False, False, False, False, False])
assert_series_equal(result, expected)
result = s != 'a'
expected = -(s == 'a')
assert_series_equal(result, expected)
def test_comparison_tuples(self):
# GH11339
# comparisons vs tuple
s = Series([(1, 1), (1, 2)])
result = s == (1, 2)
expected = Series([False, True])
assert_series_equal(result, expected)
result = s != (1, 2)
expected = Series([True, False])
assert_series_equal(result, expected)
result = s == (0, 0)
expected = Series([False, False])
assert_series_equal(result, expected)
result = s != (0, 0)
expected = Series([True, True])
assert_series_equal(result, expected)
s = Series([(1, 1), (1, 1)])
result = s == (1, 1)
expected = Series([True, True])
assert_series_equal(result, expected)
result = s != (1, 1)
expected = Series([False, False])
assert_series_equal(result, expected)
s = Series([frozenset([1]), frozenset([1, 2])])
result = s == frozenset([1])
expected = Series([True, False])
assert_series_equal(result, expected)
def test_comparison_operators_with_nas(self):
s = Series(bdate_range('1/1/2000', periods=10), dtype=object)
s[::2] = np.nan
# test that comparisons work
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
val = s[5]
f = getattr(operator, op)
result = f(s, val)
expected = f(s.dropna(), val).reindex(s.index)
if op == 'ne':
expected = expected.fillna(True).astype(bool)
else:
expected = expected.fillna(False).astype(bool)
assert_series_equal(result, expected)
        # TODO: the reflected comparisons below (scalar on the left) do not yet behave as expected
# result = f(val, s)
# expected = f(val, s.dropna()).reindex(s.index)
# assert_series_equal(result, expected)
# boolean &, |, ^ should work with object arrays and propagate NAs
ops = ['and_', 'or_', 'xor']
mask = s.isnull()
for bool_op in ops:
f = getattr(operator, bool_op)
filled = s.fillna(s[0])
result = f(s < s[9], s > s[3])
expected = f(filled < filled[9], filled > filled[3])
expected[mask] = False
assert_series_equal(result, expected)
def test_comparison_object_numeric_nas(self):
s = Series(np.random.randn(10), dtype=object)
shifted = s.shift(2)
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
f = getattr(operator, op)
result = f(s, shifted)
expected = f(s.astype(float), shifted.astype(float))
assert_series_equal(result, expected)
def test_comparison_invalid(self):
# GH4968
# invalid date/int comparisons
s = Series(range(5))
s2 = Series(date_range('20010101', periods=5))
for (x, y) in [(s, s2), (s2, s)]:
self.assertRaises(TypeError, lambda: x == y)
self.assertRaises(TypeError, lambda: x != y)
self.assertRaises(TypeError, lambda: x >= y)
self.assertRaises(TypeError, lambda: x > y)
self.assertRaises(TypeError, lambda: x < y)
self.assertRaises(TypeError, lambda: x <= y)
def test_more_na_comparisons(self):
for dtype in [None, object]:
left = Series(['a', np.nan, 'c'], dtype=dtype)
right = Series(['a', np.nan, 'd'], dtype=dtype)
result = left == right
expected = Series([True, False, False])
assert_series_equal(result, expected)
result = left != right
expected = Series([False, True, True])
assert_series_equal(result, expected)
result = left == np.nan
expected = Series([False, False, False])
assert_series_equal(result, expected)
result = left != np.nan
expected = Series([True, True, True])
assert_series_equal(result, expected)
def test_nat_comparisons(self):
data = [([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')],
[pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')]),
([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')],
[pd.NaT, pd.NaT, pd.Timedelta('3 days')]),
([pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')],
[pd.NaT, pd.NaT, pd.Period('2011-03', freq='M')])]
# add lhs / rhs switched data
data = data + [(r, l) for l, r in data]
for l, r in data:
for dtype in [None, object]:
left = Series(l, dtype=dtype)
# Series, Index
for right in [Series(r, dtype=dtype), Index(r, dtype=dtype)]:
expected = Series([False, False, True])
assert_series_equal(left == right, expected)
expected = Series([True, True, False])
assert_series_equal(left != right, expected)
expected = Series([False, False, False])
assert_series_equal(left < right, expected)
expected = Series([False, False, False])
assert_series_equal(left > right, expected)
expected = Series([False, False, True])
assert_series_equal(left >= right, expected)
expected = Series([False, False, True])
assert_series_equal(left <= right, expected)
def test_nat_comparisons_scalar(self):
data = [[pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')],
[pd.Timedelta('1 days'), pd.NaT, pd.Timedelta('3 days')],
[pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')]]
for l in data:
for dtype in [None, object]:
left = Series(l, dtype=dtype)
expected = Series([False, False, False])
assert_series_equal(left == pd.NaT, expected)
assert_series_equal(pd.NaT == left, expected)
expected = Series([True, True, True])
assert_series_equal(left != pd.NaT, expected)
assert_series_equal(pd.NaT != left, expected)
expected = Series([False, False, False])
assert_series_equal(left < pd.NaT, expected)
assert_series_equal(pd.NaT > left, expected)
assert_series_equal(left <= pd.NaT, expected)
assert_series_equal(pd.NaT >= left, expected)
assert_series_equal(left > pd.NaT, expected)
assert_series_equal(pd.NaT < left, expected)
assert_series_equal(left >= pd.NaT, expected)
assert_series_equal(pd.NaT <= left, expected)
def test_comparison_different_length(self):
a = Series(['a', 'b', 'c'])
b = Series(['b', 'a'])
self.assertRaises(ValueError, a.__lt__, b)
a = Series([1, 2])
b = Series([2, 3, 4])
self.assertRaises(ValueError, a.__eq__, b)
def test_comparison_label_based(self):
# GH 4947
# comparisons should be label based
a = Series([True, False, True], list('bca'))
b = Series([False, True, False], list('abc'))
expected = Series([False, True, False], list('abc'))
result = a & b
assert_series_equal(result, expected)
expected = Series([True, True, False], list('abc'))
result = a | b
assert_series_equal(result, expected)
expected = Series([True, False, False], list('abc'))
result = a ^ b
assert_series_equal(result, expected)
# rhs is bigger
a = Series([True, False, True], list('bca'))
b = Series([False, True, False, True], list('abcd'))
expected = Series([False, True, False, False], list('abcd'))
result = a & b
assert_series_equal(result, expected)
expected = Series([True, True, False, False], list('abcd'))
result = a | b
assert_series_equal(result, expected)
# filling
# vs empty
result = a & Series([])
expected = Series([False, False, False], list('bca'))
assert_series_equal(result, expected)
result = a | Series([])
expected = Series([True, False, True], list('bca'))
assert_series_equal(result, expected)
# vs non-matching
result = a & Series([1], ['z'])
expected = Series([False, False, False, False], list('abcz'))
assert_series_equal(result, expected)
result = a | Series([1], ['z'])
expected = Series([True, True, False, False], list('abcz'))
assert_series_equal(result, expected)
# identity
# we would like s[s|e] == s to hold for any e, whether empty or not
for e in [Series([]), Series([1], ['z']),
Series(np.nan, b.index), Series(np.nan, a.index)]:
result = a[a | e]
assert_series_equal(result, a[a])
for e in [Series(['z'])]:
if compat.PY3:
with tm.assert_produces_warning(RuntimeWarning):
result = a[a | e]
else:
result = a[a | e]
assert_series_equal(result, a[a])
# vs scalars
index = list('bca')
t = Series([True, False, True])
for v in [True, 1, 2]:
result = Series([True, False, True], index=index) | v
expected = Series([True, True, True], index=index)
assert_series_equal(result, expected)
for v in [np.nan, 'foo']:
self.assertRaises(TypeError, lambda: t | v)
for v in [False, 0]:
result = Series([True, False, True], index=index) | v
expected = Series([True, False, True], index=index)
assert_series_equal(result, expected)
for v in [True, 1]:
result = Series([True, False, True], index=index) & v
expected = Series([True, False, True], index=index)
assert_series_equal(result, expected)
for v in [False, 0]:
result = Series([True, False, True], index=index) & v
expected = Series([False, False, False], index=index)
assert_series_equal(result, expected)
for v in [np.nan]:
self.assertRaises(TypeError, lambda: t & v)
def test_comparison_flex_basic(self):
left = pd.Series(np.random.randn(10))
right = pd.Series(np.random.randn(10))
tm.assert_series_equal(left.eq(right), left == right)
tm.assert_series_equal(left.ne(right), left != right)
        tm.assert_series_equal(left.le(right), left <= right)
        tm.assert_series_equal(left.lt(right), left < right)
tm.assert_series_equal(left.gt(right), left > right)
tm.assert_series_equal(left.ge(right), left >= right)
# axis
for axis in [0, None, 'index']:
tm.assert_series_equal(left.eq(right, axis=axis), left == right)
tm.assert_series_equal(left.ne(right, axis=axis), left != right)
            tm.assert_series_equal(left.le(right, axis=axis), left <= right)
            tm.assert_series_equal(left.lt(right, axis=axis), left < right)
tm.assert_series_equal(left.gt(right, axis=axis), left > right)
tm.assert_series_equal(left.ge(right, axis=axis), left >= right)
#
msg = 'No axis named 1 for object type'
        for op in ['eq', 'ne', 'le', 'lt', 'gt', 'ge']:
with tm.assertRaisesRegexp(ValueError, msg):
getattr(left, op)(right, axis=1)
def test_comparison_flex_alignment(self):
left = Series([1, 3, 2], index=list('abc'))
right = Series([2, 2, 2], index=list('bcd'))
exp = pd.Series([False, False, True, False], index=list('abcd'))
tm.assert_series_equal(left.eq(right), exp)
exp = pd.Series([True, True, False, True], index=list('abcd'))
tm.assert_series_equal(left.ne(right), exp)
exp = pd.Series([False, False, True, False], index=list('abcd'))
tm.assert_series_equal(left.le(right), exp)
exp = pd.Series([False, False, False, False], index=list('abcd'))
tm.assert_series_equal(left.lt(right), exp)
exp = pd.Series([False, True, True, False], index=list('abcd'))
tm.assert_series_equal(left.ge(right), exp)
exp = pd.Series([False, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.gt(right), exp)
def test_comparison_flex_alignment_fill(self):
left = Series([1, 3, 2], index=list('abc'))
right = Series([2, 2, 2], index=list('bcd'))
exp = pd.Series([False, False, True, True], index=list('abcd'))
tm.assert_series_equal(left.eq(right, fill_value=2), exp)
exp = pd.Series([True, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.ne(right, fill_value=2), exp)
exp = pd.Series([False, False, True, True], index=list('abcd'))
tm.assert_series_equal(left.le(right, fill_value=0), exp)
exp = pd.Series([False, False, False, True], index=list('abcd'))
tm.assert_series_equal(left.lt(right, fill_value=0), exp)
exp = pd.Series([True, True, True, False], index=list('abcd'))
tm.assert_series_equal(left.ge(right, fill_value=0), exp)
exp = pd.Series([True, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.gt(right, fill_value=0), exp)
def test_operators_bitwise(self):
# GH 9016: support bitwise op for integer types
index = list('bca')
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
s_tff = Series([True, False, False], index=index)
s_empty = Series([])
# TODO: unused
# s_0101 = Series([0, 1, 0, 1])
s_0123 = Series(range(4), dtype='int64')
s_3333 = Series([3] * 4)
s_4444 = Series([4] * 4)
res = s_tft & s_empty
expected = s_fff
assert_series_equal(res, expected)
res = s_tft | s_empty
expected = s_tft
assert_series_equal(res, expected)
res = s_0123 & s_3333
expected = Series(range(4), dtype='int64')
assert_series_equal(res, expected)
res = s_0123 | s_4444
expected = Series(range(4, 8), dtype='int64')
assert_series_equal(res, expected)
s_a0b1c0 = Series([1], list('b'))
res = s_tft & s_a0b1c0
expected = s_tff.reindex(list('abc'))
assert_series_equal(res, expected)
res = s_tft | s_a0b1c0
expected = s_tft.reindex(list('abc'))
assert_series_equal(res, expected)
n0 = 0
res = s_tft & n0
expected = s_fff
assert_series_equal(res, expected)
res = s_0123 & n0
expected = Series([0] * 4)
assert_series_equal(res, expected)
n1 = 1
res = s_tft & n1
expected = s_tft
assert_series_equal(res, expected)
res = s_0123 & n1
expected = Series([0, 1, 0, 1])
assert_series_equal(res, expected)
s_1111 = Series([1] * 4, dtype='int8')
res = s_0123 & s_1111
expected = Series([0, 1, 0, 1], dtype='int64')
assert_series_equal(res, expected)
res = s_0123.astype(np.int16) | s_1111.astype(np.int32)
expected = Series([1, 1, 3, 3], dtype='int32')
assert_series_equal(res, expected)
self.assertRaises(TypeError, lambda: s_1111 & 'a')
self.assertRaises(TypeError, lambda: s_1111 & ['a', 'b', 'c', 'd'])
self.assertRaises(TypeError, lambda: s_0123 & np.NaN)
self.assertRaises(TypeError, lambda: s_0123 & 3.14)
self.assertRaises(TypeError, lambda: s_0123 & [0.1, 4, 3.14, 2])
# s_0123 will be all false now because of reindexing like s_tft
if compat.PY3:
# unable to sort incompatible object via .union.
exp = Series([False] * 7, index=['b', 'c', 'a', 0, 1, 2, 3])
with tm.assert_produces_warning(RuntimeWarning):
assert_series_equal(s_tft & s_0123, exp)
else:
exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
assert_series_equal(s_tft & s_0123, exp)
# s_tft will be all false now because of reindexing like s_0123
if compat.PY3:
# unable to sort incompatible object via .union.
exp = Series([False] * 7, index=[0, 1, 2, 3, 'b', 'c', 'a'])
with tm.assert_produces_warning(RuntimeWarning):
assert_series_equal(s_0123 & s_tft, exp)
else:
exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
assert_series_equal(s_0123 & s_tft, exp)
assert_series_equal(s_0123 & False, Series([False] * 4))
assert_series_equal(s_0123 ^ False, Series([False, True, True, True]))
assert_series_equal(s_0123 & [False], Series([False] * 4))
assert_series_equal(s_0123 & (False), Series([False] * 4))
assert_series_equal(s_0123 & Series([False, np.NaN, False, False]),
Series([False] * 4))
s_ftft = Series([False, True, False, True])
assert_series_equal(s_0123 & Series([0.1, 4, -3.14, 2]), s_ftft)
s_abNd = Series(['a', 'b', np.NaN, 'd'])
res = s_0123 & s_abNd
expected = s_ftft
assert_series_equal(res, expected)
def test_scalar_na_cmp_corners(self):
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
def tester(a, b):
return a & b
self.assertRaises(TypeError, tester, s, datetime(2005, 1, 1))
s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])
s[::2] = np.nan
expected = Series(True, index=s.index)
expected[::2] = False
assert_series_equal(tester(s, list(s)), expected)
d = DataFrame({'A': s})
        # TODO: fix this exception (see GH5035)
# (previously this was a TypeError because series returned
# NotImplemented
# this is an alignment issue; these are equivalent
# https://github.com/pydata/pandas/issues/5284
self.assertRaises(ValueError, lambda: d.__and__(s, axis='columns'))
self.assertRaises(ValueError, tester, s, d)
        # this is wrong as it's not a boolean result
# result = d.__and__(s,axis='index')
def test_operators_corner(self):
series = self.ts
empty = Series([], index=Index([]))
result = series + empty
self.assertTrue(np.isnan(result).all())
result = empty + Series([], index=Index([]))
self.assertEqual(len(result), 0)
# TODO: this returned NotImplemented earlier, what to do?
# deltas = Series([timedelta(1)] * 5, index=np.arange(5))
# sub_deltas = deltas[::2]
# deltas5 = deltas * 5
# deltas = deltas + sub_deltas
# float + int
int_ts = self.ts.astype(int)[:-5]
added = self.ts + int_ts
expected = Series(self.ts.values[:-5] + int_ts.values,
index=self.ts.index[:-5], name='ts')
self.assert_series_equal(added[:-5], expected)
def test_operators_reverse_object(self):
# GH 56
arr = Series(np.random.randn(10), index=np.arange(10), dtype=object)
def _check_op(arr, op):
result = op(1., arr)
expected = op(1., arr.astype(float))
assert_series_equal(result.astype(float), expected)
_check_op(arr, operator.add)
_check_op(arr, operator.sub)
_check_op(arr, operator.mul)
_check_op(arr, operator.truediv)
_check_op(arr, operator.floordiv)
def test_arith_ops_df_compat(self):
# GH 1134
s1 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s2 = pd.Series([2, 2, 2], index=list('ABD'), name='x')
exp = pd.Series([3.0, 4.0, np.nan, np.nan],
index=list('ABCD'), name='x')
tm.assert_series_equal(s1 + s2, exp)
tm.assert_series_equal(s2 + s1, exp)
exp = pd.DataFrame({'x': [3.0, 4.0, np.nan, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s1.to_frame() + s2.to_frame(), exp)
tm.assert_frame_equal(s2.to_frame() + s1.to_frame(), exp)
# different length
s3 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s4 = pd.Series([2, 2, 2, 2], index=list('ABCD'), name='x')
exp = pd.Series([3, 4, 5, np.nan],
index=list('ABCD'), name='x')
tm.assert_series_equal(s3 + s4, exp)
tm.assert_series_equal(s4 + s3, exp)
exp = pd.DataFrame({'x': [3, 4, 5, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s3.to_frame() + s4.to_frame(), exp)
tm.assert_frame_equal(s4.to_frame() + s3.to_frame(), exp)
def test_comp_ops_df_compat(self):
# GH 1134
s1 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s2 = pd.Series([2, 2, 2], index=list('ABD'), name='x')
s3 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s4 = pd.Series([2, 2, 2, 2], index=list('ABCD'), name='x')
for l, r in [(s1, s2), (s2, s1), (s3, s4), (s4, s3)]:
msg = "Can only compare identically-labeled Series objects"
with tm.assertRaisesRegexp(ValueError, msg):
l == r
with tm.assertRaisesRegexp(ValueError, msg):
l != r
with tm.assertRaisesRegexp(ValueError, msg):
l < r
msg = "Can only compare identically-labeled DataFrame objects"
with tm.assertRaisesRegexp(ValueError, msg):
l.to_frame() == r.to_frame()
with tm.assertRaisesRegexp(ValueError, msg):
l.to_frame() != r.to_frame()
with tm.assertRaisesRegexp(ValueError, msg):
l.to_frame() < r.to_frame()
def test_bool_ops_df_compat(self):
# GH 1134
s1 = pd.Series([True, False, True], index=list('ABC'), name='x')
s2 = pd.Series([True, True, False], index=list('ABD'), name='x')
exp = pd.Series([True, False, False, False],
index=list('ABCD'), name='x')
tm.assert_series_equal(s1 & s2, exp)
tm.assert_series_equal(s2 & s1, exp)
# True | np.nan => True
exp = pd.Series([True, True, True, False],
index=list('ABCD'), name='x')
tm.assert_series_equal(s1 | s2, exp)
# np.nan | True => np.nan, filled with False
exp = pd.Series([True, True, False, False],
index=list('ABCD'), name='x')
tm.assert_series_equal(s2 | s1, exp)
# DataFrame doesn't fill nan with False
exp = pd.DataFrame({'x': [True, False, np.nan, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s1.to_frame() & s2.to_frame(), exp)
tm.assert_frame_equal(s2.to_frame() & s1.to_frame(), exp)
exp = pd.DataFrame({'x': [True, True, np.nan, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s1.to_frame() | s2.to_frame(), exp)
tm.assert_frame_equal(s2.to_frame() | s1.to_frame(), exp)
# different length
s3 = pd.Series([True, False, True], index=list('ABC'), name='x')
s4 = pd.Series([True, True, True, True], index=list('ABCD'), name='x')
exp = pd.Series([True, False, True, False],
index=list('ABCD'), name='x')
tm.assert_series_equal(s3 & s4, exp)
tm.assert_series_equal(s4 & s3, exp)
# np.nan | True => np.nan, filled with False
exp = pd.Series([True, True, True, False],
index=list('ABCD'), name='x')
tm.assert_series_equal(s3 | s4, exp)
# True | np.nan => True
exp = pd.Series([True, True, True, True],
index=list('ABCD'), name='x')
tm.assert_series_equal(s4 | s3, exp)
exp = pd.DataFrame({'x': [True, False, True, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s3.to_frame() & s4.to_frame(), exp)
tm.assert_frame_equal(s4.to_frame() & s3.to_frame(), exp)
exp = pd.DataFrame({'x': [True, True, True, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s3.to_frame() | s4.to_frame(), exp)
tm.assert_frame_equal(s4.to_frame() | s3.to_frame(), exp)
def test_series_frame_radd_bug(self):
# GH 353
vals = Series(tm.rands_array(5, 10))
result = 'foo_' + vals
expected = vals.map(lambda x: 'foo_' + x)
assert_series_equal(result, expected)
frame = DataFrame({'vals': vals})
result = 'foo_' + frame
expected = DataFrame({'vals': vals.map(lambda x: 'foo_' + x)})
tm.assert_frame_equal(result, expected)
# really raise this time
with tm.assertRaises(TypeError):
datetime.now() + self.ts
with tm.assertRaises(TypeError):
self.ts + datetime.now()
def test_series_radd_more(self):
data = [[1, 2, 3],
[1.1, 2.2, 3.3],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'),
pd.NaT],
['x', 'y', 1]]
for d in data:
for dtype in [None, object]:
s = Series(d, dtype=dtype)
with tm.assertRaises(TypeError):
'foo_' + s
for dtype in [None, object]:
res = 1 + pd.Series([1, 2, 3], dtype=dtype)
exp = pd.Series([2, 3, 4], dtype=dtype)
tm.assert_series_equal(res, exp)
res = pd.Series([1, 2, 3], dtype=dtype) + 1
tm.assert_series_equal(res, exp)
res = np.nan + pd.Series([1, 2, 3], dtype=dtype)
exp = pd.Series([np.nan, np.nan, np.nan], dtype=dtype)
tm.assert_series_equal(res, exp)
res = pd.Series([1, 2, 3], dtype=dtype) + np.nan
tm.assert_series_equal(res, exp)
s = pd.Series([pd.Timedelta('1 days'), pd.Timedelta('2 days'),
pd.Timedelta('3 days')], dtype=dtype)
exp = pd.Series([pd.Timedelta('4 days'), pd.Timedelta('5 days'),
pd.Timedelta('6 days')])
tm.assert_series_equal(pd.Timedelta('3 days') + s, exp)
tm.assert_series_equal(s + pd.Timedelta('3 days'), exp)
s = pd.Series(['x', np.nan, 'x'])
tm.assert_series_equal('a' + s, pd.Series(['ax', np.nan, 'ax']))
tm.assert_series_equal(s + 'a', pd.Series(['xa', np.nan, 'xa']))
def test_frame_radd_more(self):
data = [[1, 2, 3],
[1.1, 2.2, 3.3],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'),
pd.NaT],
['x', 'y', 1]]
for d in data:
for dtype in [None, object]:
s = DataFrame(d, dtype=dtype)
with tm.assertRaises(TypeError):
'foo_' + s
for dtype in [None, object]:
res = 1 + pd.DataFrame([1, 2, 3], dtype=dtype)
exp = pd.DataFrame([2, 3, 4], dtype=dtype)
tm.assert_frame_equal(res, exp)
res = pd.DataFrame([1, 2, 3], dtype=dtype) + 1
tm.assert_frame_equal(res, exp)
res = np.nan + pd.DataFrame([1, 2, 3], dtype=dtype)
exp = pd.DataFrame([np.nan, np.nan, np.nan], dtype=dtype)
tm.assert_frame_equal(res, exp)
res = pd.DataFrame([1, 2, 3], dtype=dtype) + np.nan
tm.assert_frame_equal(res, exp)
df = pd.DataFrame(['x', np.nan, 'x'])
tm.assert_frame_equal('a' + df, pd.DataFrame(['ax', np.nan, 'ax']))
tm.assert_frame_equal(df + 'a', pd.DataFrame(['xa', np.nan, 'xa']))
def test_operators_frame(self):
# rpow does not work with DataFrame
df = DataFrame({'A': self.ts})
tm.assert_series_equal(self.ts + self.ts, self.ts + df['A'],
check_names=False)
tm.assert_series_equal(self.ts ** self.ts, self.ts ** df['A'],
check_names=False)
tm.assert_series_equal(self.ts < self.ts, self.ts < df['A'],
check_names=False)
tm.assert_series_equal(self.ts / self.ts, self.ts / df['A'],
check_names=False)
def test_operators_combine(self):
def _check_fill(meth, op, a, b, fill_value=0):
exp_index = a.index.union(b.index)
a = a.reindex(exp_index)
b = b.reindex(exp_index)
amask = | isnull(a) | pandas.isnull |
'''This script helps to identify the acc type used in the data processing script. For further development
purposes a small test function is included to directly test acc type recognition and to print info provided by a separately maintained Excel file.'''
import os
import sys
import pandas as pd
if not hasattr(sys, 'frozen'): # get local folder path for subfolder 'suppldata' if the app is not frozen
ACCINFO_FILE = os.path.join(os.getcwd(), 'suppldata', 'acc_infofile.txt')
else:
ACCINFO_FILE = os.path.join(sys.prefix, 'suppldata', 'acc_infofile.txt')
#necessary function to define how date can be read-in correctly
def dateadjuster(val, timevar):
'''format transformation for parsing dates from csv'''
    # new format because of deprecation warning; error values are handled as 'NaT' automatically
if timevar == 'capy':
return pd.to_datetime(val, format='%d.%m.%Y', errors='coerce')
else:
return | pd.to_datetime(val, format='%d.%m.%y', errors='coerce') | pandas.to_datetime |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 24 11:06:32 2021
@author: ifti
"""
#import lib
import pandas as pd
import requests
from bs4 import BeautifulSoup
import importlib
from importlib import reload
def scrapping_IPA():
    '''
    scrapping_IPA() scrapes the IPA table from
    https://polyglotclub.com/wiki/Language/Central-pashto/Pronunciation/Alphabet-and-Pronunciation
    parameters:
        none
    returns:
        nothing; it writes the scraped table as a csv file in the current path
    '''
try:
URL = "https://polyglotclub.com/wiki/Language/Central-pashto/Pronunciation/Alphabet-and-Pronunciation"
r = requests.get(URL)
except:
print("Error : link is Down")
soup = BeautifulSoup(r.content, 'html5lib')
# If this line causes an error, run 'pip install html5lib' or install html5lib
# print(soup)
tables = soup.find(class_="wikitable")
column_name = []
Final = []
Medial = []
Initial = []
Isolated= []
IPA = []
for group in tables.find_all('tbody'):
for row in group.find_all('tr'):
if row.find_all('td'):
# print("---------",row.find_all('td'))
# for ipa col
                try:  # some of the cells may not be available yet
ipa = row.find_all('td')[0]
IPA.append(ipa.get_text().strip())
except:
IPA.append("Not yet")
try :
# for final columns
final = row.find_all('td')[1]
Final.append(final.get_text().strip())
except:
Final.append("Not yet")
try:
                    # for Medial column
medial = row.find_all('td')[2]
Medial.append(medial.get_text().strip())
except:
Medial.append("Not yet")
                try:
                    # for Initial column
                    initial = row.find_all('td')[3]
                    Initial.append(initial.get_text().strip())
                except:
                    Initial.append("Not yet")
try:
# for Isolated columns
isolated = row.find_all('td')[4]
Isolated.append(isolated.get_text().strip())
except:
Isolated.append("Not yet")
ipa_dictionay = {}
ipa_dictionay["IPA"] = IPA
ipa_dictionay["Final"] = Final
ipa_dictionay["Medial"] = Medial
ipa_dictionay["Initial"] = Initial
ipa_dictionay["Isolated"] = Isolated
# print((Isolated,IPA))
# ipa_dictionay
df = pd.DataFrame.from_dict(ipa_dictionay)
df.to_csv("IPA_pashto.csv",index=False,encoding='utf-8')
def IPA(char ,Final_=False ,Medial_=False, Initial_=False,Isolated_=False):
    '''
    IPA() returns the IPA of a Pashto character.
    parameters:
        it has five parameters:
        the first parameter, char, is the character itself;
        Final_, Medial_, Initial_ and Isolated_ are booleans selecting which
        positional form of the character the IPA is looked up for.
    '''
    # read the scraped IPA table from file (pandas is already imported at module level)
df = | pd.read_csv("Datasets/IPA_pashto.csv",encoding="utf-8") | pandas.read_csv |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2/20/2019 6:47 PM
# @Author : chinshin
# @FileName: data_generator.py
"""
generate data under different splitters
splitters are as follows:
drug-based
random split:
scaffold split
interaction-based:
random split
random split with different scales
dataset:
input:
add_zinc_smiles.txt: contains a list of drugs whose total number is 548.
drug_drug_matrix: contains known interactions between different drugs.
output:
ddi_train.csv: train dataset containing multiple DDIs.
ddi_test.csv: test dataset containing multiple DDIs.
index_train.txt: optional for drug-based splitter
index_test.txt: optional for drug-based splitter
"""
import os
import csv
import sys
import copy
import random
import logging
import argparse
import pandas as pd
import numpy as np
import pickle
from collections import OrderedDict
from rdkit.Chem import MolFromSmiles
from os.path import abspath, dirname
from sklearn.model_selection import train_test_split
from chainer_chemistry.dataset.splitters import ScaffoldSplitter, RandomSplitter, StratifiedSplitter
from deepchem.feat import WeaveFeaturizer
logging.basicConfig(format='%(asctime)s: %(filename)s: %(funcName)s: %(lineno)d: %(message)s', level=logging.INFO)
ROOT_PATH = dirname(dirname(dirname(abspath(__file__))))
sys.path.insert(0, ROOT_PATH)
global_seed = 2018
random.seed(global_seed)
from utils import load_csv, index2id, is_number
from setting import *
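# Typical driver (assumption: sketch only; the actual entry point of this script is
# expected to parse command-line args and call one of the split methods below):
#   splitter = Splitter()
#   splitter.random_split_based_interaction('ddi_train.csv', 'ddi_valid.csv', 'ddi_test.csv')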
def add_super_nodes(graph_distance=True, explicit_H=False,
use_chirality=False):
"""
add super nodes for each drug molecule.
feature vector incoporates original information used in MPNN.
"""
drug_list_filename = 'drug_list.csv'
drug_list_filepath = os.path.join(DRUG_LIST_PATH, drug_list_filename)
df = pd.read_csv(drug_list_filepath)
smiles_list = df['smiles']
mol_list = [MolFromSmiles(smiles) for smiles in smiles_list]
featurizer = WeaveFeaturizer(graph_distance=graph_distance,
explicit_H=explicit_H, use_chirality=use_chirality)
weave_mol_list = featurizer.featurize(mol_list)
atom_feat_list = [mol.get_atom_features().sum(axis=0) for mol in weave_mol_list]
mean_atom_feat_list = [mol.get_atom_features().mean(axis=0) for mol in weave_mol_list]
max_atom_feat_list = [mol.get_atom_features().max(axis=0) for mol in weave_mol_list]
atom_feat_list = np.concatenate(
(atom_feat_list, mean_atom_feat_list, max_atom_feat_list), axis=1)
atom_feat_list = np.concatenate(
(atom_feat_list, np.zeros((NUM_DRUGS, 244 - 75 * 3))), axis=1
)
smiles2atom_feat = dict()
for smiles, atom_feat in zip(smiles_list, atom_feat_list):
smiles2atom_feat[smiles] = atom_feat
filename = 'super_nodes.pkl'
filepath = os.path.join(SUPER_NODE_PATH, filename)
with open(filepath, 'wb') as writer:  # binary mode for pickle
pickle.dump(smiles2atom_feat, writer)
print('Super nodes have been generated in path {}'.format(filepath))
# return smiles2atom_feat
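# Loading sketch (assumption: helper added for illustration only; nothing in this
# module calls it). It reads back the smiles -> super-node feature dict written by
# add_super_nodes above.
def _load_super_node_pickle(filename='super_nodes.pkl'):
    filepath = os.path.join(SUPER_NODE_PATH, filename)
    with open(filepath, 'rb') as reader:
        return pickle.load(reader)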
def add_super_nodes2(n_super_features=244, mean=0.0, scale=1.0):
MAX_ATOMIC_NUM = 117
w = np.random.normal(loc=mean, scale=scale, size=(MAX_ATOMIC_NUM, n_super_features))
drug_list_filename = 'drug_list.csv'
drug_list_filepath = os.path.join(DRUG_LIST_PATH, drug_list_filename)
df = pd.read_csv(drug_list_filepath)
smiles_list = df['smiles']
mol_list = [MolFromSmiles(smiles) for smiles in smiles_list]
super_node_list = list()
for mol in mol_list:
atomic_num_list = [atom.GetAtomicNum() for atom in mol.GetAtoms()]
atomic_embedding_list = [w[atomic_num - 1] for atomic_num in atomic_num_list]
super_node_feat = np.array(atomic_embedding_list, dtype=np.float32).mean(axis=0)
super_node_list.append(super_node_feat)
smiles2super_node = dict()
for smiles, super_node_feat in zip(smiles_list, super_node_list):
smiles2super_node[smiles] = super_node_feat
filename = 'super_nodes_random_embedding.pkl'
filepath = os.path.join(SUPER_NODE_PATH, filename)
with open(filepath, 'wb') as writer:  # binary mode for pickle
pickle.dump(smiles2super_node, writer)
print('Super nodes have been generated in path {}'.format(filepath))
def load_super_nodes(filename='super_nodes.txt'):
"""
add super nodes for each drug molecule.
feature vector incorporates original information used in MPNN.
"""
filepath = os.path.join(ROOT_PATH, 'examples', 'ddi', filename)
smiles_list = list()
atom_feat_list = list()
with open(filepath, 'r') as reader:
for line in reader.readlines():
line = line.strip('\n')
smiles, feat_str = line.split('\t')[0], line.split('\t')[-1]
feat = [float(digit) for digit in feat_str.split(' ') if is_number(digit)]
smiles_list.append(smiles)  # append the parsed SMILES string, not the list itself
atom_feat_list.append(feat)
return smiles_list, atom_feat_list
def generate_drug_list():
filename = 'drug_list_copy.csv'
filepath = os.path.join(DRUG_LIST_PATH, filename)
df = pd.read_csv(filepath)
data = list()
for row_id, row_series in df.iterrows():
row_dict = dict(row_series)
row_dict.pop('Unnamed: 0')
if MolFromSmiles(row_dict['smiles']) is not None:
data.append(row_dict)
new_filename = 'drug_list.csv'
new_filepath = os.path.join(DRUG_LIST_PATH, new_filename)
new_df = pd.DataFrame(data=data)
new_df.to_csv(new_filepath)
new_df = pd.read_csv(new_filepath)
assert sum([MolFromSmiles(smiles) is None for smiles in new_df['smiles']]) == 0
def validate_drug_list():
filename = 'drug_list.csv'
filepath = os.path.join(DRUG_LIST_PATH, filename)
df = pd.read_csv(filepath)
assert sum([MolFromSmiles(smiles) is None for smiles in df['smiles']]) == 0
class Splitter(object):
def __init__(self):
drug_list_filepath = os.path.join(DRUG_LIST_PATH, 'drug_list.csv')
self.drug_list_df = pd.read_csv(drug_list_filepath)
drug_drug_matrix_filename = 'drug_drug_matrix.csv'
drug_drug_matrix_filepath = os.path.join(GROUND_TRUTH_PATH, drug_drug_matrix_filename)
self.drug_drug_matrix_df = pd.read_csv(drug_drug_matrix_filepath)
def __generate_data_and_labels(self):
drug_list_df = self.drug_list_df
drug_drug_matrix_df = self.drug_drug_matrix_df
pairs = list()
labels = list()
columns = drug_drug_matrix_df.columns.values[1:]
columns_from_drug_list = drug_list_df['cid'].values
assert len(columns) == NUM_DRUGS
assert list(columns) == list(columns_from_drug_list)
for row_id, row_series in drug_drug_matrix_df.iterrows():
row_cid = columns[row_id]
for col_id, col_cid in enumerate(columns):
if col_id > row_id:
pairs.append((row_cid, col_cid))
labels.append(int(row_series.loc[col_cid]))
pairs = np.array(pairs)
labels = np.array(labels)
assert len(pairs) == NUM_INTERACTIONS
assert len(labels) == NUM_INTERACTIONS
return pairs, labels
def stat(self, pairs, labels):
num_pos = np.sum(labels == 1)
num_neg = np.sum(labels == 0)
ratio = float(num_pos) / float(num_neg)
print('pos: {}, neg: {}, ratio: {}'.format(num_pos, num_neg, ratio))
def __write_to_disk(self, split_and_dataset):
for split, item in split_and_dataset.items():  # dict.items() works on both Python 2 and 3
dataset, filepath = item
pairs, labels = dataset
df = self.drug_list_df
cid2drugbank_id = dict(zip(df['cid'], df['drugbank_id']))
cid2smiles = dict(zip(df['cid'], df['smiles']))
data = list()
for pair, label in zip(pairs, labels):
cid_1, cid_2 = pair
dbid_1 = cid2drugbank_id[cid_1]
dbid_2 = cid2drugbank_id[cid_2]
smiles_1 = cid2smiles[cid_1]
smiles_2 = cid2smiles[cid_2]
item = OrderedDict({
'cid_1': cid_1,
'cid_2': cid_2,
'drugbank_id_1': dbid_1,
'drugbank_id_2': dbid_2,
'smiles_1': smiles_1,
'smiles_2': smiles_2,
'label': label
})
data.append(item)
df_train = pd.DataFrame(data=data)
df_train.to_csv(filepath)
print('{} dataset generated.'.format(split[0].upper() + split[1:]))
def random_split_based_drug(self, train_filepath, valid_filepath, test_filepath,
frac_train=0.8, frac_valid=0.1, frac_test=0.1):
"""
Splitter based on distinct drug.
Corresponding splitter about DDI are based on the splitted drugs.
"""
pairs, labels = self.__generate_data_and_labels()
drug_list_df = copy.deepcopy(self.drug_list_df)
cid_arr = drug_list_df['cid'].values
ss = RandomSplitter()
train_inds, valid_inds, test_inds = ss.train_valid_test_split(dataset=drug_list_df,
frac_train=frac_train, frac_valid=frac_valid,
frac_test=frac_test,
return_index=True, seed=GLOBAL_SEED)
assert len(train_inds) + len(valid_inds) + len(test_inds) == NUM_DRUGS
cids_train, cids_valid, cids_test = cid_arr[train_inds], cid_arr[valid_inds], cid_arr[test_inds]
train_inds_filepath = os.path.join(
os.path.dirname(train_filepath),
os.path.basename(train_filepath).split('.')[0] + '_ind' + '.csv')
valid_inds_filepath = os.path.join(
os.path.dirname(valid_filepath),
os.path.basename(valid_filepath).split('.')[0] + '_ind' + '.csv')
test_inds_filepath = os.path.join(
os.path.dirname(test_filepath),
os.path.basename(test_filepath).split('.')[0] + '_ind' + '.csv')
# row selection
df_train = drug_list_df.loc[train_inds]
del df_train['Unnamed: 0']
df_valid = drug_list_df.loc[valid_inds]
del df_valid['Unnamed: 0']
df_test = drug_list_df.loc[test_inds]
del df_test['Unnamed: 0']
df_train.to_csv(train_inds_filepath)
df_valid.to_csv(valid_inds_filepath)
df_test.to_csv(test_inds_filepath)
pairs_train, labels_train = list(), list()
pairs_valid, labels_valid = list(), list()
pairs_test, labels_test = list(), list()
for pair, label in zip(pairs, labels):
cid_1, cid_2 = pair
# train dataset generation
if cid_1 in cids_train and cid_2 in cids_train:
pairs_train.append((cid_1, cid_2))
labels_train.append(label)
# valid dataset generation
elif (cid_1 in cids_train and cid_2 in cids_valid) or \
(cid_1 in cids_valid and cid_2 in cids_train):
pairs_valid.append((cid_1, cid_2))
labels_valid.append(label)
# test dataset generation
elif (cid_1 in cids_train and cid_2 in cids_test) or \
(cid_1 in cids_test and cid_2 in cids_train):
pairs_test.append((cid_1, cid_2))
labels_test.append(label)
pairs_train = np.array(pairs_train)
labels_train = np.array(labels_train)
pairs_valid = np.array(pairs_valid)
labels_valid = np.array(labels_valid)
pairs_test = np.array(pairs_test)
labels_test = np.array(labels_test)
# statistics
# train: num_total, num_pos, num_neg
# test: num_total, num_pos, num_neg
num_total_train = labels_train.shape[0]
num_pos_train = np.sum(labels_train == 1)
num_neg_train = np.sum(labels_train == 0)
num_total_valid = labels_valid.shape[0]
num_pos_valid = np.sum(labels_valid == 1)
num_neg_valid = np.sum(labels_valid == 0)
num_total_test = labels_test.shape[0]
num_pos_test = np.sum(labels_test == 1)
num_neg_test = np.sum(labels_test == 0)
print('Statistics: ')
print('Train# total: {}, pos: {}, neg: {}'.format(num_total_train, num_pos_train, num_neg_train))
print('Valid# total: {}, pos: {}, neg: {}'.format(num_total_valid, num_pos_valid, num_neg_valid))
print('Test # total: {}, pos: {}, neg: {}'.format(num_total_test, num_pos_test, num_neg_test))
split_and_dataset = {
'train': [(pairs_train, labels_train), train_filepath],
'valid': [(pairs_valid, labels_valid), valid_filepath],
'test': [(pairs_test, labels_test), test_filepath],
}
# write train dataset and test dataset into disk
self.__write_to_disk(split_and_dataset)
def scaffold_split_based_drug(self, train_filepath, valid_filepath, test_filepath,
frac_train=0.8, frac_valid=0.1, frac_test=0.1):
"""
We need to delete two drugs:
CID000004609, DB00526
[H][N]1([H])[C@@H]2CCCC[C@H]2[N]([H])([H])[Pt]11OC(=O)C(=O)O1
CID000060754, DB00225
O=C1[O-][Gd+3]234567[O]=C(C[N]2(CC[N]3(CC([O-]4)=O)CC[N]5(CC(=[O]6)NC)CC(=O)[O-]7)C1)NC
"""
pairs, labels = self.__generate_data_and_labels()
drug_list_df = copy.deepcopy(self.drug_list_df)
db_ids_removed = ['DB00526', 'DB00225']
drug_list_df = drug_list_df.drop(
drug_list_df.loc[drug_list_df['drugbank_id'].isin(db_ids_removed)].index)
assert sum([db_id in drug_list_df['drugbank_id'].values for db_id in db_ids_removed]) == 0
cid_arr = drug_list_df['cid'].values
smiles_arr = drug_list_df['smiles'].values
ss = ScaffoldSplitter()
train_inds, valid_inds, test_inds = ss.train_valid_test_split(drug_list_df, smiles_arr,
frac_train=frac_train, frac_valid=frac_valid,
frac_test=frac_test,
return_index=True)
assert len(train_inds) + len(valid_inds) + len(test_inds) == NUM_DRUGS - len(db_ids_removed)
cids_train, cids_valid, cids_test = cid_arr[train_inds], cid_arr[valid_inds], cid_arr[test_inds]
train_inds_filepath = os.path.join(
os.path.dirname(train_filepath),
os.path.basename(train_filepath).split('.')[0] + '_ind' + '.csv')
valid_inds_filepath = os.path.join(
os.path.dirname(valid_filepath),
os.path.basename(valid_filepath).split('.')[0] + '_ind' + '.csv')
test_inds_filepath = os.path.join(
os.path.dirname(test_filepath),
os.path.basename(test_filepath).split('.')[0] + '_ind' + '.csv')
df_train = drug_list_df.loc[train_inds]
# del df_train['Unnamed: 0']
df_valid = drug_list_df.loc[valid_inds]
# del df_valid['Unnamed: 0']
df_test = drug_list_df.loc[test_inds]
# del df_test['Unnamed: 0']
df_train.to_csv(train_inds_filepath, index=False)
df_valid.to_csv(valid_inds_filepath, index=False)
df_test.to_csv(test_inds_filepath)
pairs_train, labels_train = list(), list()
pairs_valid, labels_valid = list(), list()
pairs_test, labels_test = list(), list()
for pair, label in zip(pairs, labels):
cid_1, cid_2 = pair
# train dataset generation
if cid_1 in cids_train and cid_2 in cids_train:
pairs_train.append((cid_1, cid_2))
labels_train.append(label)
# valid dataset generation
elif (cid_1 in cids_train and cid_2 in cids_valid) or \
(cid_2 in cids_train and cid_1 in cids_valid):
pairs_valid.append((cid_1, cid_2))
labels_valid.append(label)
# test dataset generation
elif (cid_1 in cids_train and cid_2 in cids_test) or \
(cid_1 in cids_test and cid_2 in cids_train):
pairs_test.append((cid_1, cid_2))
labels_test.append(label)
pairs_train = np.array(pairs_train)
labels_train = np.array(labels_train)
pairs_valid = np.array(pairs_valid)
labels_valid = np.array(labels_valid)
pairs_test = np.array(pairs_test)
labels_test = np.array(labels_test)
# statistics
# train: num_total, num_pos, num_neg
# test: num_total, num_pos, num_neg
num_total_train = labels_train.shape[0]
num_pos_train = np.sum(labels_train == 1)
num_neg_train = np.sum(labels_train == 0)
num_total_valid = labels_valid.shape[0]
num_pos_valid = np.sum(labels_valid == 1)
num_neg_valid = np.sum(labels_valid == 0)
num_total_test = labels_test.shape[0]
num_pos_test = np.sum(labels_test == 1)
num_neg_test = np.sum(labels_test == 0)
print('Statistics: ')
print('Train# total: {}, pos: {}, neg: {}'.format(num_total_train, num_pos_train, num_neg_train))
print('Valid# total: {}, pos: {}, neg: {}'.format(num_total_valid, num_pos_valid, num_neg_valid))
print('Test # total: {}, pos: {}, neg: {}'.format(num_total_test, num_pos_test, num_neg_test))
split_and_dataset = {
'train': [(pairs_train, labels_train), train_filepath],
'valid': [(pairs_valid, labels_valid), valid_filepath],
'test': [(pairs_test, labels_test), test_filepath],
}
# write train dataset and test dataset into disk
self.__write_to_disk(split_and_dataset)
def random_split_based_interaction(self, train_filepath, valid_filepath, test_filepath,
frac_train=0.8, frac_valid=0.1, frac_test=0.1):
pairs, labels = self.__generate_data_and_labels()
ss = StratifiedSplitter()
train_inds, valid_inds, test_inds = ss.train_valid_test_split(
dataset=pairs, labels=labels,
frac_train=frac_train, frac_valid=frac_valid, frac_test=frac_test,
return_index=True, seed=GLOBAL_SEED)
assert len(train_inds) + len(valid_inds) + len(test_inds) == NUM_INTERACTIONS
pairs_train, pairs_valid, pairs_test = pairs[train_inds], pairs[valid_inds], pairs[test_inds]
labels_train, labels_valid, labels_test = labels[train_inds], labels[valid_inds], labels[test_inds]
ratio_train = (float(np.sum(labels_train == 1)) / float(np.sum(labels_train == 0)))
ratio_valid = (float(np.sum(labels_valid == 1)) / float(np.sum(labels_valid == 0)))
ratio_test = (float(np.sum(labels_test == 1)) / float(np.sum(labels_test == 0)))
ratio = (float(np.sum(labels == 1))) / float(np.sum(labels == 0))
assert int(100 * ratio) == int(100 * ratio_train) == int(100 * ratio_valid) == int(100 * ratio_test)
# statistics
# train: num_total, num_pos, num_neg
# test: num_total, num_pos, num_neg
num_total_train = labels_train.shape[0]
num_pos_train = np.sum(labels_train == 1)
num_neg_train = np.sum(labels_train == 0)
num_total_valid = labels_valid.shape[0]
num_pos_valid = np.sum(labels_valid == 1)
num_neg_valid = np.sum(labels_valid == 0)
num_total_test = labels_test.shape[0]
num_pos_test = np.sum(labels_test == 1)
num_neg_test = np.sum(labels_test == 0)
print('Statistics: ')
print('Train# total: {}, pos: {}, neg: {}'.format(num_total_train, num_pos_train, num_neg_train))
print('Valid# total: {}, pos: {}, neg: {}'.format(num_total_valid, num_pos_valid, num_neg_valid))
print('Test # total: {}, pos: {}, neg: {}'.format(num_total_test, num_pos_test, num_neg_test))
split_and_dataset = {
'train': [(pairs_train, labels_train), train_filepath],
'valid': [(pairs_valid, labels_valid), valid_filepath],
'test': [(pairs_test, labels_test), test_filepath],
}
# write train dataset and test dataset into disk.
self.__write_to_disk(split_and_dataset)
def random_split_based_interaction_equal(self, train_filepath, valid_filepath, test_filepath,
frac_train=0.8, frac_valid=0.1, frac_test=0.1):
pairs, labels = self.__generate_data_and_labels()
ss = StratifiedSplitter()
train_inds, valid_inds, test_inds = ss.train_valid_test_split(
dataset=pairs, labels=labels,
frac_train=frac_train, frac_valid=frac_valid, frac_test=frac_test,
return_index=True)
assert len(train_inds) + len(valid_inds) + len(test_inds) == NUM_INTERACTIONS
pairs_train, pairs_valid, pairs_test = pairs[train_inds], pairs[valid_inds], pairs[test_inds]
labels_train, labels_valid, labels_test = labels[train_inds], labels[valid_inds], labels[test_inds]
ratio_train = (float(np.sum(labels_train == 1)) / float(np.sum(labels_train == 0)))
ratio_valid = (float(np.sum(labels_valid == 1)) / float(np.sum(labels_valid == 0)))
ratio_test = (float(np.sum(labels_test == 1)) / float(np.sum(labels_test == 0)))
ratio = (float(np.sum(labels == 1))) / float(np.sum(labels == 0))
assert int(100 * ratio) == int(100 * ratio_train) == int(100 * ratio_valid) == int(100 * ratio_test)
pairs_train_pos = pairs_train[labels_train == 1]
pairs_train_neg = pairs_train[labels_train == 0]
num_pos = np.sum(labels_train == 1)
num_neg = np.sum(labels_train == 0)
scale = np.sum(labels_train == 1)
indices_pos = np.arange(0, num_pos)
indices_pos = np.random.choice(indices_pos, size=scale, replace=False)
pairs_train_pos = pairs_train_pos[indices_pos]
indices_neg = np.arange(0, num_neg)
indices_neg = np.random.choice(indices_neg, size=scale, replace=False)
pairs_train_neg = pairs_train_neg[indices_neg]
pairs_train = np.concatenate((pairs_train_pos, pairs_train_neg), axis=0)
labels_train = np.concatenate(
(np.ones(shape=(scale,)), np.zeros(shape=(scale,))), axis=0)
indices = np.arange(0, 2 * scale)
np.random.seed(GLOBAL_SEED)
np.random.shuffle(indices)
pairs_train = pairs_train[indices]
labels_train = labels_train[indices]
assert len(pairs_train) == len(labels_train) == 2 * scale
# statistics
# train: num_total, num_pos, num_neg
# test: num_total, num_pos, num_neg
num_total_train = labels_train.shape[0]
num_pos_train = np.sum(labels_train == 1)
num_neg_train = np.sum(labels_train == 0)
num_total_valid = labels_valid.shape[0]
num_pos_valid = np.sum(labels_valid == 1)
num_neg_valid = np.sum(labels_valid == 0)
num_total_test = labels_test.shape[0]
num_pos_test = np.sum(labels_test == 1)
num_neg_test = np.sum(labels_test == 0)
print('Statistics: ')
print('Train# total: {}, pos: {}, neg: {}'.format(num_total_train, num_pos_train, num_neg_train))
print('Valid# total: {}, pos: {}, neg: {}'.format(num_total_valid, num_pos_valid, num_neg_valid))
print('Test # total: {}, pos: {}, neg: {}'.format(num_total_test, num_pos_test, num_neg_test))
split_and_dataset = {
'train': [(pairs_train, labels_train), train_filepath],
'valid': [(pairs_valid, labels_valid), valid_filepath],
'test': [(pairs_test, labels_test), test_filepath],
}
# write train dataset and test dataset into disk.
self.__write_to_disk(split_and_dataset)
def random_split_based_interaction_different_scales(self, scale, train_filepath, valid_filepath, test_filepath,
frac_train=0.8, frac_valid=0.1, frac_test=0.1):
pairs, labels = self.__generate_data_and_labels()
ss = StratifiedSplitter()
train_inds, valid_inds, test_inds = ss.train_valid_test_split(
dataset=pairs, labels=labels,
frac_train=frac_train, frac_valid=frac_valid, frac_test=frac_test,
return_index=True)
assert len(train_inds) + len(valid_inds) + len(test_inds) == NUM_INTERACTIONS
pairs_train, pairs_valid, pairs_test = pairs[train_inds], pairs[valid_inds], pairs[test_inds]
labels_train, labels_valid, labels_test = labels[train_inds], labels[valid_inds], labels[test_inds]
ratio_train = (float(np.sum(labels_train == 1)) / float(np.sum(labels_train == 0)))
ratio_valid = (float(np.sum(labels_valid == 1)) / float(np.sum(labels_valid == 0)))
ratio_test = (float(np.sum(labels_test == 1)) / float(np.sum(labels_test == 0)))
ratio = (float(np.sum(labels == 1))) / float(np.sum(labels == 0))
assert int(100 * ratio) == int(100 * ratio_train) == int(100 * ratio_valid) == int(100 * ratio_test)
pairs_train_pos = pairs_train[labels_train == 1]
pairs_train_neg = pairs_train[labels_train == 0]
num_pos = np.sum(labels_train == 1)
num_neg = np.sum(labels_train == 0)
assert scale <= np.sum(labels_train == 1) and scale <= np.sum(labels_train == 0)
indices_pos = np.arange(0, num_pos)
indices_pos = np.random.choice(indices_pos, size=scale, replace=False)
pairs_train_pos = pairs_train_pos[indices_pos]
indices_neg = np.arange(0, num_neg)
indices_neg = np.random.choice(indices_neg, size=scale, replace=False)
pairs_train_neg = pairs_train_neg[indices_neg]
pairs_train = np.concatenate((pairs_train_pos, pairs_train_neg), axis=0)
labels_train = np.concatenate(
(np.ones(shape=(scale,)), np.zeros(shape=(scale,))), axis=0)
indices = np.arange(0, 2 * scale)
np.random.seed(GLOBAL_SEED)
np.random.shuffle(indices)
pairs_train = pairs_train[indices]
labels_train = labels_train[indices]
assert len(pairs_train) == len(labels_train) == 2 * scale
# statistics
# train: num_total, num_pos, num_neg
# test: num_total, num_pos, num_neg
num_total_train = labels_train.shape[0]
num_pos_train = np.sum(labels_train == 1)
num_neg_train = np.sum(labels_train == 0)
num_total_valid = labels_valid.shape[0]
num_pos_valid = np.sum(labels_valid == 1)
num_neg_valid = np.sum(labels_valid == 0)
num_total_test = labels_test.shape[0]
num_pos_test = np.sum(labels_test == 1)
num_neg_test = np.sum(labels_test == 0)
print('Statistics: ')
print('Train# total: {}, pos: {}, neg: {}'.format(num_total_train, num_pos_train, num_neg_train))
print('Valid# total: {}, pos: {}, neg: {}'.format(num_total_valid, num_pos_valid, num_neg_valid))
print('Test # total: {}, pos: {}, neg: {}'.format(num_total_test, num_pos_test, num_neg_test))
split_and_dataset = {
'train': [(pairs_train, labels_train), train_filepath],
'valid': [(pairs_valid, labels_valid), valid_filepath],
'test': [(pairs_test, labels_test), test_filepath],
}
# write train dataset and test dataset into disk.
self.__write_to_disk(split_and_dataset)
class SymmetricPair(object):
def __init__(self, former, latter):
self.former = former
self.latter = latter
def __eq__(self, other):
if self.former == other.former and self.latter == other.latter \
or self.former == other.latter and self.latter == other.former:
return True
else:
return False
def __getitem__(self, item):
if item == 0:
return self.former
elif item == 1:
return self.latter
else:
raise ValueError('No third element.')
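# Behaviour sketch for SymmetricPair (assumption: illustrative only; the DrugBank
# IDs are placeholders): equality ignores the order of the two drugs.
if __name__ == '__main__':
    assert SymmetricPair('DB00001', 'DB00002') == SymmetricPair('DB00002', 'DB00001')
    assert SymmetricPair('DB00001', 'DB00002')[0] == 'DB00001'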
class KaistSplitter(object):
"""
Ouput:
a list of drugs: drug_list.csv
interaction: ddi_total.csv, ddi_train.csv, ddi_valid.csv, ddi_test.csv
dict, key=(drugbank_id_1, drugbank_id_2), value=dict(labels=[], masks=[], descs=[]))
item = {
(drugbank_id_1, drugbank_id_2): {
'labels': [labels_1, labels_2, ..., label_n],
'masks': [mask_1, mask_2, ..., mask_n],
'descs': [desc_1, desc_2, ..., desc_n],
}
}
assert len(labels) == len(masks) == len(descs)
the key named 'labels', 'masks' and 'descs' should be converted into the string format.
the delimiter should be '||'
First, handle each split separately.
"""
def __init__(self):
ddi_filename = 'original_ddi_total.csv'
ddi_filepath = os.path.join(KAIST_PATH, ddi_filename)
self.ddi_ori_df = pd.read_csv(ddi_filepath)
def __generate_drug_list(self):
ddi_df = self.ddi_ori_df
db_id_1_list = list(ddi_df['Drug1'].values)
db_id_2_list = list(ddi_df['Drug2'].values)
db_id_list = list(set(db_id_1_list + db_id_2_list))
print('Before preprocessing, num of drugs: {}'.format(len(db_id_list)))
# eliminate the invalid drugs whose SMILES representation are unavailable or which can
# not be converted into rdkit.Chem.Mol object.
drug_list_filename = 'drug_list_from_drugbank_latest.csv'
drug_list_filepath = os.path.join(DRUGBANK_PATH, drug_list_filename)
drug_list_df = pd.read_csv(drug_list_filepath)
db_id2smiles = dict(zip(drug_list_df['drugbank_id'], drug_list_df['smiles']))
invalid_count = 0
valid_db_id_list = list()
for db_id in db_id_list:
smiles = db_id2smiles.get(db_id, None)
if smiles is None or MolFromSmiles(smiles) is None:
invalid_count += 1
print('Invalid drug: {}'.format(db_id))
continue
valid_db_id_list.append(db_id)
print('Invalid count: {}'.format(invalid_count))
kaist_drug_list = list()
for row_id, row in drug_list_df.iterrows():
db_id = row['drugbank_id']
if db_id in valid_db_id_list:
item = dict(row)
kaist_drug_list.append(item)
kaist_drug_df = pd.DataFrame(kaist_drug_list, columns=drug_list_df.columns.values)
filename = 'drug_list.csv'
filepath = os.path.join(KAIST_PATH, filename)
kaist_drug_df.to_csv(filepath)
assert len(kaist_drug_df) == NUM_DRUGS_KAIST
print('After preprocessing, num of drugs: {}'.format(len(kaist_drug_df)))
def split_train_valid_split(self):
ddi_ori_df = self.ddi_ori_df
# generate python dictionary to convert DBID to SMILES representation.
# the conversion is always successful.
drug_list_filename = 'drug_list.csv'
drug_list_filepath = os.path.join(KAIST_PATH, drug_list_filename)
if not os.path.exists(drug_list_filepath):
self.__generate_drug_list()
drug_list_df = | pd.read_csv(drug_list_filepath) | pandas.read_csv |
# coding: utf-8
"""tools for working with collections of cases"""
from os import path
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from radcomp import USER_DIR
from radcomp.vertical import case, plotting
COL_START = 'start'
COL_END = 'end'
def read_case_times(name):
"""Read case starting and ending times from cases directory."""
filepath = path.join(USER_DIR, 'cases', name + '.csv')
dts = pd.read_csv(filepath, parse_dates=[COL_START, COL_END], comment='#',
skip_blank_lines=True)
indexing_func = lambda row: case.case_id_fmt(row[COL_START], row[COL_END])
dts.index = dts.apply(indexing_func, axis=1)
dts.index.name = 'id'
return dts
def _row_bool_flag(row, flag_name, default=None):
"""If exists, convert flag to boolean, else use default. None on empty."""
if flag_name in row:
if np.isnan(row[flag_name]):
flag = None
else:
flag = bool(row[flag_name])
return flag
return default
def read_cases(name):
"""Read cases based on a cases list."""
dts = read_case_times(name)
cases_list = []
for cid, row in dts.iterrows():
case_kws = dict()
case_kws['has_ml'] = _row_bool_flag(row, 'ml', default=False)
case_kws['is_convective'] = _row_bool_flag(row, 'convective', default=None)
try:
t_start, t_end = row[COL_START], row[COL_END]
c = case.Case.from_dtrange(t_start, t_end, **case_kws)
if c.data.empty:
err_msg_fmt = 'No data available between {} and {}.'
raise ValueError(err_msg_fmt.format(t_start, t_end))
cases_list.append(c)
except ValueError as e:
print('Error: {}. Skipping {}'.format(e, cid))
dts.drop(cid, inplace=True)
dts['case'] = cases_list
return dts
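# Usage sketch (assumption: 'example' names a CSV under USER_DIR/cases with the
# start/end columns described above; kept as a comment because the data files are
# site-specific):
#   cases = read_cases('example')
#   print(cases[[COL_START, COL_END]].head())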
def plot_convective_occurrence(occ, ax=None, **kws):
"""Bar plot convective occurrence.
Args:
occ (Series)
"""
ax = ax or plt.gca()
#occ.plot.bar(ax=ax, **kws) # bug in pandas or mpl
ax.bar(occ.index, occ.values, width=0.5, **kws) # workaround
ax.set_xticks(occ.index) # workaround
ax.set_ylabel('norm. freq. in\nconvection')
ax.yaxis.grid(True)
def ts_case_ids(cases):
"""case ids by timestamp"""
cids_list = []
for cid, c in cases.case.items():  # Series.items(); iteritems is deprecated
cids_list.append(c.timestamps().apply(lambda x: cid))
ts = | pd.concat(cids_list) | pandas.concat |
import pandas as pd
import numpy as np
from collections import namedtuple
from IPython.display import HTML, display
from .dFSM import startstopFSM
## Determine the results of an FSM run.
def disp_result(startversuch):
summary = pd.DataFrame(startversuch[startstopFSM.run2filter_content]).T
#summary = pd.DataFrame.from_dict({k:v for k,v in dict(startversuch[['index'] + fsm.filters['run2filter_times']]).items() if v == v}, orient='index').T.round(2)
#summary = pd.DataFrame(startversuch[fsm.filters['run2filter_times']], dtype=np.float64).fillna(0).round(2).T
display(HTML(summary.to_html(escape=False, index=False)))
#display(HTML('<h3>'+ summary.to_html(escape=False, index=False) + '</h3>'))
def disp_alarms(startversuch):
ald = []; alt = []
for al in startversuch['alarms']:
ald.append({
'state':al['state'],'severity':al['msg']['severity'],'Number':al['msg']['name'],
'date':pd.to_datetime(int(al['msg']['timestamp'])*1e6).strftime('%d.%m.%Y %H:%M:%S'),
'message':al['msg']['message']
})
alt.append(pd.to_datetime(int(al['msg']['timestamp'])*1e6))
aldf = pd.DataFrame(ald)
if not aldf.empty:
display(HTML(aldf.to_html(escape=False, index=False)))
#display(HTML('<h3>'+ aldf.to_html(escape=False, index=False) + '</h3>'))
return alt
def disp_warnings(startversuch):
wad = []; wat = []
for wd in startversuch['warnings']:
wad.append({
'state':wd['state'],'severity':wd['msg']['severity'],'Number':wd['msg']['name'],
'date':pd.to_datetime(int(wd['msg']['timestamp'])*1e6).strftime('%d.%m.%Y %H:%M:%S'),
'message':wd['msg']['message']
})
wat.append(pd.to_datetime(int(wd['msg']['timestamp'])*1e6))
wdf = | pd.DataFrame(wad) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 26 19:49:04 2016
@author: noore
"""
import os
import pandas as pd
import numpy as np
import settings as S
from cobra.io.sbml import create_cobra_model_from_sbml_file
from cobra.manipulation.modify import convert_to_irreversible
from bool_parser import BoolParser
import matplotlib.pyplot as plt
import seaborn as sns
class Volume(object):
def __init__(self):
self.cobra_model = create_cobra_model_from_sbml_file(S.ECOLI_SBML_FNAME)
convert_to_irreversible(self.cobra_model)
self.met_conc_df = self.get_metabolite_data()
self.km_df = self.get_km_data()
self.enz_conc_df, self.enz_mw_df = self.get_enzyme_data()
self.flux_df = self.get_flux_data()
self.data_df = pd.merge(self.km_df, self.met_conc_df, on='bigg.metabolite')
self.data_df = pd.merge(self.data_df, self.enz_conc_df, on=['bigg.reaction', 'condition'])
self.data_df = pd.merge(self.data_df, self.enz_mw_df, on=['bigg.reaction'])
self.data_df = pd.merge(self.data_df, self.flux_df, on=['bigg.reaction', 'condition'])
# keep only rows with non-zero flux, non-zero enzyme, and stoichiometry coeff = 1
ind = (self.data_df['flux [mmol/gCDW/h]'] > 0) & \
(self.data_df['enzyme conc [M]'] > 0) & \
(self.data_df['stoichiometry'] == -1)
self.data_df = self.data_df[ind]
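# data_df now holds one row per (reaction, substrate, condition) combination with the
# KM value, metabolite concentration, enzyme concentration/MW and flux merged in.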
def get_metabolite_data(self):
"""
get the metabolomics data from Gerosa et al. 2015
"""
_df = pd.read_csv(S.ECOLI_METAB_FNAME, index_col=0)  # DataFrame.from_csv was removed from pandas
_df.index.name = 'bigg.metabolite'
met_conc_mean = _df.iloc[:, 1:9] * 1e-3 # convert mM to M
met_conc_std = _df.iloc[:, 10:] * 1e-3 # convert mM to M
met_conc_mean.columns = [c[:-7].lower() for c in met_conc_mean.columns]
met_conc_std.columns = [c[:-6].lower() for c in met_conc_std.columns]
met_conc_df = pd.melt(met_conc_mean.reset_index(), id_vars='bigg.metabolite',
var_name='condition', value_name='metabolite conc [M]')
# join the concentration data with the molecular weight of each metabolite
met_mw_df = pd.DataFrame(columns=('bigg.metabolite', 'metabolite MW [Da]'),
data=[(m.id, m.formula_weight) for m in self.cobra_model.metabolites])
met_mw_df.set_index('bigg.metabolite', inplace=True)
met_conc_df = met_conc_df.join(met_mw_df, on='bigg.metabolite')
return met_conc_df
def get_km_data(self):
km_df = S.read_cache('km')
km_df = km_df[km_df['Organism'] == 'Escherichia coli']
km_df = km_df[km_df['KM_Value'] != -999]
km_df = km_df[['EC_number', 'KM_Value', 'bigg.metabolite']]
km_df = km_df.groupby(('EC_number', 'bigg.metabolite')).median().reset_index()
# some compounds have specific stereochemistry in BRENDA, but not in the
# E. coli model (and other datasets). Therefore, we need to map them to
# the stereo-unspecific BiGG IDs in order to join the tables later
stereo_mapping = {'fdp_B_c': 'fdp_c', 'f6p_B_c': 'f6p_c'}
km_df['bigg.metabolite'].replace(stereo_mapping, inplace=True)
# get a mapping from EC numbers to bigg.reaction,
# remember we need to duplicate every reaction ID also for the reverse
# reaction (since we use a model that is converted to irreversible)
model_reactions = S.get_reaction_table_from_xls()
bigg2ec = model_reactions[['Reaction Abbreviation', 'EC Number']]
bigg2ec.rename(columns={'Reaction Abbreviation': 'bigg.reaction',
'EC Number': 'EC_number'}, inplace=True)
bigg2ec = bigg2ec[~ | pd.isnull(bigg2ec['EC_number']) | pandas.isnull |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
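# The frames below are hard-coded expected forecast outputs: each has 'time', 'fcst',
# 'fcst_lower' and 'fcst_upper' columns, and the 95/99 suffix is the confidence level
# assumed by the corresponding unit test.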
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188: pd.Timestamp("2012-11-06 00:00:00"),
189: pd.Timestamp("2012-11-07 00:00:00"),
190: pd.Timestamp("2012-11-08 00:00:00"),
191: pd.Timestamp("2012-11-09 00:00:00"),
192: pd.Timestamp("2012-11-10 00:00:00"),
193: pd.Timestamp("2012-11-11 00:00:00"),
194: pd.Timestamp("2012-11-12 00:00:00"),
195: pd.Timestamp("2012-11-13 00:00:00"),
196: pd.Timestamp("2012-11-14 00:00:00"),
197: pd.Timestamp("2012-11-15 00:00:00"),
198: pd.Timestamp("2012-11-16 00:00:00"),
199: pd.Timestamp("2012-11-17 00:00:00"),
200: pd.Timestamp("2012-11-18 00:00:00"),
201: pd.Timestamp("2012-11-19 00:00:00"),
202: pd.Timestamp("2012-11-20 00:00:00"),
203: pd.Timestamp("2012-11-21 00:00:00"),
204: pd.Timestamp("2012-11-22 00:00:00"),
205: pd.Timestamp("2012-11-23 00:00:00"),
206: pd.Timestamp("2012-11-24 00:00:00"),
207: pd.Timestamp("2012-11-25 00:00:00"),
208: pd.Timestamp("2012-11-26 00:00:00"),
209: pd.Timestamp("2012-11-27 00:00:00"),
210: pd.Timestamp("2012-11-28 00:00:00"),
211: pd.Timestamp("2012-11-29 00:00:00"),
212: pd.Timestamp("2012-11-30 00:00:00"),
213: pd.Timestamp("2012-12-01 00:00:00"),
214: pd.Timestamp("2012-12-02 00:00:00"),
215: pd.Timestamp("2012-12-03 00:00:00"),
216: pd.Timestamp("2012-12-04 00:00:00"),
217: pd.Timestamp("2012-12-05 00:00:00"),
218: pd.Timestamp("2012-12-06 00:00:00"),
219: pd.Timestamp("2012-12-07 00:00:00"),
220: pd.Timestamp("2012-12-08 00:00:00"),
221: pd.Timestamp("2012-12-09 00:00:00"),
222: pd.Timestamp("2012-12-10 00:00:00"),
223: pd.Timestamp("2012-12-11 00:00:00"),
224: pd.Timestamp("2012-12-12 00:00:00"),
225: pd.Timestamp("2012-12-13 00:00:00"),
226: pd.Timestamp("2012-12-14 00:00:00"),
227: pd.Timestamp("2012-12-15 00:00:00"),
228: pd.Timestamp("2012-12-16 00:00:00"),
229: pd.Timestamp("2012-12-17 00:00:00"),
230: pd.Timestamp("2012-12-18 00:00:00"),
231: pd.Timestamp("2012-12-19 00:00:00"),
232: pd.Timestamp("2012-12-20 00:00:00"),
233: pd.Timestamp("2012-12-21 00:00:00"),
234: pd.Timestamp("2012-12-22 00:00:00"),
235: pd.Timestamp("2012-12-23 00:00:00"),
236: pd.Timestamp("2012-12-24 00:00:00"),
237: pd.Timestamp("2012-12-25 00:00:00"),
238: pd.Timestamp("2012-12-26 00:00:00"),
239: pd.Timestamp("2012-12-27 00:00:00"),
240: pd.Timestamp("2012-12-28 00:00:00"),
241: pd.Timestamp("2012-12-29 00:00:00"),
242: pd.Timestamp("2012-12-30 00:00:00"),
243: pd.Timestamp("2012-12-31 00:00:00"),
244: pd.Timestamp("2013-01-01 00:00:00"),
245: pd.Timestamp("2013-01-02 00:00:00"),
246: pd.Timestamp("2013-01-03 00:00:00"),
247: pd.Timestamp("2013-01-04 00:00:00"),
248: pd.Timestamp("2013-01-05 00:00:00"),
249: pd.Timestamp("2013-01-06 00:00:00"),
250: pd.Timestamp("2013-01-07 00:00:00"),
251: pd.Timestamp("2013-01-08 00:00:00"),
252: pd.Timestamp("2013-01-09 00:00:00"),
253: pd.Timestamp("2013-01-10 00:00:00"),
254: pd.Timestamp("2013-01-11 00:00:00"),
255: pd.Timestamp("2013-01-12 00:00:00"),
256: pd.Timestamp("2013-01-13 00:00:00"),
257: pd.Timestamp("2013-01-14 00:00:00"),
258: pd.Timestamp("2013-01-15 00:00:00"),
259: pd.Timestamp("2013-01-16 00:00:00"),
260: pd.Timestamp("2013-01-17 00:00:00"),
261: pd.Timestamp("2013-01-18 00:00:00"),
262: pd.Timestamp("2013-01-19 00:00:00"),
263: pd.Timestamp("2013-01-20 00:00:00"),
264: pd.Timestamp("2013-01-21 00:00:00"),
265: pd.Timestamp("2013-01-22 00:00:00"),
266: pd.Timestamp("2013-01-23 00:00:00"),
267: pd.Timestamp("2013-01-24 00:00:00"),
268: pd.Timestamp("2013-01-25 00:00:00"),
269: pd.Timestamp("2013-01-26 00:00:00"),
270: pd.Timestamp("2013-01-27 00:00:00"),
271: pd.Timestamp("2013-01-28 00:00:00"),
272: pd.Timestamp("2013-01-29 00:00:00"),
273: pd.Timestamp("2013-01-30 00:00:00"),
274: pd.Timestamp("2013-01-31 00:00:00"),
275: pd.Timestamp("2013-02-01 00:00:00"),
276: pd.Timestamp("2013-02-02 00:00:00"),
277: pd.Timestamp("2013-02-03 00:00:00"),
278: pd.Timestamp("2013-02-04 00:00:00"),
279: pd.Timestamp("2013-02-05 00:00:00"),
280: pd.Timestamp("2013-02-06 00:00:00"),
281: pd.Timestamp("2013-02-07 00:00:00"),
282: pd.Timestamp("2013-02-08 00:00:00"),
283: pd.Timestamp("2013-02-09 00:00:00"),
284: pd.Timestamp("2013-02-10 00:00:00"),
285: pd.Timestamp("2013-02-11 00:00:00"),
286: pd.Timestamp("2013-02-12 00:00:00"),
287: pd.Timestamp("2013-02-13 00:00:00"),
288: pd.Timestamp("2013-02-14 00:00:00"),
289: pd.Timestamp("2013-02-15 00:00:00"),
290: pd.Timestamp("2013-02-16 00:00:00"),
291: pd.Timestamp("2013-02-17 00:00:00"),
292: pd.Timestamp("2013-02-18 00:00:00"),
293: pd.Timestamp("2013-02-19 00:00:00"),
294: pd.Timestamp("2013-02-20 00:00:00"),
295: pd.Timestamp("2013-02-21 00:00:00"),
296: pd.Timestamp("2013-02-22 00:00:00"),
297: pd.Timestamp("2013-02-23 00:00:00"),
298: pd.Timestamp("2013-02-24 00:00:00"),
299: pd.Timestamp("2013-02-25 00:00:00"),
300: pd.Timestamp("2013-02-26 00:00:00"),
301: pd.Timestamp("2013-02-27 00:00:00"),
302: pd.Timestamp("2013-02-28 00:00:00"),
303: pd.Timestamp("2013-03-01 00:00:00"),
304: pd.Timestamp("2013-03-02 00:00:00"),
305: pd.Timestamp("2013-03-03 00:00:00"),
306: pd.Timestamp("2013-03-04 00:00:00"),
307: pd.Timestamp("2013-03-05 00:00:00"),
308: pd.Timestamp("2013-03-06 00:00:00"),
309: pd.Timestamp("2013-03-07 00:00:00"),
310: pd.Timestamp("2013-03-08 00:00:00"),
311: pd.Timestamp("2013-03-09 00:00:00"),
312: pd.Timestamp("2013-03-10 00:00:00"),
313: pd.Timestamp("2013-03-11 00:00:00"),
314: pd.Timestamp("2013-03-12 00:00:00"),
315: pd.Timestamp("2013-03-13 00:00:00"),
316: pd.Timestamp("2013-03-14 00:00:00"),
317: pd.Timestamp("2013-03-15 00:00:00"),
318: pd.Timestamp("2013-03-16 00:00:00"),
319: pd.Timestamp("2013-03-17 00:00:00"),
320: pd.Timestamp("2013-03-18 00:00:00"),
321: pd.Timestamp("2013-03-19 00:00:00"),
322: pd.Timestamp("2013-03-20 00:00:00"),
323: pd.Timestamp("2013-03-21 00:00:00"),
324: pd.Timestamp("2013-03-22 00:00:00"),
325: pd.Timestamp("2013-03-23 00:00:00"),
326: pd.Timestamp("2013-03-24 00:00:00"),
327: pd.Timestamp("2013-03-25 00:00:00"),
328: pd.Timestamp("2013-03-26 00:00:00"),
329: pd.Timestamp("2013-03-27 00:00:00"),
330: pd.Timestamp("2013-03-28 00:00:00"),
331: pd.Timestamp("2013-03-29 00:00:00"),
332: pd.Timestamp("2013-03-30 00:00:00"),
333: pd.Timestamp("2013-03-31 00:00:00"),
334: pd.Timestamp("2013-04-01 00:00:00"),
335: pd.Timestamp("2013-04-02 00:00:00"),
336: pd.Timestamp("2013-04-03 00:00:00"),
337: pd.Timestamp("2013-04-04 00:00:00"),
338: pd.Timestamp("2013-04-05 00:00:00"),
339: pd.Timestamp("2013-04-06 00:00:00"),
340: pd.Timestamp("2013-04-07 00:00:00"),
341: pd.Timestamp("2013-04-08 00:00:00"),
342: pd.Timestamp("2013-04-09 00:00:00"),
343: pd.Timestamp("2013-04-10 00:00:00"),
344: pd.Timestamp("2013-04-11 00:00:00"),
345: pd.Timestamp("2013-04-12 00:00:00"),
346: pd.Timestamp("2013-04-13 00:00:00"),
347: pd.Timestamp("2013-04-14 00:00:00"),
348: pd.Timestamp("2013-04-15 00:00:00"),
349: pd.Timestamp("2013-04-16 00:00:00"),
350: pd.Timestamp("2013-04-17 00:00:00"),
351: pd.Timestamp("2013-04-18 00:00:00"),
352: pd.Timestamp("2013-04-19 00:00:00"),
353: pd.Timestamp("2013-04-20 00:00:00"),
354: pd.Timestamp("2013-04-21 00:00:00"),
355: pd.Timestamp("2013-04-22 00:00:00"),
356: pd.Timestamp("2013-04-23 00:00:00"),
357: pd.Timestamp("2013-04-24 00:00:00"),
358: pd.Timestamp("2013-04-25 00:00:00"),
359: pd.Timestamp("2013-04-26 00:00:00"),
360: pd.Timestamp("2013-04-27 00:00:00"),
361: pd.Timestamp("2013-04-28 00:00:00"),
362: pd.Timestamp("2013-04-29 00:00:00"),
363: pd.Timestamp("2013-04-30 00:00:00"),
364: pd.Timestamp("2013-05-01 00:00:00"),
365: pd.Timestamp("2013-05-02 00:00:00"),
366: pd.Timestamp("2013-05-03 00:00:00"),
367: pd.Timestamp("2013-05-04 00:00:00"),
368: pd.Timestamp("2013-05-05 00:00:00"),
369: pd.Timestamp("2013-05-06 00:00:00"),
370: pd.Timestamp("2013-05-07 00:00:00"),
371: pd.Timestamp("2013-05-08 00:00:00"),
372: pd.Timestamp("2013-05-09 00:00:00"),
373: pd.Timestamp("2013-05-10 00:00:00"),
374: pd.Timestamp("2013-05-11 00:00:00"),
375: pd.Timestamp("2013-05-12 00:00:00"),
376: pd.Timestamp("2013-05-13 00:00:00"),
377: pd.Timestamp("2013-05-14 00:00:00"),
378: pd.Timestamp("2013-05-15 00:00:00"),
379: pd.Timestamp("2013-05-16 00:00:00"),
380: pd.Timestamp("2013-05-17 00:00:00"),
381: pd.Timestamp("2013-05-18 00:00:00"),
382: pd.Timestamp("2013-05-19 00:00:00"),
383: pd.Timestamp("2013-05-20 00:00:00"),
384: pd.Timestamp("2013-05-21 00:00:00"),
385: pd.Timestamp("2013-05-22 00:00:00"),
386: pd.Timestamp("2013-05-23 00:00:00"),
387: pd.Timestamp("2013-05-24 00:00:00"),
388: pd.Timestamp("2013-05-25 00:00:00"),
389: pd.Timestamp("2013-05-26 00:00:00"),
390: pd.Timestamp("2013-05-27 00:00:00"),
391: pd.Timestamp("2013-05-28 00:00:00"),
392: pd.Timestamp("2013-05-29 00:00:00"),
393: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.348604308646497,
1: 8.348964254851197,
2: 8.349324201055898,
3: 8.349684147260598,
4: 8.350044093465298,
5: 8.350404039669998,
6: 8.3507639858747,
7: 8.3511239320794,
8: 8.3514838782841,
9: 8.351843824488801,
10: 8.352203770693501,
11: 8.352563716898201,
12: 8.352923663102903,
13: 8.353283609307603,
14: 8.353643555512303,
15: 8.354003501717003,
16: 8.354363447921704,
17: 8.354723394126404,
18: 8.355083340331104,
19: 8.355443286535806,
20: 8.355803232740506,
21: 8.356163178945206,
22: 8.356523125149906,
23: 8.356883071354607,
24: 8.357243017559307,
25: 8.357602963764007,
26: 8.357962909968709,
27: 8.358322856173409,
28: 8.358682802378109,
29: 8.35904274858281,
30: 8.35940269478751,
31: 8.35976264099221,
32: 8.36012258719691,
33: 8.360482533401612,
34: 8.360842479606312,
35: 8.361202425811012,
36: 8.361562372015714,
37: 8.361922318220413,
38: 8.362282264425113,
39: 8.362642210629813,
40: 8.363002156834515,
41: 8.363362103039215,
42: 8.363722049243915,
43: 8.364081995448617,
44: 8.364441941653316,
45: 8.364801887858016,
46: 8.365161834062716,
47: 8.365521780267418,
48: 8.365881726472118,
49: 8.366241672676818,
50: 8.36660161888152,
51: 8.36696156508622,
52: 8.36732151129092,
53: 8.367681457495621,
54: 8.368041403700321,
55: 8.368401349905021,
56: 8.36876129610972,
57: 8.369121242314423,
58: 8.369481188519122,
59: 8.369841134723822,
60: 8.370201080928524,
61: 8.370561027133224,
62: 8.370920973337924,
63: 8.371280919542624,
64: 8.371640865747326,
65: 8.372000811952026,
66: 8.372360758156725,
67: 8.372720704361427,
68: 8.373080650566127,
69: 8.373440596770827,
70: 8.373800542975529,
71: 8.374160489180229,
72: 8.374520435384929,
73: 8.374880381589628,
74: 8.37524032779433,
75: 8.37560027399903,
76: 8.37596022020373,
77: 8.376320166408432,
78: 8.376680112613132,
79: 8.377040058817832,
80: 8.377400005022531,
81: 8.377759951227233,
82: 8.378119897431933,
83: 8.378479843636633,
84: 8.378839789841335,
85: 8.379199736046035,
86: 8.379559682250735,
87: 8.379919628455436,
88: 8.380279574660136,
89: 8.380639520864836,
90: 8.380999467069536,
91: 8.381359413274238,
92: 8.381719359478938,
93: 8.382079305683638,
94: 8.38243925188834,
95: 8.38279919809304,
96: 8.38315914429774,
97: 8.383519090502439,
98: 8.38387903670714,
99: 8.38423898291184,
100: 8.38459892911654,
101: 8.384958875321242,
102: 8.385318821525942,
103: 8.385678767730642,
104: 8.386038713935344,
105: 8.386398660140044,
106: 8.386758606344744,
107: 8.387118552549444,
108: 8.387478498754145,
109: 8.387838444958845,
110: 8.388198391163545,
111: 8.388558337368247,
112: 8.388918283572947,
113: 8.389278229777647,
114: 8.389638175982347,
115: 8.389998122187048,
116: 8.390358068391748,
117: 8.390718014596448,
118: 8.39107796080115,
119: 8.39143790700585,
120: 8.39179785321055,
121: 8.392157799415251,
122: 8.392517745619951,
123: 8.392877691824651,
124: 8.393237638029351,
125: 8.393597584234053,
126: 8.393957530438753,
127: 8.394317476643453,
128: 8.394677422848154,
129: 8.395037369052854,
130: 8.395397315257554,
131: 8.395757261462254,
132: 8.396117207666956,
133: 8.396477153871656,
134: 8.396837100076356,
135: 8.397197046281057,
136: 8.397556992485757,
137: 8.397916938690457,
138: 8.398276884895157,
139: 8.398636831099859,
140: 8.398996777304559,
141: 8.399356723509259,
142: 8.39971666971396,
143: 8.40007661591866,
144: 8.40043656212336,
145: 8.400796508328062,
146: 8.401156454532762,
147: 8.401516400737462,
148: 8.401876346942162,
149: 8.402236293146863,
150: 8.402596239351563,
151: 8.402956185556263,
152: 8.403316131760965,
153: 8.403676077965665,
154: 8.404036024170365,
155: 8.404395970375065,
156: 8.404755916579767,
157: 8.405115862784466,
158: 8.405475808989166,
159: 8.405835755193868,
160: 8.406195701398568,
161: 8.406555647603268,
162: 8.40691559380797,
163: 8.40727554001267,
164: 8.40763548621737,
165: 8.40799543242207,
166: 8.408355378626771,
167: 8.408715324831471,
168: 8.409075271036171,
169: 8.409435217240873,
170: 8.409795163445573,
171: 8.410155109650272,
172: 8.410515055854972,
173: 8.410875002059674,
174: 8.411234948264374,
175: 8.411594894469074,
176: 8.411954840673776,
177: 8.412314786878476,
178: 8.412674733083175,
179: 8.413034679287877,
180: 8.413394625492577,
181: 8.413754571697277,
182: 8.414114517901977,
183: 8.414474464106679,
184: 8.414834410311379,
185: 8.415194356516078,
186: 8.41555430272078,
187: 8.41591424892548,
188: 8.41627419513018,
189: 8.41663414133488,
190: 8.416994087539582,
191: 8.417354033744282,
192: 8.417713979948982,
193: 8.418073926153683,
194: 8.418433872358383,
195: 8.418793818563083,
196: 8.419153764767785,
197: 8.419513710972485,
198: 8.419873657177185,
199: 8.420233603381885,
200: 8.420593549586586,
201: 8.420953495791286,
202: 8.421313441995986,
203: 8.421673388200688,
204: 8.422033334405388,
205: 8.422393280610088,
206: 8.422753226814788,
207: 8.42311317301949,
208: 8.42347311922419,
209: 8.423833065428889,
210: 8.42419301163359,
211: 8.42455295783829,
212: 8.42491290404299,
213: 8.42527285024769,
214: 8.425632796452392,
215: 8.425992742657092,
216: 8.426352688861792,
217: 8.426712635066494,
218: 8.427072581271194,
219: 8.427432527475894,
220: 8.427792473680595,
221: 8.428152419885295,
222: 8.428512366089995,
223: 8.428872312294695,
224: 8.429232258499397,
225: 8.429592204704097,
226: 8.429952150908797,
227: 8.430312097113498,
228: 8.430672043318198,
229: 8.431031989522898,
230: 8.431391935727598,
231: 8.4317518819323,
232: 8.432111828137,
233: 8.4324717743417,
234: 8.432831720546401,
235: 8.433191666751101,
236: 8.433551612955801,
237: 8.433911559160503,
238: 8.434271505365203,
239: 8.434631451569903,
240: 8.434991397774603,
241: 8.435351343979304,
242: 8.435711290184004,
243: 8.436071236388704,
244: 8.436431182593406,
245: 8.436791128798106,
246: 8.437151075002806,
247: 8.437511021207506,
248: 8.437870967412207,
249: 8.438230913616907,
250: 8.438590859821607,
251: 8.438950806026309,
252: 8.439310752231009,
253: 8.439670698435709,
254: 8.44003064464041,
255: 8.44039059084511,
256: 8.44075053704981,
257: 8.44111048325451,
258: 8.441470429459212,
259: 8.441830375663912,
260: 8.442190321868612,
261: 8.442550268073314,
262: 8.442910214278013,
263: 8.443270160482713,
264: 8.443630106687413,
265: 8.443990052892115,
266: 8.444349999096815,
267: 8.444709945301515,
268: 8.445069891506217,
269: 8.445429837710916,
270: 8.445789783915616,
271: 8.446149730120318,
272: 8.446509676325018,
273: 8.446869622529718,
274: 8.447229568734418,
275: 8.44758951493912,
276: 8.44794946114382,
277: 8.44830940734852,
278: 8.448669353553221,
279: 8.449029299757921,
280: 8.449389245962621,
281: 8.449749192167321,
282: 8.450109138372023,
283: 8.450469084576723,
284: 8.450829030781422,
285: 8.451188976986124,
286: 8.451548923190824,
287: 8.451908869395524,
288: 8.452268815600226,
289: 8.452628761804926,
290: 8.452988708009626,
291: 8.453348654214325,
292: 8.453708600419027,
293: 8.454068546623727,
294: 8.454428492828427,
295: 8.454788439033129,
296: 8.455148385237829,
297: 8.455508331442529,
298: 8.455868277647228,
299: 8.45622822385193,
300: 8.45658817005663,
301: 8.45694811626133,
302: 8.457308062466032,
303: 8.457668008670732,
304: 8.458027954875432,
305: 8.458387901080131,
306: 8.458747847284833,
307: 8.459107793489533,
308: 8.459467739694233,
309: 8.459827685898935,
310: 8.460187632103635,
311: 8.460547578308335,
312: 8.460907524513036,
313: 8.461267470717736,
314: 8.461627416922436,
315: 8.461987363127136,
316: 8.462347309331838,
317: 8.462707255536538,
318: 8.463067201741238,
319: 8.46342714794594,
320: 8.46378709415064,
321: 8.46414704035534,
322: 8.464506986560039,
323: 8.46486693276474,
324: 8.46522687896944,
325: 8.46558682517414,
326: 8.465946771378842,
327: 8.466306717583542,
328: 8.466666663788242,
329: 8.467026609992944,
330: 8.467386556197644,
331: 8.467746502402344,
332: 8.468106448607044,
333: 8.468466394811745,
334: 8.468826341016445,
335: 8.469186287221145,
336: 8.469546233425847,
337: 8.469906179630547,
338: 8.470266125835247,
339: 8.470626072039947,
340: 8.470986018244648,
341: 8.471345964449348,
342: 8.471705910654048,
343: 8.47206585685875,
344: 8.47242580306345,
345: 8.47278574926815,
346: 8.473145695472851,
347: 8.473505641677551,
348: 8.473865587882251,
349: 8.474225534086951,
350: 8.474585480291653,
351: 8.474945426496353,
352: 8.475305372701053,
353: 8.475665318905754,
354: 8.476025265110454,
355: 8.476385211315154,
356: 8.476745157519854,
357: 8.477105103724556,
358: 8.477465049929256,
359: 8.477824996133956,
360: 8.478184942338657,
361: 8.478544888543357,
362: 8.478904834748057,
363: 8.479264780952759,
364: 8.479624727157459,
365: 8.479984673362159,
366: 8.480344619566859,
367: 8.48070456577156,
368: 8.48106451197626,
369: 8.48142445818096,
370: 8.481784404385662,
371: 8.482144350590362,
372: 8.482504296795062,
373: 8.482864242999762,
374: 8.483224189204464,
375: 8.483584135409163,
376: 8.483944081613863,
377: 8.484304027818565,
378: 8.484663974023265,
379: 8.485023920227965,
380: 8.485383866432667,
381: 8.485743812637367,
382: 8.486103758842066,
383: 8.486463705046766,
384: 8.486823651251468,
385: 8.487183597456168,
386: 8.487543543660868,
387: 8.48790348986557,
388: 8.48826343607027,
389: 8.48862338227497,
390: 8.48898332847967,
391: 8.489343274684371,
392: 8.489703220889071,
393: 8.490063167093771,
},
"fcst_lower": {
0: -np.inf,
1: -np.inf,
2: -np.inf,
3: -np.inf,
4: -np.inf,
5: -np.inf,
6: -np.inf,
7: -np.inf,
8: -np.inf,
9: -np.inf,
10: -np.inf,
11: -np.inf,
12: -np.inf,
13: -np.inf,
14: -np.inf,
15: -np.inf,
16: -np.inf,
17: -np.inf,
18: -np.inf,
19: -np.inf,
20: -np.inf,
21: -np.inf,
22: -np.inf,
23: -np.inf,
24: -np.inf,
25: -np.inf,
26: -np.inf,
27: -np.inf,
28: -np.inf,
29: -np.inf,
30: -np.inf,
31: -np.inf,
32: -np.inf,
33: -np.inf,
34: -np.inf,
35: -np.inf,
36: -np.inf,
37: -np.inf,
38: -np.inf,
39: -np.inf,
40: -np.inf,
41: -np.inf,
42: -np.inf,
43: -np.inf,
44: -np.inf,
45: -np.inf,
46: -np.inf,
47: -np.inf,
48: -np.inf,
49: -np.inf,
50: -np.inf,
51: -np.inf,
52: -np.inf,
53: -np.inf,
54: -np.inf,
55: -np.inf,
56: -np.inf,
57: -np.inf,
58: -np.inf,
59: -np.inf,
60: -np.inf,
61: -np.inf,
62: -np.inf,
63: -np.inf,
64: -np.inf,
65: -np.inf,
66: -np.inf,
67: -np.inf,
68: -np.inf,
69: -np.inf,
70: -np.inf,
71: -np.inf,
72: -np.inf,
73: -np.inf,
74: -np.inf,
75: -np.inf,
76: -np.inf,
77: -np.inf,
78: -np.inf,
79: -np.inf,
80: -np.inf,
81: -np.inf,
82: -np.inf,
83: -np.inf,
84: -np.inf,
85: -np.inf,
86: -np.inf,
87: -np.inf,
88: -np.inf,
89: -np.inf,
90: -np.inf,
91: -np.inf,
92: -np.inf,
93: -np.inf,
94: -np.inf,
95: -np.inf,
96: -np.inf,
97: -np.inf,
98: -np.inf,
99: -np.inf,
100: -np.inf,
101: -np.inf,
102: -np.inf,
103: -np.inf,
104: -np.inf,
105: -np.inf,
106: -np.inf,
107: -np.inf,
108: -np.inf,
109: -np.inf,
110: -np.inf,
111: -np.inf,
112: -np.inf,
113: -np.inf,
114: -np.inf,
115: -np.inf,
116: -np.inf,
117: -np.inf,
118: -np.inf,
119: -np.inf,
120: -np.inf,
121: -np.inf,
122: -np.inf,
123: -np.inf,
124: -np.inf,
125: -np.inf,
126: -np.inf,
127: -np.inf,
128: -np.inf,
129: -np.inf,
130: -np.inf,
131: -np.inf,
132: -np.inf,
133: -np.inf,
134: -np.inf,
135: -np.inf,
136: -np.inf,
137: -np.inf,
138: -np.inf,
139: -np.inf,
140: -np.inf,
141: -np.inf,
142: -np.inf,
143: -np.inf,
144: -np.inf,
145: -np.inf,
146: -np.inf,
147: -np.inf,
148: -np.inf,
149: -np.inf,
150: -np.inf,
151: -np.inf,
152: -np.inf,
153: -np.inf,
154: -np.inf,
155: -np.inf,
156: -np.inf,
157: -np.inf,
158: -np.inf,
159: -np.inf,
160: -np.inf,
161: -np.inf,
162: -np.inf,
163: -np.inf,
164: -np.inf,
165: -np.inf,
166: -np.inf,
167: -np.inf,
168: -np.inf,
169: -np.inf,
170: -np.inf,
171: -np.inf,
172: -np.inf,
173: -np.inf,
174: -np.inf,
175: -np.inf,
176: -np.inf,
177: -np.inf,
178: -np.inf,
179: -np.inf,
180: -np.inf,
181: -np.inf,
182: -np.inf,
183: -np.inf,
184: -np.inf,
185: -np.inf,
186: -np.inf,
187: -np.inf,
188: -np.inf,
189: -np.inf,
190: -np.inf,
191: -np.inf,
192: -np.inf,
193: -np.inf,
194: -np.inf,
195: -np.inf,
196: -np.inf,
197: -np.inf,
198: -np.inf,
199: -np.inf,
200: -np.inf,
201: -np.inf,
202: -np.inf,
203: -np.inf,
204: -np.inf,
205: -np.inf,
206: -np.inf,
207: -np.inf,
208: -np.inf,
209: -np.inf,
210: -np.inf,
211: -np.inf,
212: -np.inf,
213: -np.inf,
214: -np.inf,
215: -np.inf,
216: -np.inf,
217: -np.inf,
218: -np.inf,
219: -np.inf,
220: -np.inf,
221: -np.inf,
222: -np.inf,
223: -np.inf,
224: -np.inf,
225: -np.inf,
226: -np.inf,
227: -np.inf,
228: -np.inf,
229: -np.inf,
230: -np.inf,
231: -np.inf,
232: -np.inf,
233: -np.inf,
234: -np.inf,
235: -np.inf,
236: -np.inf,
237: -np.inf,
238: -np.inf,
239: -np.inf,
240: -np.inf,
241: -np.inf,
242: -np.inf,
243: -np.inf,
244: -np.inf,
245: -np.inf,
246: -np.inf,
247: -np.inf,
248: -np.inf,
249: -np.inf,
250: -np.inf,
251: -np.inf,
252: -np.inf,
253: -np.inf,
254: -np.inf,
255: -np.inf,
256: -np.inf,
257: -np.inf,
258: -np.inf,
259: -np.inf,
260: -np.inf,
261: -np.inf,
262: -np.inf,
263: -np.inf,
264: -np.inf,
265: -np.inf,
266: -np.inf,
267: -np.inf,
268: -np.inf,
269: -np.inf,
270: -np.inf,
271: -np.inf,
272: -np.inf,
273: -np.inf,
274: -np.inf,
275: -np.inf,
276: -np.inf,
277: -np.inf,
278: -np.inf,
279: -np.inf,
280: -np.inf,
281: -np.inf,
282: -np.inf,
283: -np.inf,
284: -np.inf,
285: -np.inf,
286: -np.inf,
287: -np.inf,
288: -np.inf,
289: -np.inf,
290: -np.inf,
291: -np.inf,
292: -np.inf,
293: -np.inf,
294: -np.inf,
295: -np.inf,
296: -np.inf,
297: -np.inf,
298: -np.inf,
299: -np.inf,
300: -np.inf,
301: -np.inf,
302: -np.inf,
303: -np.inf,
304: -np.inf,
305: -np.inf,
306: -np.inf,
307: -np.inf,
308: -np.inf,
309: -np.inf,
310: -np.inf,
311: -np.inf,
312: -np.inf,
313: -np.inf,
314: -np.inf,
315: -np.inf,
316: -np.inf,
317: -np.inf,
318: -np.inf,
319: -np.inf,
320: -np.inf,
321: -np.inf,
322: -np.inf,
323: -np.inf,
324: -np.inf,
325: -np.inf,
326: -np.inf,
327: -np.inf,
328: -np.inf,
329: -np.inf,
330: -np.inf,
331: -np.inf,
332: -np.inf,
333: -np.inf,
334: -np.inf,
335: -np.inf,
336: -np.inf,
337: -np.inf,
338: -np.inf,
339: -np.inf,
340: -np.inf,
341: -np.inf,
342: -np.inf,
343: -np.inf,
344: -np.inf,
345: -np.inf,
346: -np.inf,
347: -np.inf,
348: -np.inf,
349: -np.inf,
350: -np.inf,
351: -np.inf,
352: -np.inf,
353: -np.inf,
354: -np.inf,
355: -np.inf,
356: -np.inf,
357: -np.inf,
358: -np.inf,
359: -np.inf,
360: -np.inf,
361: -np.inf,
362: -np.inf,
363: -np.inf,
364: -np.inf,
365: -np.inf,
366: -np.inf,
367: -np.inf,
368: -np.inf,
369: -np.inf,
370: -np.inf,
371: -np.inf,
372: -np.inf,
373: -np.inf,
374: -np.inf,
375: -np.inf,
376: -np.inf,
377: -np.inf,
378: -np.inf,
379: -np.inf,
380: -np.inf,
381: -np.inf,
382: -np.inf,
383: -np.inf,
384: -np.inf,
385: -np.inf,
386: -np.inf,
387: -np.inf,
388: -np.inf,
389: -np.inf,
390: -np.inf,
391: -np.inf,
392: -np.inf,
393: -np.inf,
},
"fcst_upper": {
0: np.inf,
1: np.inf,
2: np.inf,
3: np.inf,
4: np.inf,
5: np.inf,
6: np.inf,
7: np.inf,
8: np.inf,
9: np.inf,
10: np.inf,
11: np.inf,
12: np.inf,
13: np.inf,
14: np.inf,
15: np.inf,
16: np.inf,
17: np.inf,
18: np.inf,
19: np.inf,
20: np.inf,
21: np.inf,
22: np.inf,
23: np.inf,
24: np.inf,
25: np.inf,
26: np.inf,
27: np.inf,
28: np.inf,
29: np.inf,
30: np.inf,
31: np.inf,
32: np.inf,
33: np.inf,
34: np.inf,
35: np.inf,
36: np.inf,
37: np.inf,
38: np.inf,
39: np.inf,
40: np.inf,
41: np.inf,
42: np.inf,
43: np.inf,
44: np.inf,
45: np.inf,
46: np.inf,
47: np.inf,
48: np.inf,
49: np.inf,
50: np.inf,
51: np.inf,
52: np.inf,
53: np.inf,
54: np.inf,
55: np.inf,
56: np.inf,
57: np.inf,
58: np.inf,
59: np.inf,
60: np.inf,
61: np.inf,
62: np.inf,
63: np.inf,
64: np.inf,
65: np.inf,
66: np.inf,
67: np.inf,
68: np.inf,
69: np.inf,
70: np.inf,
71: np.inf,
72: np.inf,
73: np.inf,
74: np.inf,
75: np.inf,
76: np.inf,
77: np.inf,
78: np.inf,
79: np.inf,
80: np.inf,
81: np.inf,
82: np.inf,
83: np.inf,
84: np.inf,
85: np.inf,
86: np.inf,
87: np.inf,
88: np.inf,
89: np.inf,
90: np.inf,
91: np.inf,
92: np.inf,
93: np.inf,
94: np.inf,
95: np.inf,
96: np.inf,
97: np.inf,
98: np.inf,
99: np.inf,
100: np.inf,
101: np.inf,
102: np.inf,
103: np.inf,
104: np.inf,
105: np.inf,
106: np.inf,
107: np.inf,
108: np.inf,
109: np.inf,
110: np.inf,
111: np.inf,
112: np.inf,
113: np.inf,
114: np.inf,
115: np.inf,
116: np.inf,
117: np.inf,
118: np.inf,
119: np.inf,
120: np.inf,
121: np.inf,
122: np.inf,
123: np.inf,
124: np.inf,
125: np.inf,
126: np.inf,
127: np.inf,
128: np.inf,
129: np.inf,
130: np.inf,
131: np.inf,
132: np.inf,
133: np.inf,
134: np.inf,
135: np.inf,
136: np.inf,
137: np.inf,
138: np.inf,
139: np.inf,
140: np.inf,
141: np.inf,
142: np.inf,
143: np.inf,
144: np.inf,
145: np.inf,
146: np.inf,
147: np.inf,
148: np.inf,
149: np.inf,
150: np.inf,
151: np.inf,
152: np.inf,
153: np.inf,
154: np.inf,
155: np.inf,
156: np.inf,
157: np.inf,
158: np.inf,
159: np.inf,
160: np.inf,
161: np.inf,
162: np.inf,
163: np.inf,
164: np.inf,
165: np.inf,
166: np.inf,
167: np.inf,
168: np.inf,
169: np.inf,
170: np.inf,
171: np.inf,
172: np.inf,
173: np.inf,
174: np.inf,
175: np.inf,
176: np.inf,
177: np.inf,
178: np.inf,
179: np.inf,
180: np.inf,
181: np.inf,
182: np.inf,
183: np.inf,
184: np.inf,
185: np.inf,
186: np.inf,
187: np.inf,
188: np.inf,
189: np.inf,
190: np.inf,
191: np.inf,
192: np.inf,
193: np.inf,
194: np.inf,
195: np.inf,
196: np.inf,
197: np.inf,
198: np.inf,
199: np.inf,
200: np.inf,
201: np.inf,
202: np.inf,
203: np.inf,
204: np.inf,
205: np.inf,
206: np.inf,
207: np.inf,
208: np.inf,
209: np.inf,
210: np.inf,
211: np.inf,
212: np.inf,
213: np.inf,
214: np.inf,
215: np.inf,
216: np.inf,
217: np.inf,
218: np.inf,
219: np.inf,
220: np.inf,
221: np.inf,
222: np.inf,
223: np.inf,
224: np.inf,
225: np.inf,
226: np.inf,
227: np.inf,
228: np.inf,
229: np.inf,
230: np.inf,
231: np.inf,
232: np.inf,
233: np.inf,
234: np.inf,
235: np.inf,
236: np.inf,
237: np.inf,
238: np.inf,
239: np.inf,
240: np.inf,
241: np.inf,
242: np.inf,
243: np.inf,
244: np.inf,
245: np.inf,
246: np.inf,
247: np.inf,
248: np.inf,
249: np.inf,
250: np.inf,
251: np.inf,
252: np.inf,
253: np.inf,
254: np.inf,
255: np.inf,
256: np.inf,
257: np.inf,
258: np.inf,
259: np.inf,
260: np.inf,
261: np.inf,
262: np.inf,
263: np.inf,
264: np.inf,
265: np.inf,
266: np.inf,
267: np.inf,
268: np.inf,
269: np.inf,
270: np.inf,
271: np.inf,
272: np.inf,
273: np.inf,
274: np.inf,
275: np.inf,
276: np.inf,
277: np.inf,
278: np.inf,
279: np.inf,
280: np.inf,
281: np.inf,
282: np.inf,
283: np.inf,
284: np.inf,
285: np.inf,
286: np.inf,
287: np.inf,
288: np.inf,
289: np.inf,
290: np.inf,
291: np.inf,
292: np.inf,
293: np.inf,
294: np.inf,
295: np.inf,
296: np.inf,
297: np.inf,
298: np.inf,
299: np.inf,
300: np.inf,
301: np.inf,
302: np.inf,
303: np.inf,
304: np.inf,
305: np.inf,
306: np.inf,
307: np.inf,
308: np.inf,
309: np.inf,
310: np.inf,
311: np.inf,
312: np.inf,
313: np.inf,
314: np.inf,
315: np.inf,
316: np.inf,
317: np.inf,
318: np.inf,
319: np.inf,
320: np.inf,
321: np.inf,
322: np.inf,
323: np.inf,
324: np.inf,
325: np.inf,
326: np.inf,
327: np.inf,
328: np.inf,
329: np.inf,
330: np.inf,
331: np.inf,
332: np.inf,
333: np.inf,
334: np.inf,
335: np.inf,
336: np.inf,
337: np.inf,
338: np.inf,
339: np.inf,
340: np.inf,
341: np.inf,
342: np.inf,
343: np.inf,
344: np.inf,
345: np.inf,
346: np.inf,
347: np.inf,
348: np.inf,
349: np.inf,
350: np.inf,
351: np.inf,
352: np.inf,
353: np.inf,
354: np.inf,
355: np.inf,
356: np.inf,
357: np.inf,
358: np.inf,
359: np.inf,
360: np.inf,
361: np.inf,
362: np.inf,
363: np.inf,
364: np.inf,
365: np.inf,
366: np.inf,
367: np.inf,
368: np.inf,
369: np.inf,
370: np.inf,
371: np.inf,
372: np.inf,
373: np.inf,
374: np.inf,
375: np.inf,
376: np.inf,
377: np.inf,
378: np.inf,
379: np.inf,
380: np.inf,
381: np.inf,
382: np.inf,
383: np.inf,
384: np.inf,
385: np.inf,
386: np.inf,
387: np.inf,
388: np.inf,
389: np.inf,
390: np.inf,
391: np.inf,
392: np.inf,
393: np.inf,
},
}
)
PEYTON_FCST_LINEAR_INVALID_NEG_ONE = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188: pd.Timestamp("2012-11-06 00:00:00"),
189: pd.Timestamp("2012-11-07 00:00:00"),
190: pd.Timestamp("2012-11-08 00:00:00"),
191: pd.Timestamp("2012-11-09 00:00:00"),
192: pd.Timestamp("2012-11-10 00:00:00"),
193: pd.Timestamp("2012-11-11 00:00:00"),
194: pd.Timestamp("2012-11-12 00:00:00"),
195: pd.Timestamp("2012-11-13 00:00:00"),
196: pd.Timestamp("2012-11-14 00:00:00"),
197: pd.Timestamp("2012-11-15 00:00:00"),
198: pd.Timestamp("2012-11-16 00:00:00"),
199: pd.Timestamp("2012-11-17 00:00:00"),
200: pd.Timestamp("2012-11-18 00:00:00"),
201: pd.Timestamp("2012-11-19 00:00:00"),
202: pd.Timestamp("2012-11-20 00:00:00"),
203: pd.Timestamp("2012-11-21 00:00:00"),
204: pd.Timestamp("2012-11-22 00:00:00"),
205: pd.Timestamp("2012-11-23 00:00:00"),
206: pd.Timestamp("2012-11-24 00:00:00"),
207: pd.Timestamp("2012-11-25 00:00:00"),
208: pd.Timestamp("2012-11-26 00:00:00"),
209: pd.Timestamp("2012-11-27 00:00:00"),
210: pd.Timestamp("2012-11-28 00:00:00"),
211: pd.Timestamp("2012-11-29 00:00:00"),
212: pd.Timestamp("2012-11-30 00:00:00"),
213: pd.Timestamp("2012-12-01 00:00:00"),
214: pd.Timestamp("2012-12-02 00:00:00"),
215: pd.Timestamp("2012-12-03 00:00:00"),
216: pd.Timestamp("2012-12-04 00:00:00"),
217: pd.Timestamp("2012-12-05 00:00:00"),
218: pd.Timestamp("2012-12-06 00:00:00"),
219: pd.Timestamp("2012-12-07 00:00:00"),
220: pd.Timestamp("2012-12-08 00:00:00"),
221: pd.Timestamp("2012-12-09 00:00:00"),
222: pd.Timestamp("2012-12-10 00:00:00"),
223: pd.Timestamp("2012-12-11 00:00:00"),
224: pd.Timestamp("2012-12-12 00:00:00"),
225: pd.Timestamp("2012-12-13 00:00:00"),
226: pd.Timestamp("2012-12-14 00:00:00"),
227: pd.Timestamp("2012-12-15 00:00:00"),
228: pd.Timestamp("2012-12-16 00:00:00"),
229: pd.Timestamp("2012-12-17 00:00:00"),
230: pd.Timestamp("2012-12-18 00:00:00"),
231: pd.Timestamp("2012-12-19 00:00:00"),
232: pd.Timestamp("2012-12-20 00:00:00"),
233: pd.Timestamp("2012-12-21 00:00:00"),
234: pd.Timestamp("2012-12-22 00:00:00"),
235: pd.Timestamp("2012-12-23 00:00:00"),
236: pd.Timestamp("2012-12-24 00:00:00"),
237: pd.Timestamp("2012-12-25 00:00:00"),
238: pd.Timestamp("2012-12-26 00:00:00"),
239: pd.Timestamp("2012-12-27 00:00:00"),
240: pd.Timestamp("2012-12-28 00:00:00"),
241: pd.Timestamp("2012-12-29 00:00:00"),
242: pd.Timestamp("2012-12-30 00:00:00"),
243: pd.Timestamp("2012-12-31 00:00:00"),
244: pd.Timestamp("2013-01-01 00:00:00"),
245: pd.Timestamp("2013-01-02 00:00:00"),
246: pd.Timestamp("2013-01-03 00:00:00"),
247: pd.Timestamp("2013-01-04 00:00:00"),
248: pd.Timestamp("2013-01-05 00:00:00"),
249: pd.Timestamp("2013-01-06 00:00:00"),
250: | pd.Timestamp("2013-01-07 00:00:00") | pandas.Timestamp |
#!/usr/bin/env python3
import os
import sys
import pandas as pd
import numpy as np
import tensorflow as tf
from Bio import SeqIO
from numpy import array
from numpy import argmax
from warnings import simplefilter
from contextlib import redirect_stderr
from keras.preprocessing.text import Tokenizer
# Hide warning messages
from tensorflow.python.util import deprecation
deprecation._PRINT_DEPRECATION_WARNINGS = False
simplefilter(action='ignore', category=FutureWarning)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
with redirect_stderr(open(os.devnull, "w")):
from tensorflow.keras.models import load_model
from keras.preprocessing.sequence import pad_sequences
# Show full array
pd.set_option('display.max_rows', None)
np.set_printoptions(threshold=sys.maxsize)
# IO files
INFASTA = sys.argv[1]
RESCSV = sys.argv[2] if len(sys.argv) >=3 else None
# Get model
MODELO = 'model_embedded_order_wb.hdf5'
PADVALUE = 38797
def fasta_frame(fasta_file):
fids = []
fseq = []
with open(fasta_file) as fasta:
for record in SeqIO.parse(fasta, 'fasta'):
fids.append(record.id)
fseq.append(str(record.seq).lower())
s1 = pd.Series(fids, name = 'id')
s2 = pd.Series(fseq, name = 'sequence')
data = {'id':s1, 'sequence':s2}
df = pd.concat(data, axis=1)
return df
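# Illustrative note (not in the original script): fasta_frame() returns a DataFrame with
# an 'id' column (FASTA record identifiers) and a 'sequence' column (lower-cased sequences),
# one row per record in the input FASTA file.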
# Read fasta as dataframe
fas_df = fasta_frame(INFASTA)
identifiers = fas_df['id']
sequences = fas_df['sequence']
# Labels
te_labels = {'te': 1, 'nt': 2}
# Tokenize sequences
tkz_seq = Tokenizer(num_words = None, split = ' ', char_level = True, lower = True)
tkz_seq.fit_on_texts(sequences)
x_seq_arrays = tkz_seq.texts_to_sequences(sequences)
vocab_size_seq = len(tkz_seq.word_index) + 1
# Pad sequences
padded_seqs = pad_sequences(x_seq_arrays, padding='post', maxlen = PADVALUE)
# Load model
modelo = load_model(MODELO)
# Predict labels
pred_labels = modelo.predict_classes(padded_seqs, batch_size = 2)
mapped_labels = [k for label in pred_labels for k, v in te_labels.items() if v == label]
# Results
mapped_series = | pd.Series(mapped_labels) | pandas.Series |
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from numba import njit
import vectorbt as vbt
from tests.utils import record_arrays_close
from vectorbt.generic.enums import range_dt, drawdown_dt
from vectorbt.portfolio.enums import order_dt, trade_dt, log_dt
day_dt = np.timedelta64(86400000000000)
example_dt = np.dtype([
('id', np.int64),
('col', np.int64),
('idx', np.int64),
('some_field1', np.float64),
('some_field2', np.float64)
], align=True)
records_arr = np.asarray([
(0, 0, 0, 10, 21),
(1, 0, 1, 11, 20),
(2, 0, 2, 12, 19),
(3, 1, 0, 13, 18),
(4, 1, 1, 14, 17),
(5, 1, 2, 13, 18),
(6, 2, 0, 12, 19),
(7, 2, 1, 11, 20),
(8, 2, 2, 10, 21)
], dtype=example_dt)
records_nosort_arr = np.concatenate((
records_arr[0::3],
records_arr[1::3],
records_arr[2::3]
))
group_by = pd.Index(['g1', 'g1', 'g2', 'g2'])
wrapper = vbt.ArrayWrapper(
index=['x', 'y', 'z'],
columns=['a', 'b', 'c', 'd'],
ndim=2,
freq='1 days'
)
wrapper_grouped = wrapper.replace(group_by=group_by)
records = vbt.records.Records(wrapper, records_arr)
records_grouped = vbt.records.Records(wrapper_grouped, records_arr)
records_nosort = records.replace(records_arr=records_nosort_arr)
records_nosort_grouped = vbt.records.Records(wrapper_grouped, records_nosort_arr)
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# col_mapper.py ############# #
class TestColumnMapper:
def test_col_arr(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
records.col_mapper.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_get_col_arr(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_arr(),
records.col_mapper.col_arr
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0, 1, 1, 1])
)
def test_col_range(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_range,
np.array([
[0, 3]
])
)
np.testing.assert_array_equal(
records.col_mapper.col_range,
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
def test_get_col_range(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_range(),
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_range(),
np.array([[0, 6]])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_range(),
np.array([[0, 6], [6, 9]])
)
def test_col_map(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[0],
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[1],
np.array([3])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[1],
np.array([3, 3, 3, 0])
)
def test_get_col_map(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[0],
records.col_mapper.col_map[0]
)
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[1],
records.col_mapper.col_map[1]
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[1],
np.array([6])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[1],
np.array([6, 3])
)
def test_is_sorted(self):
assert records.col_mapper.is_sorted()
assert not records_nosort.col_mapper.is_sorted()
# ############# mapped_array.py ############# #
mapped_array = records.map_field('some_field1')
mapped_array_grouped = records_grouped.map_field('some_field1')
mapped_array_nosort = records_nosort.map_field('some_field1')
mapped_array_nosort_grouped = records_nosort_grouped.map_field('some_field1')
mapping = {x: 'test_' + str(x) for x in pd.unique(mapped_array.values)}
mp_mapped_array = mapped_array.replace(mapping=mapping)
mp_mapped_array_grouped = mapped_array_grouped.replace(mapping=mapping)
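# Note (illustrative): `mapping` pairs each unique mapped value with a string label,
# e.g. {10.0: 'test_10.0', 11.0: 'test_11.0', ...}, built from pd.unique(mapped_array.values);
# the mp_* arrays above carry this mapping so values can be shown as labels.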
class TestMappedArray:
def test_config(self, tmp_path):
assert vbt.MappedArray.loads(mapped_array.dumps()) == mapped_array
mapped_array.save(tmp_path / 'mapped_array')
assert vbt.MappedArray.load(tmp_path / 'mapped_array') == mapped_array
def test_mapped_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
mapped_array.values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
def test_id_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.id_arr,
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
def test_col_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
mapped_array.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_idx_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].idx_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.idx_arr,
np.array([0, 1, 2, 0, 1, 2, 0, 1, 2])
)
def test_is_sorted(self):
assert mapped_array.is_sorted()
assert mapped_array.is_sorted(incl_id=True)
assert not mapped_array_nosort.is_sorted()
assert not mapped_array_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert mapped_array.sort().is_sorted()
assert mapped_array.sort().is_sorted(incl_id=True)
assert mapped_array.sort(incl_id=True).is_sorted(incl_id=True)
assert mapped_array_nosort.sort().is_sorted()
assert mapped_array_nosort.sort().is_sorted(incl_id=True)
assert mapped_array_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = mapped_array['a'].values >= mapped_array['a'].values.mean()
np.testing.assert_array_equal(
mapped_array['a'].apply_mask(mask_a).id_arr,
np.array([1, 2])
)
mask = mapped_array.values >= mapped_array.values.mean()
filtered = mapped_array.apply_mask(mask)
np.testing.assert_array_equal(
filtered.id_arr,
np.array([2, 3, 4, 5, 6])
)
np.testing.assert_array_equal(filtered.col_arr, mapped_array.col_arr[mask])
np.testing.assert_array_equal(filtered.idx_arr, mapped_array.idx_arr[mask])
assert mapped_array_grouped.apply_mask(mask).wrapper == mapped_array_grouped.wrapper
assert mapped_array_grouped.apply_mask(mask, group_by=False).wrapper.grouper.group_by is None
def test_map_to_mask(self):
@njit
def every_2_nb(inout, idxs, col, mapped_arr):
inout[idxs[::2]] = True
np.testing.assert_array_equal(
mapped_array.map_to_mask(every_2_nb),
np.array([True, False, True, True, False, True, True, False, True])
)
def test_top_n_mask(self):
np.testing.assert_array_equal(
mapped_array.top_n_mask(1),
np.array([False, False, True, False, True, False, True, False, False])
)
def test_bottom_n_mask(self):
np.testing.assert_array_equal(
mapped_array.bottom_n_mask(1),
np.array([True, False, False, True, False, False, False, False, True])
)
def test_top_n(self):
np.testing.assert_array_equal(
mapped_array.top_n(1).id_arr,
np.array([2, 4, 6])
)
def test_bottom_n(self):
np.testing.assert_array_equal(
mapped_array.bottom_n(1).id_arr,
np.array([0, 3, 8])
)
def test_to_pd(self):
target = pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
index=wrapper.index,
columns=wrapper.columns
)
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(),
target['a']
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(),
target
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0.),
target.fillna(0.)
)
mapped_array2 = vbt.MappedArray(
wrapper,
records_arr['some_field1'].tolist() + [1],
records_arr['col'].tolist() + [2],
idx_arr=records_arr['idx'].tolist() + [2]
)
with pytest.raises(Exception):
_ = mapped_array2.to_pd()
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(ignore_index=True),
pd.Series(np.array([10., 11., 12.]), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0, ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., 0.],
[11., 14., 11., 0.],
[12., 13., 10., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 12.],
[11., 11.],
[12., 10.],
[13., np.nan],
[14., np.nan],
[13., np.nan],
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_apply(self):
@njit
def cumsum_apply_nb(idxs, col, a):
return np.cumsum(a)
np.testing.assert_array_equal(
mapped_array['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
mapped_array.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert mapped_array_grouped.apply(cumsum_apply_nb).wrapper == \
mapped_array.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert mapped_array.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
def test_reduce(self):
@njit
def mean_reduce_nb(col, a):
return np.mean(a)
assert mapped_array['a'].reduce(mean_reduce_nb) == 11.
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0.),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0., wrap_kwargs=dict(dtype=np.int_)),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, wrap_kwargs=dict(to_timedelta=True)),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce') * day_dt
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(mean_reduce_nb),
pd.Series([12.166666666666666, 11.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
assert mapped_array_grouped['g1'].reduce(mean_reduce_nb) == 12.166666666666666
pd.testing.assert_series_equal(
mapped_array_grouped[['g1']].reduce(mean_reduce_nb),
pd.Series([12.166666666666666], index=pd.Index(['g1'], dtype='object')).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
mapped_array_grouped.reduce(mean_reduce_nb, group_by=False)
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, group_by=group_by),
mapped_array_grouped.reduce(mean_reduce_nb)
)
def test_reduce_to_idx(self):
@njit
def argmin_reduce_nb(col, a):
return np.argmin(a)
assert mapped_array['a'].reduce(argmin_reduce_nb, returns_idx=True) == 'x'
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True),
pd.Series(np.array(['x', 'x', 'z', np.nan], dtype=object), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 0, 2, -1], dtype=int), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 2], dtype=int), index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
def test_reduce_to_array(self):
@njit
def min_max_reduce_nb(col, a):
return np.array([np.min(a), np.max(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(min_max_reduce_nb, returns_array=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.Series([10., 12.], index= | pd.Index(['min', 'max'], dtype='object') | pandas.Index |
import matplotlib
matplotlib.use('Agg')  # select the non-interactive backend before importing pyplot
from matplotlib import pyplot as plt
from matplotlib.collections import BrokenBarHCollection
from itertools import cycle
from collections import defaultdict
import pandas
import numpy as np
asms = snakemake.params["asms"]
print(asms)
alist = snakemake.input["asms"]
print(alist)
data = defaultdict(list)
for i, v in enumerate(alist):
atext = asms[i]
with open(v + ".fai") as input:
for l in input:
s = l.rstrip().split()
data['asm'].append(atext)
data['len'].append(int(s[1]))
df = | pandas.DataFrame(data) | pandas.DataFrame |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 20 10:24:34 2019
@author: labadmin
"""
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 02 21:05:32 2019
@author: Hassan
"""
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import GridSearchCV
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis as QDA
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import GradientBoostingClassifier as GBC
from imblearn.over_sampling import RandomOverSampler
from imblearn.over_sampling import BorderlineSMOTE
from imblearn.over_sampling import SMOTENC
data_ben1=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset1.csv",skiprows=4)
data_ben2=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset2.csv",skiprows=4)
data_ben3=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset3.csv",skiprows=4)
data_ben4=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset4.csv",skiprows=4)
data_ben5=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset5.csv",skiprows=4)
data_ben6=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset6.csv",skiprows=4)
data_ben7=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset7.csv",skiprows=4)
frames_ben1 = [data_ben1,data_ben2,data_ben3,data_ben4,data_ben5,data_ben6,data_ben7]
result_ben1 = pd.concat(frames_ben1)
result_ben1.index=range(3360)
df_ben1 = pd.DataFrame({'label': [1]},index=range(0,3360))
dat_ben1=pd.concat([result_ben1,df_ben1],axis=1)
#-------------------------------------------------------------------------------------------------
data__ben1=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending2\\dataset1.csv",skiprows=4)
data__ben2=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending2\\dataset2.csv",skiprows=4)
data__ben3=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending2\\dataset3.csv",skiprows=4)
data__ben4=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending2\\dataset4.csv",skiprows=4)
data__ben4=data__ben4['# Columns: time'].str.split(expand=True)
data__ben4.columns=['# Columns: time','avg_rss12','var_rss12','avg_rss13','var_rss13','avg_rss23','var_rss23']
data__ben5=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending2\\dataset5.csv",skiprows=4)
data__ben6=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending2\\dataset6.csv",skiprows=4)
frames_ben2 = [data__ben1,data__ben2,data__ben3,data__ben4,data__ben5,data__ben6]
result_ben2 = pd.concat(frames_ben2)
result_ben2.index=range(2880)
df_ben2 = pd.DataFrame({'label': [2]},index=range(0,2880))
dat__ben2=pd.concat([result_ben2,df_ben2],axis=1)
#-----------------------------------------------------------------------------------------------------
data_cyc1=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset1.csv",skiprows=4)
data_cyc2=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset2.csv",skiprows=4)
data_cyc3=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset3.csv",skiprows=4)
data_cyc4=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset4.csv",skiprows=4)
data_cyc5=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset5.csv",skiprows=4)
data_cyc6=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset6.csv",skiprows=4)
data_cyc7=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset7.csv",skiprows=4)
data_cyc8=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset8.csv",skiprows=4)
data_cyc9=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset99.csv",skiprows=4)
data_cyc10=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset10.csv",skiprows=4)
data_cyc11=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset11.csv",skiprows=4)
data_cyc12=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset12.csv",skiprows=4)
data_cyc13=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset13.csv",skiprows=4)
data_cyc14=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset144.csv",skiprows=4)
data_cyc15=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset15.csv",skiprows=4)
frames_cyc = [data_cyc1,data_cyc2,data_cyc3,data_cyc4,data_cyc5,data_cyc6,data_cyc7,data_cyc8,data_cyc9,data_cyc10,data_cyc11,data_cyc12,data_cyc13,data_cyc14,data_cyc15]
result_cyc = pd.concat(frames_cyc)
result_cyc.index=range(7200)
df_cyc = pd.DataFrame({'label': [3]},index=range(0,7200))
data_cyc=pd.concat([result_cyc,df_cyc],axis=1)
#----------------------------------------------------------------------------------------------
data_ly1=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset1.csv",skiprows=4)
data_ly2=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset2.csv",skiprows=4)
data_ly3=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset3.csv",skiprows=4)
data_ly4=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset4.csv",skiprows=4)
data_ly5=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset5.csv",skiprows=4)
data_ly6=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset6.csv",skiprows=4)
data_ly7=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset7.csv",skiprows=4)
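# The blocks above repeat the same read -> concat -> relabel pattern for every activity.
# A helper like this sketch (an assumption: same folder layout, skiprows=4 headers, and no
# malformed files such as bending2/dataset4 handled above) could replace most of the repetition:
def load_activity(folder, n_files, label, n_rows):
    """Read dataset1..datasetN for one activity, stack them, and attach a constant label column."""
    frames = [pd.read_csv("{}\\dataset{}.csv".format(folder, i), skiprows=4)
              for i in range(1, n_files + 1)]
    stacked = pd.concat(frames)
    stacked.index = range(n_rows)
    stacked['label'] = label
    return stacked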
import os, gc, sys
import re
import random
import pickle
import pandas as pd
import numpy as np
from tqdm import tqdm
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from nltk.stem.porter import PorterStemmer
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import f1_score
import lightgbm as lgb
from catboost import CatBoost, Pool
import pulp
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from transformers import AutoTokenizer, AutoModel, AdamW
import nlp
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
def seed_everything(seed):
"""
    Ensure reproducibility when using GPU + PyTorch.
    Parameters
    ----------
    seed: int
        Seed value to fix.
"""
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
def np_rounder(x):
"""
    Round-half-up rounding function for numpy arrays.
Parameters:
-----------
x: np.array[float]
Returns:
----------
(int_array + float_array).astype(int): np.array[int]
"""
int_array = x // 1
float_array = x % 1
float_array[float_array<0.5] = 0
float_array[float_array>=0.5] = 1
return (int_array + float_array).astype(int)
def sigmoid(x):
"""
    Convert model scores (logits) to probabilities.
Parameters:
-----------
x: np.array[float]
Returns:
1 / (1+np.exp(-x)) : np.array[float]
"""
return 1 / (1+np.exp(-x))
def make_dataset(df, tokenizer, device, model_name):
"""
    Build a dataset for the NLP model.
    Parameters:
    -----------
    df: pd.DataFrame
        Input data for the model.
    tokenizer: transformers.AutoTokenizer.from_pretrained
        Tokenizer for the model.
    device: str
        Device to use: "cpu" or "cuda".
    model_name: str
        Name of the model to use.
    Returns:
    ----------
    dataset: nlp.Dataset.from_pandas
        Dataset ready for the NLP model.
"""
dataset = nlp.Dataset.from_pandas(df)
dataset = dataset.map(
lambda example: tokenizer(example[params.TEXT_COL],
padding="max_length",
truncation=True,
max_length=params.MAX_TOKEN_LEN))
if not model_name in ["roberta-base", "distilbert-base-cased"]:
dataset.set_format(type='torch',
columns=['input_ids', 'token_type_ids', 'attention_mask', 'labels'],
device=device)
else:
dataset.set_format(type='torch',
columns=['input_ids', 'attention_mask', 'labels'],
device=device)
return dataset
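# Illustrative call (a sketch: assumes `params` defines TEXT_COL, MAX_TOKEN_LEN and DEVICE as
# referenced above, and that `train_df` holds the text column plus a `labels` column):
# tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
# train_dataset = make_dataset(train_df, tokenizer, params.DEVICE, "bert-base-uncased")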
def predict_lgb(X_test, n_folds=4):
"""
    Prediction helper for the LightGBM models.
    Parameters:
    -----------
    X_test: pd.DataFrame
        Dataset to predict on.
    n_folds: int
        Number of folds to average over at prediction time. Do not set this
        higher than the number of folds used during training.
    Returns:
    ----------
    y_pred: np.array[float]
        Predicted scores (likelihoods).
"""
y_pred = np.zeros((X_test.shape[0], params.NUM_CLASS), dtype='float32')
for fold in range(n_folds):
model = pickle.load(open(params.MODELS_DIR+"lgb_fold{}.lgbmodel".format(fold), "rb"))
y_pred += model.predict(X_test, num_iteration=model.best_iteration) / n_folds
return y_pred
def predict_ctb(X_test, n_folds=4):
"""
    Prediction helper for the CatBoost models.
    Parameters:
    -----------
    X_test: pd.DataFrame
        Dataset to predict on.
    n_folds: int
        Number of folds to average over at prediction time. Do not set this
        higher than the number of folds used during training.
    Returns:
    ----------
    y_pred: np.array[float]
        Predicted scores (likelihoods).
"""
y_pred = np.zeros((X_test.shape[0], params.NUM_CLASS), dtype='float32')
for fold in range(n_folds):
model = pickle.load(open(params.MODELS_DIR+"ctb_fold{}.ctbmodel".format(fold), "rb"))
y_pred += model.predict(X_test) / n_folds
return y_pred
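# Illustrative use of the two GBDT predictors above (a sketch; assumes X_test was built with
# the same feature pipeline as training and that a plain average of the two models is wanted):
# y_pred_gbdt = (predict_lgb(X_test) + predict_ctb(X_test)) / 2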
def predict_nlp(model_name, typ, file_path):
"""
    Prediction helper for the NLP (transformer) models.
    Parameters:
    -----------
    model_name: str
        Name of the pretrained model to use.
    typ: str
        Which text/feature field to use.
    file_path: str
        Path to the dataset to predict on.
    Returns:
    ----------
    preds: np.array[float]
        Predicted scores (likelihoods).
"""
models = []
for fold in range(params.NUM_SPLITS):
model = Classifier(model_name, typ)
model.load_state_dict(torch.load(params.MODELS_DIR + f"best_{model_name}_{typ}_{fold}.pth"))
model.to(params.DEVICE)
model.eval()
models.append(model)
tokenizer = AutoTokenizer.from_pretrained(model_name)
    test_df = pd.read_csv(file_path)
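    # Assumed sketch of the remaining steps (not the original code): build the dataset,
    # run each fold's model, and average the sigmoid outputs.
    # test_df["labels"] = 0  # dummy labels so make_dataset can build the tensors
    # test_dataset = make_dataset(test_df, tokenizer, params.DEVICE, model_name)
    # preds = np.zeros((len(test_df), params.NUM_CLASS), dtype="float32")
    # with torch.no_grad():
    #     for model in models:
    #         ...  # accumulate sigmoid(model outputs) / len(models) over batches
    # return preds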
from os.path import join, exists, dirname, basename
from os import makedirs
import sys
import pandas as pd
from glob import glob
import seaborn as sns
import numpy as np
from scipy import stats
import xlsxwriter
import matplotlib.pyplot as plt
from scripts.parse_samplesheet import get_min_coverage, get_role, add_aliassamples, get_species
from scripts.snupy import check_snupy_status
import json
import datetime
import getpass
import socket
import requests
from requests.auth import HTTPBasicAuth
import urllib3
import yaml
import pickle
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
plt.switch_backend('Agg')
RESULT_NOT_PRESENT = -5
def report_undertermined_filesizes(fp_filesizes, fp_output, fp_error,
zscorethreshold=1):
# read all data
fps_sizes = glob(join(dirname(fp_filesizes), '*.txt'))
pds_sizes = []
for fp_size in fps_sizes:
data = pd.read_csv(
fp_size, sep="\t", names=["filesize", "filename", "status"],
index_col=1)
# mark given read as isme=True while all other data in the dir
# are isme=False
data['isme'] = fp_filesizes in fp_size
data['filesize'] /= 1024**3
pds_sizes.append(data)
pd_sizes = pd.concat(pds_sizes)
# compute z-score against non-bad known runs
pd_sizes['z-score'] = np.nan
idx_nonbad = pd_sizes[pd_sizes['status'] != 'bad'].index
pd_sizes.loc[idx_nonbad, 'z-score'] = stats.zscore(
pd_sizes.loc[idx_nonbad, 'filesize'])
# plot figure
fig = plt.figure()
ax = sns.distplot(
pd_sizes[(pd_sizes['isme'] == np.False_) &
(pd_sizes['status'] != 'bad')]['filesize'],
kde=False, rug=False, color="black", label='known runs')
ax = sns.distplot(
pd_sizes[(pd_sizes['isme'] == np.False_) &
(pd_sizes['status'] == 'bad')]['filesize'],
kde=False, rug=False, color="red", label='bad runs')
ax = sns.distplot(
pd_sizes[pd_sizes['isme'] == np.True_]['filesize'],
kde=False, rug=True, color="green", label='this run')
_ = ax.set_ylabel('number of files')
_ = ax.set_xlabel('file-size in GB')
ax.set_title('run %s' % basename(fp_filesizes)[:-4])
ax.legend()
# raise error if current run contains surprisingly large undetermined
# filesize
if pd_sizes[(pd_sizes['isme'] == np.True_) &
(pd_sizes['status'] == 'unknown')]['z-score'].max() > zscorethreshold:
ax.set_title('ERROR: %s' % ax.get_title())
fig.savefig(fp_error, bbox_inches='tight')
raise ValueError(
("Compared to known historic runs, your run contains surprisingly "
"(z-score > %f) large file(s) of undetermined reads. You will find"
" an supporting image at '%s'. Please do the following things:\n"
"1. discuss with lab personal about the quality of the run.\n"
"2. should you decide to keep going with this run, mark file "
"status (3rd column) in file '%s' as 'good'.\n"
"3. for future automatic considerations, mark file status (3rd "
"column) as 'bad' if you have decided to abort processing due to"
" too low quality (z-score kind of averages about known values)."
) % (zscorethreshold, fp_error, fp_filesizes))
else:
fig.savefig(fp_output, bbox_inches='tight')
def report_exome_coverage(
fps_sample, fp_plot,
min_coverage=30, min_targets=80, coverage_cutoff=200):
"""Creates an exome coverage plot for multiple samples.
Parameters
----------
fps_sample : [str]
A list of file-paths with coverage data in csv format.
fp_plot : str
Filepath of output graph.
min_coverage : int
Default: 30.
An arbitraty threshold of minimal coverage that we expect.
A vertical dashed line is drawn at this value.
min_targets : float
Default: 80.
An arbitraty threshold of minimal targets that we expect to be covered.
A horizontal dashed line is drawn at this value.
coverage_cutoff : float
Default: 200.
Rightmost coverage cut-off value where X-axis is limited.
Raises
------
ValueError : If one of the sample's coverage falls below expected
thresholds.
"""
# Usually we aim for a 30X coverage on 80% of the sites.
fig, ax = plt.subplots()
ax.axhline(y=min_targets, xmin=0, xmax=coverage_cutoff, color='gray',
linestyle='--')
ax.axvline(x=min_coverage, ymin=0, ymax=100, color='gray', linestyle='--')
samples_below_coverage_threshold = []
for fp_sample in fps_sample:
coverage = pd.read_csv(fp_sample, sep="\t")
samplename = fp_sample.split('/')[-1].split('.')[0]
linewidth = 1
if coverage[coverage['#coverage'] == min_coverage]['percent_cumulative'].min() < min_targets:
linewidth = 4
samples_below_coverage_threshold.append(samplename)
ax.plot(coverage['#coverage'],
coverage['percent_cumulative'],
label=samplename,
linewidth=linewidth)
ax.set_xlim((0, coverage_cutoff))
ax.set_xlabel('Read Coverage')
ax.set_ylabel('Targeted Exome Bases')
ax.legend()
if len(samples_below_coverage_threshold) > 0:
fp_plot = fp_plot.replace('.pdf', '.error.pdf')
fig.savefig(fp_plot, bbox_inches='tight')
if len(samples_below_coverage_threshold) > 0:
raise ValueError(
"The following %i sample(s) have coverage below expected "
"thresholds. Please discuss with project PIs on how to proceed. "
"Maybe, samples need to be re-sequenced.\n\t%s\nYou will find more"
" information in the generated coverage plot '%s'." % (
len(samples_below_coverage_threshold),
'\n\t'.join(samples_below_coverage_threshold),
fp_plot))
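# Illustrative call (a sketch; the real paths come from the Snakemake workflow config):
# report_exome_coverage(
#     glob('Intermediate/Exome_Coverage/*.exome_coverage.csv'),
#     'Reports/exome_coverage.pdf',
#     min_coverage=30, min_targets=80, coverage_cutoff=200)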
ACTION_PROGRAMS = [
{'action': 'background',
'program': 'GATK',
'fileending_snupy_extract': '.snp_indel.gatk',
'fileending_spike_calls': '.gatk.snp_indel.vcf',
'stepname_spike_calls': 'gatk_CombineVariants',
},
{'action': 'background',
'program': 'Platypus',
'fileending_snupy_extract': '.indel.ptp',
'fileending_spike_calls': '.ptp.annotated.filtered.indels.vcf',
'stepname_spike_calls': 'platypus_filtered',
},
{'action': 'tumornormal',
'program': 'Varscan',
'fileending_snupy_extract': '.somatic.varscan',
'fileending_spike_calls':
{'homo sapiens': '.snp.somatic_germline.vcf',
'mus musculus': '.indel_snp.vcf'},
'stepname_spike_calls': 'merge_somatic',
},
{'action': 'tumornormal',
'program': 'Mutect',
'fileending_snupy_extract': '.somatic.mutect',
'fileending_spike_calls': '.all_calls.vcf',
'stepname_spike_calls': 'mutect',
},
{'action': 'tumornormal',
'program': 'Excavator2',
'fileending_snupy_extract': '.somatic.cnv.excavator2',
'fileending_spike_calls': '.vcf',
'stepname_spike_calls': 'excavator_somatic',
},
{'action': 'trio',
'program': 'Varscan\ndenovo',
'fileending_snupy_extract': '.denovo.varscan',
'fileending_spike_calls': '.var2denovo.vcf',
'stepname_spike_calls': 'writing_headers',
},
{'action': 'trio',
'program': 'Excavator2',
'fileending_snupy_extract': '.trio.cnv.excavator2',
'fileending_spike_calls': '.vcf',
'stepname_spike_calls': 'excavator_trio',
},
]
def _get_statusdata_demultiplex(samplesheets, prefix, config):
demux_yields = []
for flowcell in samplesheets['run'].unique():
fp_yielddata = '%s%s%s/Data/%s.yield_data.csv' % (prefix, config['dirs']['intermediate'], config['stepnames']['yield_report'], flowcell)
if exists(fp_yielddata):
demux_yields.append(
pd.read_csv(fp_yielddata, sep="\t").rename(columns={'Project': 'Sample_Project', 'Sample': 'Sample_ID', 'Yield': 'yield'})) #.set_index(['Project', 'Lane', 'Sample', 'Barcode sequence'])
if len(demux_yields) <= 0:
return pd.DataFrame()
demux_yields = add_aliassamples(pd.concat(demux_yields, axis=0), config)
    # map yields of original samples to aliases
for idx, row in demux_yields[demux_yields['is_alias'] == True].iterrows():
orig = demux_yields[(demux_yields['Sample_Project'] == row['fastq-prefix'].split('/')[0]) & (demux_yields['Sample_ID'] == row['fastq-prefix'].split('/')[1])]['yield']
if orig.shape[0] > 0:
demux_yields.loc[idx, 'yield'] = orig.sum()
demux_yields = demux_yields.dropna(subset=['yield'])
return pd.DataFrame(demux_yields).groupby(['Sample_Project', 'Sample_ID'])['yield'].sum()
def _get_statusdata_coverage(samplesheets, prefix, config, min_targets=80):
coverages = []
for (sample_project, sample_id), meta in samplesheets.groupby(['Sample_Project', 'Sample_ID']):
role_sample_project, role_sample_id = sample_project, sample_id
if (meta['is_alias'] == True).any():
role_sample_project, role_sample_id = get_role(sample_project, meta['spike_entity_id'].unique()[0], meta['spike_entity_role'].unique()[0], samplesheets).split('/')
fp_coverage = join(prefix, config['dirs']['intermediate'], config['stepnames']['exome_coverage'], role_sample_project, '%s.exome_coverage.csv' % role_sample_id)
if exists(fp_coverage):
coverage = pd.read_csv(fp_coverage, sep="\t")
if coverage.shape[0] > 0:
coverages.append({
'Sample_Project': sample_project,
'Sample_ID': sample_id,
'coverage': coverage.loc[coverage['percent_cumulative'].apply(lambda x: abs(x-min_targets)).idxmin(), '#coverage']})
if len(coverages) <= 0:
return pd.DataFrame()
return pd.DataFrame(coverages).set_index(['Sample_Project', 'Sample_ID'])['coverage']
def _isKnownDuo(sample_project, spike_entity_id, config):
"""Checks if trio is a known duo, i.e. missing samples won't be available in the future.
Parameters
----------
sample_project : str
spike_entity_id : str
config : dict()
Snakemake configuration.
Returns
-------
Boolean: True, if spike_entity_id is in config list of known duos for given project.
False, otherwise.
"""
if 'projects' in config:
if sample_project in config['projects']:
if 'known_duos' in config['projects'][sample_project]:
if spike_entity_id in config['projects'][sample_project]['known_duos']:
return True
return False
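# Expected config shape for the lookup above (illustrative project/entity values):
# config = {'projects': {'SomeProject': {'known_duos': ['entity_42']}}}
# _isKnownDuo('SomeProject', 'entity_42', config)  # -> True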
def _get_statusdata_snupyextracted(samplesheets, prefix, snupy_instance, config):
results = []
for sample_project, meta in samplesheets.groupby('Sample_Project'):
# project in config file is not properly configure for snupy!
if config['projects'].get(sample_project, None) is None:
continue
if config['projects'][sample_project].get('snupy', None) is None:
continue
if config['projects'][sample_project]['snupy'][snupy_instance].get('project_id', None) is None:
continue
r = requests.get('%s/experiments/%s.json' % (config['credentials']['snupy'][snupy_instance]['host'], config['projects'][sample_project]['snupy'][snupy_instance]['project_id']),
auth=HTTPBasicAuth(config['credentials']['snupy'][snupy_instance]['username'], config['credentials']['snupy'][snupy_instance]['password']),
verify=False)
check_snupy_status(r)
samples = [sample['name'] for sample in r.json()['samples']]
for sample_id, meta_sample in meta.groupby('Sample_ID'):
for file_ending, action, program in [(ap['fileending_snupy_extract'], ap['action'], ap['program']) for ap in ACTION_PROGRAMS]:
# in some cases "sample name" hold spike_entity_id, in others Sample_ID
entity = sample_id
runs = '+'.join(sorted(meta_sample['run'].unique()))
if (action == 'trio'):
if meta_sample['spike_entity_role'].unique()[0] == 'patient':
entity = meta_sample['spike_entity_id'].iloc[0]
runs = '+'.join(sorted(samplesheets[samplesheets['spike_entity_id'] == meta_sample['spike_entity_id'].iloc[0]]['run'].unique()))
if (action == 'tumornormal'):
if meta_sample['spike_entity_role'].unique()[0] == 'tumor':
entity = meta_sample['spike_entity_id'].iloc[0]
runs = '+'.join(sorted(samplesheets[samplesheets['spike_entity_id'] == meta_sample['spike_entity_id'].iloc[0]]['run'].unique()))
name = '%s_%s/%s%s' % (runs, sample_project, entity, file_ending)
if (sample_project in config['projects']) and (pd.notnull(meta_sample['spike_entity_role'].iloc[0])):
if ((action == 'trio') and (meta_sample['spike_entity_role'].iloc[0] in ['patient', 'sibling']) and (not _isKnownDuo(sample_project, meta_sample['spike_entity_id'].iloc[0], config))) or\
((action == 'background')) or\
((action == 'tumornormal') and (meta_sample['spike_entity_role'].iloc[0].startswith('tumor'))):
results.append({
'Sample_Project': sample_project,
'Sample_ID': sample_id,
'action': action,
'program': program,
'status': name in samples,
'snupy_sample_name': name
})
if len(results) <= 0:
return pd.DataFrame()
return pd.DataFrame(results).set_index(['Sample_Project', 'Sample_ID', 'action', 'program'])
def _get_statusdata_numberpassingcalls(samplesheets, prefix, config, RESULT_NOT_PRESENT, verbose=sys.stderr):
results = []
# leave out samples aliases
for (sample_project, spike_entity_id, spike_entity_role, fastq_prefix), meta in samplesheets[samplesheets['is_alias'] != True].fillna('not defined').groupby(['Sample_Project', 'spike_entity_id', 'spike_entity_role', 'fastq-prefix']):
def _get_fileending(file_ending, fastq_prefix, samplesheets, config):
if isinstance(file_ending, dict):
return file_ending[get_species(fastq_prefix, samplesheets, config)]
else:
return file_ending
for ap in ACTION_PROGRAMS:
fp_vcf = None
if (ap['action'] == 'background') and pd.notnull(spike_entity_role):
if (ap['program'] == 'GATK'):
fp_vcf = '%s%s%s/%s%s' % (prefix, config['dirs']['intermediate'], config['stepnames'][ap['stepname_spike_calls']], fastq_prefix, _get_fileending(ap['fileending_spike_calls'], fastq_prefix, meta, config))
elif (ap['program'] == 'Platypus'):
fp_vcf = '%s%s%s/%s%s' % (prefix, config['dirs']['intermediate'], config['stepnames'][ap['stepname_spike_calls']], fastq_prefix, _get_fileending(ap['fileending_spike_calls'], fastq_prefix, meta, config))
elif (ap['action'] == 'tumornormal'):
for (alias_sample_project, alias_spike_entity_role, alias_sample_id), alias_meta in samplesheets[(samplesheets['fastq-prefix'] == fastq_prefix) & (samplesheets['spike_entity_role'].apply(lambda x: x.split('_')[0] if pd.notnull(x) else x).isin(['tumor']))].groupby(['Sample_Project', 'spike_entity_role', 'Sample_ID']):
# for Keimbahn, the tumor sample needs to include the name of the original sample ID
instance_id = '%s/%s' % (alias_sample_project, alias_sample_id)
if alias_spike_entity_role == 'tumor':
# for Maus_Hauer, the filename holds the entity name, but not the Sample ID
instance_id = '%s/%s' % (sample_project, spike_entity_id)
if (alias_spike_entity_role.split('_')[0] in set(['tumor'])):
if (ap['program'] == 'Varscan'):
fp_vcf = '%s%s%s/%s%s' % (prefix, config['dirs']['intermediate'], config['stepnames'][ap['stepname_spike_calls']], instance_id, _get_fileending(ap['fileending_spike_calls'], fastq_prefix, meta, config))
elif (ap['program'] == 'Mutect'):
fp_vcf = '%s%s%s/%s%s' % (prefix, config['dirs']['intermediate'], config['stepnames'][ap['stepname_spike_calls']], instance_id, _get_fileending(ap['fileending_spike_calls'], fastq_prefix, meta, config))
elif (ap['program'] == 'Excavator2'):
fp_vcf = '%s%s%s/%s/Results/%s/EXCAVATORRegionCall_%s%s' % (prefix, config['dirs']['intermediate'], config['stepnames'][ap['stepname_spike_calls']], instance_id, fastq_prefix.split('/')[-1], fastq_prefix.split('/')[-1], _get_fileending(ap['fileending_spike_calls'], fastq_prefix, meta, config))
elif (ap['action'] == 'trio'):
for (alias_sample_project, alias_spike_entity_role, alias_sample_id, alias_spike_entity_id), alias_meta in samplesheets[(samplesheets['fastq-prefix'] == fastq_prefix) & (samplesheets['spike_entity_role'].isin(['patient', 'sibling']))].groupby(['Sample_Project', 'spike_entity_role', 'Sample_ID', 'spike_entity_id']):
# Trios are a more complicated case, since by default the result name is given by the
# spike_entity_id, but if computed for siblings, the name is given by the fastq-prefix
if (ap['program'] == 'Varscan\ndenovo'):
if (alias_spike_entity_role in set(['patient'])):
fp_vcf = '%s%s%s/%s/%s%s' % (prefix, config['dirs']['intermediate'], config['stepnames'][ap['stepname_spike_calls']], alias_sample_project, alias_spike_entity_id, _get_fileending(ap['fileending_spike_calls'], fastq_prefix, meta, config))
elif (alias_spike_entity_role in set(['sibling'])):
fp_vcf = '%s%s%s/%s%s' % (prefix, config['dirs']['intermediate'], config['stepnames'][ap['stepname_spike_calls']], fastq_prefix, _get_fileending(ap['fileending_spike_calls'], fastq_prefix, meta, config))
elif (ap['program'] == 'Excavator2'):
if (alias_spike_entity_role in set(['patient'])):
fp_vcf = '%s%s%s/%s/%s/Results/%s/EXCAVATORRegionCall_%s%s' % (prefix, config['dirs']['intermediate'], config['stepnames'][ap['stepname_spike_calls']], alias_sample_project, alias_spike_entity_id, fastq_prefix.split('/')[-1], fastq_prefix.split('/')[-1], _get_fileending(ap['fileending_spike_calls'], fastq_prefix, meta, config))
elif (alias_spike_entity_role in set(['sibling'])):
fp_vcf = '%s%s%s/%s/Results/%s/EXCAVATORRegionCall_%s%s' % (prefix, config['dirs']['intermediate'], config['stepnames'][ap['stepname_spike_calls']], fastq_prefix, fastq_prefix.split('/')[-1], fastq_prefix.split('/')[-1], _get_fileending(ap['fileending_spike_calls'], fastq_prefix, meta, config))
# remove entry, if it is known (config.yaml) that this trio is incomplete
if (spike_entity_role == 'patient') and (spike_entity_id in config.get('projects', []).get(sample_project, []).get('known_duos', [])):
fp_vcf = None
results.append({
'Sample_Project': sample_project,
'Sample_ID': fastq_prefix.split('/')[-1],
'action': ap['action'],
'program': ap['program'],
'fp_calls': fp_vcf,
})
status = 0
num_status = 20
if verbose is not None:
print('of %i: ' % num_status, file=verbose, end="")
for i, res in enumerate(results):
if (verbose is not None) and int(i % (len(results) / num_status)) == 0:
status+=1
print('%i ' % status, file=verbose, end="")
nr_calls = RESULT_NOT_PRESENT
if (res['fp_calls'] is not None) and exists(res['fp_calls']):
try:
if res['program'] == 'Varscan':
nr_calls = pd.read_csv(res['fp_calls'], comment='#', sep="\t", dtype=str, header=None, usecols=[7], squeeze=True).apply(lambda x: ';SS=2;' in x).sum()
else:
nr_calls = pd.read_csv(res['fp_calls'], comment='#', sep="\t", dtype=str, header=None, usecols=[6], squeeze=True).value_counts()['PASS']
except pd.io.common.EmptyDataError:
nr_calls = 0
res['number_calls'] = nr_calls
if verbose is not None:
print('done.', file=verbose)
if len(results) <= 0:
return pd.DataFrame()
results = pd.DataFrame(results)
results = results[pd.notnull(results['fp_calls'])].set_index(['Sample_Project', 'Sample_ID', 'action', 'program'])['number_calls']
# add alias sample results
for (sample_project, spike_entity_id, spike_entity_role, fastq_prefix), meta in samplesheets[samplesheets['is_alias'] == True].groupby(['Sample_Project', 'spike_entity_id', 'spike_entity_role', 'fastq-prefix']):
for (_, _, action, program), row in results.loc[fastq_prefix.split('/')[0], fastq_prefix.split('/')[-1], :].iteritems():
results.loc[sample_project, meta['Sample_ID'].unique()[0], action, program] = row
# remove samples, that don't have their own role, but were used for aliases
for (sample_project, sample_id), _ in samplesheets[pd.isnull(samplesheets['spike_entity_role'])].groupby(['Sample_Project', 'Sample_ID']):
idx_to_drop = results.loc[sample_project, sample_id, ['tumornormal', 'trio'], :].index
if len(idx_to_drop) > 0:
results.drop(index=idx_to_drop, inplace=True)
return results
def _get_genepanel_data(samplesheets, prefix, config):
results = []
columns = ['Sample_Project', 'Sample_ID', 'genepanel', 'gene']
# leave out samples aliases
for (sample_project, spike_entity_id, spike_entity_role, fastq_prefix), meta in samplesheets[samplesheets['is_alias'] != True].fillna('not defined').groupby(['Sample_Project', 'spike_entity_id', 'spike_entity_role', 'fastq-prefix']):
#print(sample_project, spike_entity_id, spike_entity_role, fastq_prefix)
for file in glob('%s%s%s/*/%s.tsv' % (prefix, config['dirs']['intermediate'], config['stepnames']['genepanel_coverage'], fastq_prefix)):
#print("\t", file)
coverage = pd.read_csv(file, sep="\t")
parts = file.split('/')
# determine genepanel name, project and sample_id from filename
coverage['Sample_Project'] = sample_project
coverage['Sample_ID'] = meta['Sample_ID'].unique()[0]
coverage['genepanel'] = parts[-3][:-5]
coverage = coverage.set_index(columns)
results.append(coverage)
if len(results) > 0:
results = pd.concat(results).sort_values(by=columns)
else:
results = pd.DataFrame(columns=columns)
# add alias sample results
for (sample_project, spike_entity_id, spike_entity_role, fastq_prefix), meta in samplesheets[samplesheets['is_alias'] == True].groupby(['Sample_Project', 'spike_entity_id', 'spike_entity_role', 'fastq-prefix']):
for (_, _, action, program), row in results.loc[fastq_prefix.split('/')[0], fastq_prefix.split('/')[-1], :].iterrows():
results.loc[sample_project, meta['Sample_ID'].unique()[0], action, program] = row
return results
def get_status_data(samplesheets, config, snupy_instance, prefix=None, verbose=sys.stderr):
"""
Parameters
----------
samplesheets : pd.DataFrame
The global samplesheets.
config : dict()
Snakemake configuration object.
prefix : str
Default: None, i.e. config['dirs']['prefix'] is used.
Filepath to spike main directory.
verbose : StringIO
Default: sys.stderr
If not None: print verbose information.
Returns
-------
4-tuple: (data_yields, data_coverage, data_snupy, data_calls)
"""
global RESULT_NOT_PRESENT
NUMSTEPS = 6
if prefix is None:
prefix = config['dirs']['prefix']
if verbose is not None:
print("Creating report", file=verbose)
# obtain data
if verbose is not None:
print("1/%i) gathering demuliplexing yields: ..." % NUMSTEPS, file=verbose, end="")
data_yields = _get_statusdata_demultiplex(samplesheets, prefix, config)
if verbose is not None:
print(" done.\n2/%i) gathering coverage: ..." % NUMSTEPS, file=verbose, end="")
data_coverage = _get_statusdata_coverage(samplesheets, prefix, config)
if verbose is not None:
print(" done.\n3/%i) gathering snupy extraction status: ..." % NUMSTEPS, file=verbose, end="")
data_snupy = _get_statusdata_snupyextracted(samplesheets, prefix, snupy_instance, config)
if verbose is not None:
print(" done.\n4/%i) gathering number of PASSing calls: ..." % NUMSTEPS, file=verbose, end="")
data_calls = _get_statusdata_numberpassingcalls(samplesheets, prefix, config, RESULT_NOT_PRESENT, verbose=verbose)
if verbose is not None:
print(" done.\n5/%i) gathering gene coverage: ..." % NUMSTEPS, file=verbose, end="")
data_genepanels = _get_genepanel_data(samplesheets, prefix, config)
if verbose is not None:
print("done.\n6/%i) generating Excel output: ..." % NUMSTEPS, file=verbose, end="")
return (data_yields, data_coverage, data_snupy, data_calls, data_genepanels)
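# Typical call sequence (a sketch; samplesheets/config come from the Snakemake workflow and
# the snupy instance name is a placeholder):
# data = get_status_data(samplesheets, config, 'production')
# write_status_update(data, 'status_report.xlsx', samplesheets, config)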
def write_status_update(data, filename, samplesheets, config, offset_rows=0, offset_cols=0, min_yield=5.0, verbose=sys.stderr):
"""
Parameters
----------
data : (pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame)
yields, coverage, snupy, calls. Result of function get_status_data.
filename : str
Filepath to output Excel file.
samplesheets : pd.DataFrame
The global samplesheets.
config : dict()
Snakemake configuration object.
offset_rows : int
Default: 0
Number if rows to leave blank on top.
offset_cols : int
Default: 0
Number if columns to leave blank on the left.
min_yield : float
Default: 5.0
Threshold when to color yield falling below this value in red.
Note: I don't know what a good default looks like :-/
verbose : StringIO
Default: sys.stderr
If not None: print verbose information.
"""
global RESULT_NOT_PRESENT
# for debugging purposes
pickle.dump(data, open('%s.datadump' % filename, 'wb'))
data_yields, data_coverage, data_snupy, data_calls, data_genepanels = data
# start creating the Excel result
workbook = xlsxwriter.Workbook(filename)
worksheet = workbook.add_worksheet()
format_good = workbook.add_format({'bg_color': '#ccffcc'})
format_bad = workbook.add_format({'bg_color': '#ffcccc'})
# date information
format_info = workbook.add_format({
'valign': 'vcenter',
'align': 'center',
'font_size': 9})
info_username = getpass.getuser()
info_now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
info_machine = socket.gethostname()
worksheet.merge_range(offset_rows, offset_cols, offset_rows+1, offset_cols+3, ('status report created\nat %s\nby %s\non %s' % (info_now, info_username, info_machine)),format_info)
gene_order = []
if data_genepanels.shape[0] > 0:
for panel in sorted(data_genepanels.index.get_level_values('genepanel').unique()):
for gene in sorted(data_genepanels.loc(axis=0)[:, :, panel, :].index.get_level_values('gene').unique()):
gene_order.append((panel, gene))
# header action
format_action = workbook.add_format({
'valign': 'vcenter',
'align': 'center',
'bold': True})
aps = pd.Series([ap['action'] for ap in ACTION_PROGRAMS]).to_frame()
for caption, g in aps.groupby(0):
left = offset_cols+6+g.index[0]
right = offset_cols+6+g.index[-1]
if left == right:
worksheet.write(offset_rows, left, caption, format_action)
else:
worksheet.merge_range(offset_rows, left, offset_rows, right, caption, format_action)
# header
format_header = workbook.add_format({
'rotation': 90,
'bold': True,
'valign': 'vcenter',
'align': 'center'})
worksheet.set_row(offset_rows+1, 80)
for i, caption in enumerate(['yield (MB)', 'coverage'] + [ap['program'] for ap in ACTION_PROGRAMS]):
worksheet.write(offset_rows+1, offset_cols+4+i, caption, format_header)
format_spike_seqdate = workbook.add_format({
'align': 'center',
'valign': 'vcenter',
'font_size': 8})
worksheet.write(offset_rows+1, offset_cols+6+len(ACTION_PROGRAMS), 'sequenced at', format_spike_seqdate)
# header for gene panels
format_header_genes = workbook.add_format({
'rotation': 90,
'bold': False,
'valign': 'vcenter',
'align': 'center',
'font_size': 8})
if len(gene_order) > 0:
for caption, g in pd.DataFrame(gene_order).groupby(0):
left = offset_cols+6+len(ACTION_PROGRAMS)+1+g.index[0]
right = offset_cols+6+len(ACTION_PROGRAMS)+1+g.index[-1]
if left == right:
worksheet.write(offset_rows, left, caption, format_action)
else:
worksheet.merge_range(offset_rows, left, offset_rows, right, caption, format_action)
for i, (panel, gene) in enumerate(gene_order):
worksheet.write(offset_rows+1, offset_cols+6+len(ACTION_PROGRAMS)+1+i, gene, format_header_genes)
worksheet.set_column(offset_cols+6+len(ACTION_PROGRAMS)+1, offset_cols+6+len(ACTION_PROGRAMS)+1+len(gene_order), 3)
worksheet.freeze_panes(offset_rows+2, offset_cols+4)
# body
format_project = workbook.add_format({
'rotation': 90,
'bold': True,
'valign': 'vcenter',
'align': 'center'})
format_spike_entity_id = workbook.add_format({
'valign': 'vcenter',
'align': 'center'})
format_spike_sampleID = workbook.add_format({
'valign': 'vcenter',
'align': 'center'})
format_spike_entity_role_missing = workbook.add_format({
'valign': 'vcenter',
'align': 'center',
'font_color': '#ff0000'})
format_gene_coverage_good = workbook.add_format({
'valign': 'vcenter',
'align': 'right',
'font_size': 6,
'bg_color': '#ccffcc'})
format_gene_coverage_bad = workbook.add_format({
'valign': 'vcenter',
'align': 'right',
'font_size': 6,
        'bg_color': '#ffcccc'})
row = offset_rows+2
for sample_project, grp_project in samplesheets.groupby('Sample_Project'):
# add in lines to indicate missing samples, e.g. for trios that are incomplete
missing_samples = []
for spike_entity_id, grp_spike_entity_group in grp_project.groupby('spike_entity_id'):
if len(set(grp_spike_entity_group['spike_entity_role'].unique()) & set(['patient', 'father', 'mother', 'sibling'])) > 0:
for role in ['patient', 'mother', 'father']:
if grp_spike_entity_group[grp_spike_entity_group['spike_entity_role'] == role].shape[0] <= 0:
missing_samples.append({
'spike_entity_id': spike_entity_id,
'Sample_ID': role,
'spike_entity_role': role,
'missing': True,
})
# combine samples from samplesheets AND those that are expected but missing
samples_and_missing = pd.concat([grp_project, pd.DataFrame(missing_samples)], sort=False).fillna(value={'spike_entity_id': ''})
worksheet.merge_range(row, offset_cols, row+len(samples_and_missing.groupby(['spike_entity_id', 'Sample_ID']))-1, offset_cols, sample_project.replace('_', '\n'), format_project)
worksheet.set_column(offset_cols, offset_cols, 4)
# groupby excludes NaNs, thus I have to hack: replace NaN by "" here and
# reset to np.nan within the loop
for spike_entity_group, grp_spike_entity_group in samples_and_missing.groupby('spike_entity_id'):
if spike_entity_group != "":
label = spike_entity_group
if _isKnownDuo(sample_project, spike_entity_group, config):
label += "\n(known duo)"
if len(grp_spike_entity_group.groupby('Sample_ID')) > 1:
worksheet.merge_range(row, offset_cols+1, row+len(grp_spike_entity_group.groupby('Sample_ID'))-1, offset_cols+1, label, format_spike_entity_id)
else:
worksheet.write(row, offset_cols+1, label, format_spike_entity_id)
else:
spike_entity_group = np.nan
worksheet.set_column(offset_cols+1, offset_cols+1, 10)
for nr_sample_id, (sample_id, grp_sample_id) in enumerate(grp_spike_entity_group.sort_values(by='spike_entity_role').groupby('Sample_ID')):
worksheet.set_column(offset_cols+2, offset_cols+2, 4)
role = grp_sample_id['spike_entity_role'].iloc[0]
is_missing = ('missing' in grp_sample_id.columns) and (grp_sample_id[grp_sample_id['missing'] == np.True_].shape[0] > 0)
# sample_ID, extend field if no spike_entity_group or spike_entity_role is given
col_start = offset_cols+2
col_end = offset_cols+2
if pd.isnull(spike_entity_group):
col_start -= 1
if pd.isnull(role):
col_end += 1
sample_id_value = sample_id
# if sample_id starts with name of the entity group, we are using "..." to make it visually more pleasing
                if pd.notnull(spike_entity_group):
# -*- coding: utf-8 -*-
import pytest
import os
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal, assert_series_equal
import numpy.testing as npt
from numpy.linalg import norm, lstsq
from numpy.random import randn
from flaky import flaky
from lifelines import CoxPHFitter, WeibullAFTFitter, KaplanMeierFitter, ExponentialFitter
from lifelines.datasets import load_regression_dataset, load_larynx, load_waltons, load_rossi
from lifelines import utils
from lifelines import exceptions
from lifelines.utils.sklearn_adapter import sklearn_adapter
from lifelines.utils.safe_exp import safe_exp
def test_format_p_values():
assert utils.format_p_value(2)(0.004) == "<0.005"
assert utils.format_p_value(3)(0.004) == "0.004"
assert utils.format_p_value(3)(0.000) == "<0.0005"
assert utils.format_p_value(3)(0.005) == "0.005"
assert utils.format_p_value(3)(0.2111) == "0.211"
assert utils.format_p_value(3)(0.2119) == "0.212"
def test_ridge_regression_with_penalty_is_less_than_without_penalty():
X = randn(2, 2)
Y = randn(2)
assert norm(utils.ridge_regression(X, Y, c1=2.0)[0]) <= norm(utils.ridge_regression(X, Y)[0])
assert norm(utils.ridge_regression(X, Y, c1=1.0, c2=1.0)[0]) <= norm(utils.ridge_regression(X, Y)[0])
def test_ridge_regression_with_extreme_c1_penalty_equals_close_to_zero_vector():
c1 = 10e8
c2 = 0.0
offset = np.ones(2)
X = randn(2, 2)
Y = randn(2)
assert norm(utils.ridge_regression(X, Y, c1, c2, offset)[0]) < 10e-4
def test_ridge_regression_with_extreme_c2_penalty_equals_close_to_offset():
c1 = 0.0
c2 = 10e8
offset = np.ones(2)
X = randn(2, 2)
Y = randn(2)
assert norm(utils.ridge_regression(X, Y, c1, c2, offset)[0] - offset) < 10e-4
def test_lstsq_returns_similar_values_to_ridge_regression():
X = randn(2, 2)
Y = randn(2)
expected = lstsq(X, Y, rcond=None)[0]
assert norm(utils.ridge_regression(X, Y)[0] - expected) < 10e-4
def test_lstsq_returns_correct_values():
X = np.array([[-1.0, -1.0], [-1.0, 0], [-0.8, -1.0], [1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
beta, V = utils.ridge_regression(X, y)
expected_beta = [-0.98684211, -0.07894737]
expected_v = [
[-0.03289474, -0.49342105, 0.06578947, 0.03289474, 0.49342105],
[-0.30263158, 0.46052632, -0.39473684, 0.30263158, -0.46052632],
]
assert norm(beta - expected_beta) < 10e-4
for V_row, e_v_row in zip(V, expected_v):
assert norm(V_row - e_v_row) < 1e-4
def test_unnormalize():
df = load_larynx()
m = df.mean(0)
s = df.std(0)
ndf = utils.normalize(df)
npt.assert_almost_equal(df.values, utils.unnormalize(ndf, m, s).values)
def test_normalize():
df = load_larynx()
n, d = df.shape
npt.assert_almost_equal(utils.normalize(df).mean(0).values, np.zeros(d))
npt.assert_almost_equal(utils.normalize(df).std(0).values, np.ones(d))
def test_median():
sv = pd.DataFrame(1 - np.linspace(0, 1, 1000))
assert utils.median_survival_times(sv) == 500
def test_median_accepts_series():
sv = pd.Series(1 - np.linspace(0, 1, 1000))
assert utils.median_survival_times(sv) == 500
def test_qth_survival_times_with_varying_datatype_inputs():
sf_list = [1.0, 0.75, 0.5, 0.25, 0.0]
sf_array = np.array([1.0, 0.75, 0.5, 0.25, 0.0])
sf_df_no_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0])
sf_df_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0], index=[10, 20, 30, 40, 50])
sf_series_index = pd.Series([1.0, 0.75, 0.5, 0.25, 0.0], index=[10, 20, 30, 40, 50])
sf_series_no_index = pd.Series([1.0, 0.75, 0.5, 0.25, 0.0])
q = 0.5
assert utils.qth_survival_times(q, sf_list) == 2
assert utils.qth_survival_times(q, sf_array) == 2
assert utils.qth_survival_times(q, sf_df_no_index) == 2
assert utils.qth_survival_times(q, sf_df_index) == 30
assert utils.qth_survival_times(q, sf_series_index) == 30
assert utils.qth_survival_times(q, sf_series_no_index) == 2
def test_qth_survival_times_multi_dim_input():
sf = np.linspace(1, 0, 50)
sf_multi_df = pd.DataFrame({"sf": sf, "sf**2": sf ** 2})
medians = utils.qth_survival_times(0.5, sf_multi_df)
assert medians["sf"].loc[0.5] == 25
assert medians["sf**2"].loc[0.5] == 15
def test_qth_survival_time_returns_inf():
sf = pd.Series([1.0, 0.7, 0.6])
assert utils.qth_survival_time(0.5, sf) == np.inf
def test_qth_survival_time_accepts_a_model():
kmf = KaplanMeierFitter().fit([1.0, 0.7, 0.6])
assert utils.qth_survival_time(0.8, kmf) > 0
def test_qth_survival_time_with_dataframe():
sf_df_no_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0])
sf_df_index = pd.DataFrame([1.0, 0.75, 0.5, 0.25, 0.0], index=[10, 20, 30, 40, 50])
sf_df_too_many_columns = pd.DataFrame([[1, 2], [3, 4]])
assert utils.qth_survival_time(0.5, sf_df_no_index) == 2
assert utils.qth_survival_time(0.5, sf_df_index) == 30
with pytest.raises(ValueError):
utils.qth_survival_time(0.5, sf_df_too_many_columns)
def test_qth_survival_times_with_multivariate_q():
sf = np.linspace(1, 0, 50)
sf_multi_df = pd.DataFrame({"sf": sf, "sf**2": sf ** 2})
assert_frame_equal(
utils.qth_survival_times([0.2, 0.5], sf_multi_df),
pd.DataFrame([[40, 28], [25, 15]], index=[0.2, 0.5], columns=["sf", "sf**2"]),
)
assert_frame_equal(
utils.qth_survival_times([0.2, 0.5], sf_multi_df["sf"]), pd.DataFrame([40, 25], index=[0.2, 0.5], columns=["sf"])
)
assert_frame_equal(utils.qth_survival_times(0.5, sf_multi_df), pd.DataFrame([[25, 15]], index=[0.5], columns=["sf", "sf**2"]))
assert utils.qth_survival_times(0.5, sf_multi_df["sf"]) == 25
def test_qth_survival_times_with_duplicate_q_returns_valid_index_and_shape():
sf = pd.DataFrame(np.linspace(1, 0, 50))
q = pd.Series([0.5, 0.5, 0.2, 0.0, 0.0])
actual = utils.qth_survival_times(q, sf)
assert actual.shape[0] == len(q)
assert actual.index[0] == actual.index[1]
assert_series_equal(actual.iloc[0], actual.iloc[1])
npt.assert_almost_equal(actual.index.values, q.values)
def test_datetimes_to_durations_with_different_frequencies():
# days
start_date = ["2013-10-10 0:00:00", "2013-10-09", "2012-10-10"]
end_date = ["2013-10-13", "2013-10-10 0:00:00", "2013-10-15"]
T, C = utils.datetimes_to_durations(start_date, end_date)
npt.assert_almost_equal(T, np.array([3, 1, 5 + 365]))
npt.assert_almost_equal(C, np.array([1, 1, 1], dtype=bool))
# years
start_date = ["2013-10-10", "2013-10-09", "2012-10-10"]
end_date = ["2013-10-13", "2013-10-10", "2013-10-15"]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="Y")
npt.assert_almost_equal(T, np.array([0, 0, 1]))
npt.assert_almost_equal(C, np.array([1, 1, 1], dtype=bool))
# hours
start_date = ["2013-10-10 17:00:00", "2013-10-09 0:00:00", "2013-10-10 23:00:00"]
end_date = ["2013-10-10 18:00:00", "2013-10-10 0:00:00", "2013-10-11 2:00:00"]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="h")
npt.assert_almost_equal(T, np.array([1, 24, 3]))
npt.assert_almost_equal(C, np.array([1, 1, 1], dtype=bool))
def test_datetimes_to_durations_will_handle_dates_above_fill_date():
start_date = ["2013-10-08", "2013-10-09", "2013-10-10"]
end_date = ["2013-10-10", "2013-10-12", "2013-10-15"]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="D", fill_date="2013-10-12")
npt.assert_almost_equal(C, np.array([1, 1, 0], dtype=bool))
npt.assert_almost_equal(T, np.array([2, 3, 2]))
def test_datetimes_to_durations_will_handle_no_end_date_with_multi_fill_date():
start_date = ["2013-10-08", "2013-10-09", "2013-10-10"]
end_date = ["2013-10-10", None, None]
last_observation = ["2013-10-10", "2013-10-12", "2013-10-14"]
T, E = utils.datetimes_to_durations(start_date, end_date, freq="D", fill_date=last_observation)
npt.assert_almost_equal(E, np.array([1, 0, 0], dtype=bool))
npt.assert_almost_equal(T, np.array([2, 3, 4]))
def test_datetimes_to_durations_will_handle_dates_above_multi_fill_date():
start_date = ["2013-10-08", "2013-10-09", "2013-10-10"]
end_date = ["2013-10-10", None, "2013-10-20"]
last_observation = ["2013-10-10", "2013-10-12", "2013-10-14"]
T, E = utils.datetimes_to_durations(start_date, end_date, freq="D", fill_date=last_observation)
npt.assert_almost_equal(E, np.array([1, 0, 0], dtype=bool))
npt.assert_almost_equal(T, np.array([2, 3, 4]))
def test_datetimes_to_durations_censor():
start_date = ["2013-10-10", "2013-10-09", "2012-10-10"]
end_date = ["2013-10-13", None, ""]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="Y")
npt.assert_almost_equal(C, np.array([1, 0, 0], dtype=bool))
def test_datetimes_to_durations_custom_censor():
start_date = ["2013-10-10", "2013-10-09", "2012-10-10"]
end_date = ["2013-10-13", "NaT", ""]
T, C = utils.datetimes_to_durations(start_date, end_date, freq="Y", na_values=["NaT", ""])
npt.assert_almost_equal(C, np.array([1, 0, 0], dtype=bool))
def test_survival_events_from_table_no_ties():
T, C = np.array([1, 2, 3, 4, 4, 5]), np.array([1, 0, 1, 1, 0, 1])
d = utils.survival_table_from_events(T, C)
T_, C_, W_ = utils.survival_events_from_table(d[["censored", "observed"]])
npt.assert_array_equal(T, T_)
npt.assert_array_equal(C, C_)
npt.assert_array_equal(W_, np.ones_like(T))
def test_survival_events_from_table_with_ties():
T, C = np.array([1, 2, 3, 4, 4, 5]), np.array([1, 0, 1, 1, 1, 1])
d = utils.survival_table_from_events(T, C)
T_, C_, W_ = utils.survival_events_from_table(d[["censored", "observed"]])
npt.assert_array_equal([1, 2, 3, 4, 5], T_)
npt.assert_array_equal([1, 0, 1, 1, 1], C_)
npt.assert_array_equal([1, 1, 1, 2, 1], W_)
def test_survival_table_from_events_with_non_trivial_censorship_column():
T = np.random.exponential(5, size=50)
malformed_C = np.random.binomial(2, p=0.8) # set to 2 on purpose!
proper_C = malformed_C > 0 # (proper "boolean" array)
table1 = utils.survival_table_from_events(T, malformed_C, np.zeros_like(T))
table2 = utils.survival_table_from_events(T, proper_C, np.zeros_like(T))
assert_frame_equal(table1, table2)
def test_group_survival_table_from_events_on_waltons_data():
df = load_waltons()
g, removed, observed, censored = utils.group_survival_table_from_events(df["group"], df["T"], df["E"])
assert len(g) == 2
assert all(removed.columns == ["removed:miR-137", "removed:control"])
assert all(removed.index == observed.index)
assert all(removed.index == censored.index)
def test_group_survival_table_with_weights():
df = load_waltons()
dfw = df.groupby(["T", "E", "group"]).size().reset_index().rename(columns={0: "weights"})
gw, removedw, observedw, censoredw = utils.group_survival_table_from_events(
dfw["group"], dfw["T"], dfw["E"], weights=dfw["weights"]
)
assert len(gw) == 2
assert all(removedw.columns == ["removed:miR-137", "removed:control"])
assert all(removedw.index == observedw.index)
assert all(removedw.index == censoredw.index)
g, removed, observed, censored = utils.group_survival_table_from_events(df["group"], df["T"], df["E"])
assert_frame_equal(removedw, removed)
assert_frame_equal(observedw, observed)
assert_frame_equal(censoredw, censored)
def test_survival_table_from_events_binned_with_empty_bin():
df = load_waltons()
ix = df["group"] == "miR-137"
event_table = utils.survival_table_from_events(df.loc[ix]["T"], df.loc[ix]["E"], intervals=[0, 10, 20, 30, 40, 50])
assert not pd.isnull(event_table).any().any()
def test_survival_table_from_events_at_risk_column():
df = load_waltons()
# from R
expected = [
163.0,
162.0,
160.0,
157.0,
154.0,
152.0,
151.0,
148.0,
144.0,
139.0,
134.0,
133.0,
130.0,
128.0,
126.0,
119.0,
118.0,
108.0,
107.0,
99.0,
96.0,
89.0,
87.0,
69.0,
65.0,
49.0,
38.0,
36.0,
27.0,
24.0,
14.0,
1.0,
]
df = utils.survival_table_from_events(df["T"], df["E"])
assert list(df["at_risk"][1:]) == expected # skip the first event as that is the birth time, 0.
def test_survival_table_to_events_casts_to_float():
T, C = (np.array([1, 2, 3, 4, 4, 5]), np.array([True, False, True, True, True, True]))
d = utils.survival_table_from_events(T, C, np.zeros_like(T))
npt.assert_array_equal(d["censored"].values, np.array([0.0, 0.0, 1.0, 0.0, 0.0, 0.0]))
npt.assert_array_equal(d["removed"].values, np.array([0.0, 1.0, 1.0, 1.0, 2.0, 1.0]))
def test_group_survival_table_from_events_works_with_series():
df = pd.DataFrame([[1, True, 3], [1, True, 3], [4, False, 2]], columns=["duration", "E", "G"])
ug, _, _, _ = utils.group_survival_table_from_events(df.G, df.duration, df.E, np.array([[0, 0, 0]]))
npt.assert_array_equal(ug, np.array([3, 2]))
def test_survival_table_from_events_will_collapse_if_asked():
T, C = np.array([1, 3, 4, 5]), np.array([True, True, True, True])
table = utils.survival_table_from_events(T, C, collapse=True)
assert table.index.tolist() == [
pd.Interval(-0.001, 3.5089999999999999, closed="right"),
pd.Interval(3.5089999999999999, 7.0179999999999998, closed="right"),
]
def test_survival_table_from_events_will_collapse_to_desired_bins():
T, C = np.array([1, 3, 4, 5]), np.array([True, True, True, True])
table = utils.survival_table_from_events(T, C, collapse=True, intervals=[0, 4, 8])
assert table.index.tolist() == [pd.Interval(-0.001, 4, closed="right"), pd.Interval(4, 8, closed="right")]
def test_cross_validator_returns_k_results():
cf = CoxPHFitter()
results = utils.k_fold_cross_validation(cf, load_regression_dataset(), duration_col="T", event_col="E", k=3)
assert len(results) == 3
results = utils.k_fold_cross_validation(cf, load_regression_dataset(), duration_col="T", event_col="E", k=5)
assert len(results) == 5
def test_cross_validator_returns_fitters_k_results():
cf = CoxPHFitter()
fitters = [cf, cf]
results = utils.k_fold_cross_validation(fitters, load_regression_dataset(), duration_col="T", event_col="E", k=3)
assert len(results) == 2
assert len(results[0]) == len(results[1]) == 3
results = utils.k_fold_cross_validation(fitters, load_regression_dataset(), duration_col="T", event_col="E", k=5)
assert len(results) == 2
assert len(results[0]) == len(results[1]) == 5
def test_cross_validator_with_predictor():
cf = CoxPHFitter()
results = utils.k_fold_cross_validation(cf, load_regression_dataset(), duration_col="T", event_col="E", k=3)
assert len(results) == 3
def test_cross_validator_with_stratified_cox_model():
cf = CoxPHFitter(strata=["race"])
utils.k_fold_cross_validation(cf, load_rossi(), duration_col="week", event_col="arrest")
def test_cross_validator_with_specific_loss_function():
cf = CoxPHFitter()
results_sq = utils.k_fold_cross_validation(
cf, load_regression_dataset(), scoring_method="concordance_index", duration_col="T", event_col="E"
)
def test_concordance_index():
size = 1000
T = np.random.normal(size=size)
P = np.random.normal(size=size)
C = np.random.choice([0, 1], size=size)
Z = np.zeros_like(T)
# Zeros is exactly random
assert utils.concordance_index(T, Z) == 0.5
assert utils.concordance_index(T, Z, C) == 0.5
# Itself is 1
assert utils.concordance_index(T, T) == 1.0
assert utils.concordance_index(T, T, C) == 1.0
# Random is close to 0.5
assert abs(utils.concordance_index(T, P) - 0.5) < 0.05
assert abs(utils.concordance_index(T, P, C) - 0.5) < 0.05
def test_survival_table_from_events_with_non_negative_T_and_no_lagged_births():
n = 10
T = np.arange(n)
C = [True] * n
min_obs = [0] * n
df = utils.survival_table_from_events(T, C, min_obs)
assert df.iloc[0]["entrance"] == n
assert df.index[0] == T.min()
assert df.index[-1] == T.max()
def test_survival_table_from_events_with_negative_T_and_no_lagged_births():
n = 10
T = np.arange(-n / 2, n / 2)
C = [True] * n
min_obs = None
df = utils.survival_table_from_events(T, C, min_obs)
assert df.iloc[0]["entrance"] == n
assert df.index[0] == T.min()
assert df.index[-1] == T.max()
def test_survival_table_from_events_with_non_negative_T_and_lagged_births():
n = 10
T = np.arange(n)
C = [True] * n
min_obs = np.linspace(0, 2, n)
df = utils.survival_table_from_events(T, C, min_obs)
assert df.iloc[0]["entrance"] == 1
assert df.index[0] == T.min()
assert df.index[-1] == T.max()
def test_survival_table_from_events_with_negative_T_and_lagged_births():
n = 10
T = np.arange(-n / 2, n / 2)
C = [True] * n
min_obs = np.linspace(-n / 2, 2, n)
df = utils.survival_table_from_events(T, C, min_obs)
assert df.iloc[0]["entrance"] == 1
assert df.index[0] == T.min()
assert df.index[-1] == T.max()
def test_survival_table_from_events_raises_value_error_if_too_early_births():
n = 10
T = np.arange(0, n)
C = [True] * n
min_obs = T.copy()
min_obs[1] = min_obs[1] + 10
with pytest.raises(ValueError):
utils.survival_table_from_events(T, C, min_obs)
class TestLongDataFrameUtils(object):
@pytest.fixture
def seed_df(self):
df = pd.DataFrame.from_records([{"id": 1, "var1": 0.1, "T": 10, "E": 1}, {"id": 2, "var1": 0.5, "T": 12, "E": 0}])
return utils.to_long_format(df, "T")
@pytest.fixture
def cv1(self):
return pd.DataFrame.from_records(
[
{"id": 1, "t": 0, "var2": 1.4},
{"id": 1, "t": 4, "var2": 1.2},
{"id": 1, "t": 8, "var2": 1.5},
{"id": 2, "t": 0, "var2": 1.6},
]
)
@pytest.fixture
def cv2(self):
return pd.DataFrame.from_records(
[{"id": 1, "t": 0, "var3": 0}, {"id": 1, "t": 6, "var3": 1}, {"id": 2, "t": 0, "var3": 0}]
)
def test_order_of_adding_covariates_doesnt_matter(self, seed_df, cv1, cv2):
df12 = seed_df.pipe(utils.add_covariate_to_timeline, cv1, "id", "t", "E").pipe(
utils.add_covariate_to_timeline, cv2, "id", "t", "E"
)
df21 = seed_df.pipe(utils.add_covariate_to_timeline, cv2, "id", "t", "E").pipe(
utils.add_covariate_to_timeline, cv1, "id", "t", "E"
)
assert_frame_equal(df21, df12, check_like=True)
def test_order_of_adding_covariates_doesnt_matter_in_cumulative_sum(self, seed_df, cv1, cv2):
df12 = seed_df.pipe(utils.add_covariate_to_timeline, cv1, "id", "t", "E", cumulative_sum=True).pipe(
utils.add_covariate_to_timeline, cv2, "id", "t", "E", cumulative_sum=True
)
df21 = seed_df.pipe(utils.add_covariate_to_timeline, cv2, "id", "t", "E", cumulative_sum=True).pipe(
utils.add_covariate_to_timeline, cv1, "id", "t", "E", cumulative_sum=True
)
assert_frame_equal(df21, df12, check_like=True)
def test_adding_cvs_with_the_same_column_name_will_insert_appropriately(self, seed_df):
seed_df = seed_df[seed_df["id"] == 1]
cv = pd.DataFrame.from_records([{"id": 1, "t": 1, "var1": 1.0}, {"id": 1, "t": 2, "var1": 2.0}])
df = seed_df.pipe(utils.add_covariate_to_timeline, cv, "id", "t", "E")
expected = pd.DataFrame.from_records(
[
{"E": False, "id": 1, "stop": 1.0, "start": 0, "var1": 0.1},
{"E": False, "id": 1, "stop": 2.0, "start": 1, "var1": 1.0},
{"E": True, "id": 1, "stop": 10.0, "start": 2, "var1": 2.0},
]
)
assert_frame_equal(df, expected, check_like=True)
def test_adding_cvs_with_the_same_column_name_will_sum_update_appropriately(self, seed_df):
seed_df = seed_df[seed_df["id"] == 1]
new_value_at_time_0 = 1.0
old_value_at_time_0 = seed_df["var1"].iloc[0]
cv = pd.DataFrame.from_records([{"id": 1, "t": 0, "var1": new_value_at_time_0, "var2": 2.0}])
df = seed_df.pipe(utils.add_covariate_to_timeline, cv, "id", "t", "E", overwrite=False)
expected = pd.DataFrame.from_records(
[{"E": True, "id": 1, "stop": 10.0, "start": 0, "var1": new_value_at_time_0 + old_value_at_time_0, "var2": 2.0}]
)
assert_frame_equal(df, expected, check_like=True)
def test_adding_cvs_with_the_same_column_name_will_overwrite_update_appropriately(self, seed_df):
seed_df = seed_df[seed_df["id"] == 1]
new_value_at_time_0 = 1.0
cv = pd.DataFrame.from_records([{"id": 1, "t": 0, "var1": new_value_at_time_0}])
df = seed_df.pipe(utils.add_covariate_to_timeline, cv, "id", "t", "E", overwrite=True)
expected = pd.DataFrame.from_records([{"E": True, "id": 1, "stop": 10.0, "start": 0, "var1": new_value_at_time_0}])
assert_frame_equal(df, expected, check_like=True)
def test_enum_flag(self, seed_df, cv1, cv2):
df = seed_df.pipe(utils.add_covariate_to_timeline, cv1, "id", "t", "E", add_enum=True).pipe(
utils.add_covariate_to_timeline, cv2, "id", "t", "E", add_enum=True
)
idx = df["id"] == 1
n = idx.sum()
try:
assert_series_equal(df["enum"].loc[idx], pd.Series(np.arange(1, n + 1)), check_names=False)
except AssertionError as e:
# Windows Numpy and Pandas sometimes have int32 or int64 as default dtype
if os.name == "nt" and "int32" in str(e) and "int64" in str(e):
assert_series_equal(
df["enum"].loc[idx], pd.Series(np.arange(1, n + 1), dtype=df["enum"].loc[idx].dtypes), check_names=False
)
else:
raise e
def test_event_col_is_properly_inserted(self, seed_df, cv2):
df = seed_df.pipe(utils.add_covariate_to_timeline, cv2, "id", "t", "E")
assert df.groupby("id").last()["E"].tolist() == [1, 0]
def test_redundant_cv_columns_are_dropped(self, seed_df):
seed_df = seed_df[seed_df["id"] == 1]
cv = pd.DataFrame.from_records(
[
{"id": 1, "t": 0, "var3": 0, "var4": 1},
{"id": 1, "t": 1, "var3": 0, "var4": 1}, # redundant, as nothing changed during the interval
{"id": 1, "t": 3, "var3": 0, "var4": 1}, # redundant, as nothing changed during the interval
{"id": 1, "t": 6, "var3": 1, "var4": 1},
{"id": 1, "t": 9, "var3": 1, "var4": 1}, # redundant, as nothing changed during the interval
]
)
df = seed_df.pipe(utils.add_covariate_to_timeline, cv, "id", "t", "E")
assert df.shape[0] == 2
def test_will_convert_event_column_to_bools(self, seed_df, cv1):
seed_df["E"] = seed_df["E"].astype(int)
df = seed_df.pipe(utils.add_covariate_to_timeline, cv1, "id", "t", "E")
assert df.dtypes["E"] == bool
def test_if_cvs_include_a_start_time_after_the_final_time_it_is_excluded(self, seed_df):
max_T = seed_df["stop"].max()
cv = pd.DataFrame.from_records(
[
{"id": 1, "t": 0, "var3": 0},
{"id": 1, "t": max_T + 10, "var3": 1}, # will be excluded
{"id": 2, "t": 0, "var3": 0},
]
)
df = seed_df.pipe(utils.add_covariate_to_timeline, cv, "id", "t", "E")
assert df.shape[0] == 2
def test_if_cvs_include_a_start_time_before_it_is_included(self, seed_df):
min_T = seed_df["start"].min()
cv = pd.DataFrame.from_records(
[{"id": 1, "t": 0, "var3": 0}, {"id": 1, "t": min_T - 1, "var3": 1}, {"id": 2, "t": 0, "var3": 0}]
)
df = seed_df.pipe(utils.add_covariate_to_timeline, cv, "id", "t", "E")
assert df.shape[0] == 3
def test_cvs_with_null_values_are_dropped(self, seed_df):
seed_df = seed_df[seed_df["id"] == 1]
cv = pd.DataFrame.from_records(
[{"id": None, "t": 0, "var3": 0}, {"id": 1, "t": None, "var3": 1}, {"id": 2, "t": 0, "var3": None}]
)
df = seed_df.pipe(utils.add_covariate_to_timeline, cv, "id", "t", "E")
assert df.shape[0] == 1
def test_a_new_row_is_not_created_if_start_times_are_the_same(self, seed_df):
seed_df = seed_df[seed_df["id"] == 1]
cv1 = pd.DataFrame.from_records([{"id": 1, "t": 0, "var3": 0}, {"id": 1, "t": 5, "var3": 1}])
cv2 = pd.DataFrame.from_records(
[{"id": 1, "t": 0, "var4": 0}, {"id": 1, "t": 5, "var4": 1.5}, {"id": 1, "t": 6, "var4": 1.7}]
)
df = seed_df.pipe(utils.add_covariate_to_timeline, cv1, "id", "t", "E").pipe(
utils.add_covariate_to_timeline, cv2, "id", "t", "E"
)
assert df.shape[0] == 3
def test_error_is_raised_if_columns_are_missing_in_seed_df(self, seed_df, cv1):
del seed_df["start"]
with pytest.raises(IndexError):
utils.add_covariate_to_timeline(seed_df, cv1, "id", "t", "E")
def test_cumulative_sum(self):
seed_df = pd.DataFrame.from_records([{"id": 1, "start": 0, "stop": 5, "E": 1}])
cv = pd.DataFrame.from_records([{"id": 1, "t": 0, "var4": 1}, {"id": 1, "t": 1, "var4": 1}, {"id": 1, "t": 3, "var4": 1}])
df = seed_df.pipe(utils.add_covariate_to_timeline, cv, "id", "t", "E", cumulative_sum=True)
expected = pd.DataFrame.from_records(
[
{"id": 1, "start": 0, "stop": 1.0, "cumsum_var4": 1, "E": False},
{"id": 1, "start": 1, "stop": 3.0, "cumsum_var4": 2, "E": False},
{"id": 1, "start": 3, "stop": 5.0, "cumsum_var4": 3, "E": True},
]
)
assert_frame_equal(expected, df, check_like=True)
def test_delay(self, cv2):
seed_df = pd.DataFrame.from_records([{"id": 1, "start": 0, "stop": 50, "E": 1}])
cv3 = pd.DataFrame.from_records(
[{"id": 1, "t": 0, "varA": 2}, {"id": 1, "t": 10, "varA": 4}, {"id": 1, "t": 20, "varA": 6}]
)
df = seed_df.pipe(utils.add_covariate_to_timeline, cv3, "id", "t", "E", delay=2).fillna(0)
expected = pd.DataFrame.from_records(
[
{"start": 0, "stop": 2.0, "varA": 0.0, "id": 1, "E": False},
{"start": 2, "stop": 12.0, "varA": 2.0, "id": 1, "E": False},
{"start": 12, "stop": 22.0, "varA": 4.0, "id": 1, "E": False},
{"start": 22, "stop": 50.0, "varA": 6.0, "id": 1, "E": True},
]
)
assert_frame_equal(expected, df, check_like=True)
def test_covariates_from_event_matrix_with_simple_addition(self):
base_df = pd.DataFrame([[1, 0, 5, 1], [2, 0, 4, 1], [3, 0, 8, 1], [4, 0, 4, 1]], columns=["id", "start", "stop", "e"])
event_df = pd.DataFrame([[1, 1], [2, 2], [3, 3], [4, None]], columns=["id", "poison"])
cv = utils.covariates_from_event_matrix(event_df, "id")
ldf = utils.add_covariate_to_timeline(base_df, cv, "id", "duration", "e", cumulative_sum=True)
assert pd.notnull(ldf).all().all()
expected = pd.DataFrame(
[
(0.0, 0.0, 1.0, 1, False),
(1.0, 1.0, 5.0, 1, True),
(0.0, 0.0, 2.0, 2, False),
(2.0, 1.0, 4.0, 2, True),
(0.0, 0.0, 3.0, 3, False),
(3.0, 1.0, 8.0, 3, True),
(0.0, 0.0, 4.0, 4, True),
],
columns=["start", "cumsum_poison", "stop", "id", "e"],
)
| assert_frame_equal(expected, ldf, check_dtype=False, check_like=True) | pandas.testing.assert_frame_equal |
import pandas as pd
mydataset = {
'name': ["Felipe", "Jesus", "Gabriel", "Rony"],
'age': [20, 19, 21, 21]
}
mydataframe = | pd.DataFrame(mydataset) | pandas.DataFrame |
"""
Utilities for examining ABS NOM unit record
"""
import pickle
from pathlib import Path
import pandas as pd
import numpy as np
import matplotlib as mpl
from matplotlib import pyplot as plt
from IPython.display import display_html, display
from matplotlib.patches import Patch
from chris_utilities import adjust_chart
import file_paths
# the data storage
base_data_folder = file_paths.base_data_folder
abs_data_folder = file_paths.abs_data_folder
unit_record_folder = file_paths.unit_record_folder
individual_movements_folder = file_paths.individual_movements_folder
abs_nom_propensity = file_paths.abs_nom_propensity
abs_traveller_characteristics_folder = file_paths.abs_traveller_characteristics
grant_data_folder = file_paths.grant_data_folder
dict_data_folder = file_paths.dict_data_folder
program_data_folder = file_paths.program_data_folder
# local to current forecasting period folder
forecasting_data_folder = Path("data/forecasting")
forecasting_input_folder = forecasting_data_folder / "input"
### Utilities to read in raw ABS data:
def process_original_ABS_data(abs_original_data_folder, analysis_folder):
"""Process the SAS data, include removing previous preliminary parquet
and replace with final parquet, and add new preliminary parquet for latest quarter
Parameters
----------
    abs_original_data_folder : Path object
        SAS data directory
    analysis_folder : Path object
        ABS Traveller characteristics folder path
Returns
-------
None
Raises
------
ValueError
Check ABS NOM files must commence with p or f
This differentiates between preliminary and final NOM
Raise error to advice user that RTS file name convention not in place
"""
# TODO: read from the zip file rather than unzipped data
# variables to convert to ints or strings
ints_preliminary = [
"person_id",
"sex",
"country_of_birth",
"country_of_citizenship",
"country_of_stay",
"initial_erp_flag",
"final_erp_flag",
"duration_movement_sort_key",
"nom_direction",
"duration_in_australia_category",
"count_of_movements",
"initial_category_of_travel",
"age",
"status_flag",
"reason_for_journey",
"odb_time_code",
]
## For preliminary leave as floats: 'rky_val'
ints_final = [
"person_id",
"sex",
"country_of_birth",
"country_of_citizenship",
"country_of_stay",
"initial_erp_flag",
"final_erp_flag",
"duration_movement_sort_key",
"nom_direction",
"duration_in_australia_category",
"count_of_movements",
"initial_category_of_travel",
"age",
"status_flag",
"reason_for_journey",
"odb_time_code",
"net_erp_effect",
"nom_propensity",
]
# string vars are the same across preliminary and final
string_vars = [
"visa_group",
"visa_subclass",
"visa_applicant_type",
"visa_stream_code",
"stream_code_out",
"state",
"direction",
]
date_times = ["Duration_movement_date"]
    ### For unzipped SAS data files
    ### Requires both options - older folders may not have the zipped version
# for abs_filepath in sorted(abs_original_data_folder.glob("*.sas7bdat")):
# print(abs_filepath.stem)
# df = pd.read_sas(abs_filepath, encoding="latin-1", format="sas7bdat").rename(
# columns=str.lower
# )
for abs_filepath in sorted(abs_original_data_folder.glob("*.sas7bdat")):
print(abs_filepath.stem)
df = pd.read_sas(abs_filepath, encoding="latin-1", format="sas7bdat").rename(
columns=str.lower
)
# for zip_filename in sorted(abs_original_data_folder.glob("*.zip")):
# zipped_file = zipfile.ZipFile(zip_filename, 'r')
# # There's only expected to be one file in each zip
# if len(zipped_file.namelist()) != 1:
# raise ValueError("Chris: zipped file has more than one file...recode!")
# sasfile = zipfile.open(zipped_file.namelist()[0])
# print(sasfile.stem)
# df = pd.read_sas(sasfile, encoding="latin-1", format="sas7bdat").rename(
# columns=str.lower
# )
### need to fix all abs_filepath below
# adjust datatypes and write out:
# string vars are the same across preliminary and final
for col in string_vars:
df[col] = df[col].astype("category")
# integer variables differ across final and preliminary data
if abs_filepath.stem[0] == "p": # preliminary NOM
for col in ints_preliminary:
df[col] = df[col].astype(int)
elif abs_filepath.stem[0] == "f": # final NOM
for col in ints_final:
df[col] = df[col].astype(int)
else:
raise ValueError(
"Chris - ABS NOM files must commence with p or f: {abs_filepath.stem} does not!"
)
write_outfile(df, abs_filepath, abs_original_data_folder, analysis_folder)
return None
def write_outfile(df, abs_filepath, abs_original_data_folder, analysis_folder):
"""
write out the processed ABS data to the ABS data folder and the analysis folder
Parameters
----------
df: pandas dataframe to write out
abs_filepath: Path object of original ABS file
abs_original_data_folder: Path object of path to ABS data folder
analysis_folder: Path to folder containing all NOM unit record parquet files
Returns
-------
None
"""
# ABS NOM filenames are of the type xxxx2018q1.sas...
    # Want to extract the date component: 2018q1
date_start = abs_filepath.stem.find("2")
if date_start != -1: # if a '2' is found
filename_date = abs_filepath.stem[date_start:]
## append '_p' if it's a preliminary file
if abs_filepath.stem[0] == "p":
filename_date = filename_date + "_p"
else:
raise ValueError(
f"Chris - filename {abs_filepath.stem} does not appear to have a 20XXqY date in it"
)
filename = "traveller_characteristics" + filename_date + ".parquet"
# Write to original ABS folder:
# to keep as history for comparison with updated preliminary/final files
df.to_parquet(abs_original_data_folder / filename)
# Write to folder for analysis
df.to_parquet(analysis_folder / filename)
# if a final file replaces a preliminary file - delete it from the analysis file
if abs_filepath.stem[0] == "f":
preliminary_filename = (
"traveller_characteristics" + filename_date + "_p" + ".parquet"
)
preliminary_path = analysis_folder / preliminary_filename
if preliminary_path.exists():
preliminary_path.unlink()
return None
def get_visa_code_descriptions(vsc_list):
"""
get visa code descriptions
parameters
----------
vsc_list: list
visa suc codes as strings
returns
-------
a dictionary matching visa subcode to description
"""
with open(dict_data_folder / "dict_visa_code_descriptions.pickle", "rb") as pickle_file:
dict_visa_code_descriptions = pickle.load(pickle_file)
for vsc in vsc_list:
print(dict_visa_code_descriptions[vsc])
return dict_visa_code_descriptions
def get_monthly(
df, net_erp_effect, group_by=("Duration_movement_date", "Visa_subclass")
):
"""
Aggregate unit record NOM data to monthly by visa subclass
"""
summary = (
df[df.net_erp_effect == net_erp_effect]
.groupby(group_by)
.net_erp_effect.sum()
.unstack()
)
return summary.resample("M").sum()
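# Illustrative sketch only (not part of the original module): how get_monthly might be
# applied to one processed traveller-characteristics file. The file name is an assumption,
# and the lower-case column names follow process_original_ABS_data.
def _example_monthly_arrivals():
    df = pd.read_parquet(
        abs_traveller_characteristics_folder / "traveller_characteristics2018q1.parquet"
    )
    # net_erp_effect == 1 selects arrivals; -1 would select departures
    return get_monthly(
        df, net_erp_effect=1, group_by=["duration_movement_date", "visa_subclass"]
    )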
def read_single_NOM_file(data_folder, file_name, field_list=None):
if field_list is None:
df = pd.read_parquet(data_folder / file_name)
else:
df = pd.read_parquet(data_folder / file_name, columns=field_list)
return df
def get_NOM_monthly_old(net_erp_effect, data_folder=Path("parquet")):
"""
A generator for returning NOM data selected for arrivals or departures
Parameters
----------
net_erp_effect: contribution to NOM: 1 = arrivals, -1 = departure
data_folder: a Path object to the folder containing ABS NOM unit record data
Yields:
-------
NOM_effect: a dataframe selected on net_erp_effect
"""
assert (net_erp_effect == 1) | (net_erp_effect == -1)
for p in sorted(data_folder.glob("*.parq")):
print(p.stem)
df = pd.read_parquet(p)
monthly_nom_outcomes = get_monthly(df, net_erp_effect)
yield monthly_nom_outcomes
def get_visa_groups_old(visa_groups, df_nom):
for group, idx in visa_groups.items():
df = df_nom[idx]
if group not in ["citizens", "student"]: # don't aggregate if in list:
if len(df.columns) > 1:
df = df.sum(axis=1)
df.name = group
if group == "student":
df.columns = [
s.lower().replace(" ", "_") for s in df.columns.droplevel(level=0)
]
# columns to breakout
idx_break_out = ["572", "573", "570"]
idx_break_outnames = ["higher_ed", "vet", "elicos", "student_other"]
df = pd.concat(
[df[idx_break_out], df.drop(columns=idx_break_out).sum(axis=1)], axis=1
)
df.columns = idx_break_outnames
if group == "citizens":
df.columns = [
s.lower().replace(" ", "_") for s in df.columns.droplevel(level=1)
]
yield df
def get_NOM(data_folder, abs_visa_group, nom_fields, abs_visagroup_exists=False):
"""
A generator to return unit records in an ABS visa group
Parameters:
-----------
data_folder: string, path object (pathlib.Path)
assumes contains parquet files
vsc: list
list of visa sub groups
nom_fields: list
        list of nom fields to be extracted from the ABS unit record file
"""
# abs_visa_group_current = ['AUST', 'NZLA', # Australian citizen, NZ citizen
# 'PSKL', 'PFAM', 'POTH', # skill, family, other
# 'TSKL', 'TSTD', 'TWRK', 'TOTH', 'TVIS' #still, student, WHM, other, visitor
# ]
# if not abs_visa_group in abs_visa_group_current:
# raise ValueError(f'Chris: {abs_visa_group} not legitimate ABS visa group.')
if not isinstance(nom_fields, (list, tuple)):
raise ValueError(
"Chris: get_NOM expects {nom_fields} to be a list of fields to extract."
)
for p in sorted(data_folder.glob("*.parquet")):
# Only loop over post 2011Q3 files
if abs_visagroup_exists:
if "ROADS" in p.stem:
continue
print(p.stem)
df = pd.read_parquet(p, columns=nom_fields)
yield df[(df.net_erp_effect != 0) & (df.visa_group == abs_visa_group)]
def append_nom_columns(df):
"""
Append each visa with a NOM column
Parameters
----------
df: data frame
the dataframe has hierarchical columns where:
level[0] has [arrival, departure]
level[1] has [visagroup, VSC, VSC etc]
"""
# set visa subclasses to level 0 & arrival, departure at levet 1)
df.columns = df.columns.swaplevel()
df = df.sort_index(axis="columns")
for col in df.columns.levels[0]:
df[(col, "nom")] = df[(col, "arrival")] - df[(col, "departure")]
df.columns = df.columns.swaplevel()
df = df.sort_index(axis="columns")
return df
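# Illustrative sketch only: a fabricated hierarchical frame showing what append_nom_columns
# produces. Real input comes from the arrivals/departures pipeline above.
def _example_append_nom():
    cols = pd.MultiIndex.from_product([["arrival", "departure"], ["student", "visitor"]])
    df = pd.DataFrame([[10, 4, 6, 2], [12, 5, 7, 3]], columns=cols)
    # adds a ("nom", <visa group>) column for each group: nom = arrival - departure
    return append_nom_columns(df)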
def make_unique_movement_files(characteristics_folder=abs_traveller_characteristics_folder, nom_final=True):
nom_fields = [
"person_id",
"duration_movement_date",
"visa_subclass",
"net_erp_effect",
]
# establish the generators
get_file_paths = gen_nom_files(
        characteristics_folder,
abs_visagroup_exists=False,
nom_final=nom_final)
df_get_fields = gen_nom_fields(get_file_paths, nom_fields)
df_visa_group = gen_get_visa_group(df_get_fields, vsc_list=None)
# build the NOM dataframe
df = (pd.concat(df_visa_group, axis="index", ignore_index=True, sort=False)
.rename({"duration_movement_date": "date"}, axis="columns")
.sort_values(["date", "person_id"])
)
if nom_final:
file_name = "NOM unique movement - final.parquet"
else:
file_name = "NOM unique movement - preliminary.parquet"
df.to_parquet(individual_movements_folder / file_name)
return df
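# Illustrative sketch only: building the final-NOM unique movement file. Assumes the
# traveller characteristics parquet files already exist in the analysis folder.
def _example_build_unique_movements():
    # also writes "NOM unique movement - final.parquet" to individual_movements_folder
    return make_unique_movement_files(nom_final=True)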
# Dictionary utilities
def get_vsc_reference(file_path=None):
"""
Return a dataframe containing definitions and groupings for visa subclasses
    The reference definitions and groupings come from the SQL table 'REF_VISA_SUBCLASS'
It is maintained by the visa stats team.
Parameters:
-----------
file_path: Path or str object
filepath to parquet file
Returns:
-------
dataframe
"""
if file_path == None:
file_path = dict_data_folder / "REF_VISA_SUBCLASS.parquet"
reference_visa_dict = (
pd.read_parquet(file_path)
.rename(columns=str.lower)
.rename(columns=lambda x: x.replace(" ", "_"))
)
return reference_visa_dict
def get_ABS_visa_grouping(file_path=None):
"""
Return a dataframe with ABS visa groupings (in cat no. 3412) by subclass
See ABS Migration unit for updated copies of excel file
Parameters:
-----------
file_path: None or Path object to 'ABS - Visacode3412mapping.xlsx'
Returns:
-------
dataframe
"""
if file_path is None:
file_path = dict_data_folder / "ABS - Visacode3412mapping.xlsx"
abs_3412 = (
pd.read_excel(file_path)
.rename(columns=str.lower)
.rename(columns=lambda x: x.replace(" ", "_"))
# make sure visa subclass code is a string
.assign(visa_subclass_code=lambda x: x.visa_subclass_code.astype(str))
)
return abs_3412
def get_abs_3412_mapper(df_abs_3412=None):
"""
    Return a pandas Series mapper from visa subclass strings to modified ABS groupings
Parameters
----------
df_abs_3412: dataframe, output from get_ABS_visa_grouping
3 columns in the dataframe: visa_subclass_code,
visa_subclass_label,
migration_publication_category
Returns:
--------
abs_3412_mapper
"""
# TODO: add in test that dataframe contains the expected columns
if df_abs_3412 is None:
df_abs_3412 = get_ABS_visa_grouping()
idx = ["visa_subclass_code", "migration_publication_category"]
abs_3412_mapper = df_abs_3412[idx].set_index("visa_subclass_code").squeeze()
    # The ABS migration_publication_category splits students out - put them back into one group
student_mapper = {
"Higher education sector": "Student",
"Student VET": "Student",
"Student other": "Student",
}
abs_3412_mapper[abs_3412_mapper.isin(student_mapper.keys())] = "Student"
## break out a bridging category
bridging = {
"10": "Bridging",
"010": "Bridging",
"020": "Bridging",
"20": "Bridging",
"030": "Bridging",
"30": "Bridging",
"040": "Bridging",
"40": "Bridging",
"041": "Bridging",
"41": "Bridging",
"42": "Bridging",
"042": "Bridging",
"050": "Bridging",
"50": "Bridging",
"051": "Bridging",
"51": "Bridging",
"060": "Bridging",
"60": "Bridging",
"070": "Bridging",
}
idx = abs_3412_mapper.index.isin(bridging.keys())
abs_3412_mapper[idx] = "Bridging"
# as the mapper is used to generate variable names (in columns), make lowercase, no breaks
abs_3412_mapper = abs_3412_mapper.str.lower().str.replace(" ", "_")
return abs_3412_mapper
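# Illustrative sketch only: applying the mapper to a handful of made-up subclass codes.
def _example_map_subclasses_to_abs_groups():
    mapper = get_abs_3412_mapper()
    codes = pd.Series(["573", "010", "444"])  # the codes here are purely illustrative
    return codes.map(mapper)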
def get_ABS_3412_definitions(abs_3412_excel_path):
"""
    Return a dataframe of ABS 3412 visa group definitions keyed by visa subclass
Parameters:
-----------
    abs_3412_excel_path: Path object, or str
absolute path to ABS 3412 visa groupings and subclasses
Return:
-------
Dataframe
with visa subclass as index,
column 0 is home affairs visa reporting subclass defintions
column 1 is ABS visa
"""
abs_3412_def = (
pd.read_excel(abs_3412_excel_path)
.rename(str.lower, axis="columns")
.rename(lambda x: x.replace(" ", "_"), axis="columns")
# make sure visa subclass code are all strings
.assign(visa_subclass_code=lambda x: x.visa_subclass_code.astype(str))
.set_index("visa_subclass_code")
)
student_mapper = {
"Higher education sector": "Student",
"Student VET": "Student",
"Student other": "Student",
}
# using map writes NaNs for items not being mapped rather than ignoring
# abs_3412_def = abs_3412_def.map(student_mapper)
idx = abs_3412_def["migration_publication_category"].isin(student_mapper.keys())
abs_3412_def.loc[idx, "migration_publication_category"] = "Student"
return abs_3412_def
# Generic Chart Utilities - always check consistent with Chris_utiltiies
def adjust_chart(ax, ylim_min=None, do_thousands=False):
"""
add second y axis, remove borders, set grid_lines on
Parameters
----------
ax: ax
the left hand axis to be swapped
# TODO: make it so that the side of the axis is endogenous, and the opposite side is created
do_thousands: boolean
if True, call thousands style
Returns:
-------
ax, ax2
"""
if ylim_min != None:
ax.set_ylim(ylim_min, None)
# remove second axes if it exists
# this will occur when multiple calls to a figure are made - eg plotting forecasts on top of actuals
fig = ax.get_figure()
if len(fig.axes) == 2:
fig.axes[1].remove()
# ax, ax2 = adjust_chart(ax, do_thousands=True)
# else:
# ax2 = fig.axes[1]
ax2 = ax.twinx()
ax2.set_ylim(ax.get_ylim())
ax.set_xlabel("")
if ax.get_ylim()[0] < 0:
ax.spines["bottom"].set_position(("data", 0))
ax2.spines["bottom"].set_visible(False)
for axe in ax.get_figure().axes:
axe.tick_params(axis="y", length=0)
for spine in ["top", "left", "right"]:
axe.spines[spine].set_visible(False)
ax.set_axisbelow(True)
ax.grid(axis="y", alpha=0.5, lw=0.8)
if do_thousands:
thousands(ax, ax2)
return ax, ax2
def commas(x, pos):
# formatter function takes tick label and tick position - but position is
# passed from FuncFormatter()
# PEP 378 - format specifier for thousands separator
return "{:,d}".format(int(x))
def thousands(*axes, y=True):
comma_formatter = mpl.ticker.FuncFormatter(commas)
if y:
for ax in axes:
ax.yaxis.set_major_formatter(comma_formatter)
else:
for ax in axes:
ax.xaxis.set_major_formatter(comma_formatter)
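# Illustrative sketch only: the chart helpers applied to fabricated monthly data.
def _example_formatted_axis():
    s = pd.Series(
        [12000, 15500, 14200], index=pd.date_range("2018-01-31", periods=3, freq="M")
    )
    ax = s.plot()
    # mirrors the y axis on the right-hand side and applies the thousands separator
    return adjust_chart(ax, do_thousands=True)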
def set_y_axis_min(vsc):
"""
Determine whether y_axis_min should be zero or a negative value
Parameters
----------
vsc: Pandas Series
Returns
-------
zero or 1.1 * negative minimum of the series
"""
if vsc.min() > 0:
y_axis_min = 0
else:
y_axis_min = 1.1 * vsc.min()
return y_axis_min
######## Charting of ABS NOM output
def plot_visa_group_stacked(df, group, legend=False):
# sort by last observation from lowest to highest
df = df.sort_values(by=df.index[-1], axis=1)
fig, ax = plt.subplots()
ax.stackplot(df.index, *list(df.columns), data=df, labels=df.columns)
ax, ax2 = adjust_chart(ax, df.min().min() * 1.1, do_thousands=True)
# thousands(ax, ax2)
ax.set_title(group)
if legend:
ax.legend(ncol=4)
# label_vsc_stacked(df.iloc[-1], ax, None)
label_vsc_stacked(df.iloc[-1:], ax, None)
# label_vsc_stacked(df.head(1), ax, left=False)
return fig, ax, ax2
def plot_visa_group_line_2(df, group):
fig, ax = plt.subplots()
for col in df.columns:
ax.plot(df[col])
# do total for group
if len(df.columns) > 1:
df_group_total = df.sum(axis=1).to_frame().rename(columns={0: "All"})
df_group_total.plot(ax=ax, ls=":", color="black", legend=None)
label_vsc(df_group_total.tail(1), ax, "black")
ax, ax2 = adjust_chart(ax, do_thousands=True)
ax2.spines["right"].set_position(("outward", 10))
label_vsc(df.tail(1), ax, None)
ax.set_title(group)
ax.set_xlabel("")
return fig, ax, ax2
def plot_visa_group_line_(df, group):
ax = df.plot(legend=None)
fig = ax.get_figure()
# ax, ax2 = adjust_chart(ax)
# thousands(ax, ax2)
# label_vsc(visa_groups_by_year[group].tail(1), ax)
# do total for group
if len(df.columns) > 1:
df_group_total = df.sum(axis=1).to_frame().rename(columns={0: "All"})
df_group_total.plot(ax=ax, ls=":", color="black", legend=None)
label_vsc(df_group_total.tail(1), ax, "black")
ax, ax2 = adjust_chart(ax)
thousands(ax, ax2)
label_vsc(df.tail(1), ax, None)
ax.set_title(group)
ax.set_xlabel("")
return fig, ax, ax2
def plot_visa_group_line(df, group):
ax = df[group].plot(legend=None)
fig = ax.get_figure()
# ax, ax2 = adjust_chart(ax)
# thousands(ax, ax2)
# label_vsc(visa_groups_by_year[group].tail(1), ax)
# do total for group
if len(df[group].columns) > 1:
df_group_total = df[group].sum(axis=1).to_frame().rename(columns={0: "All"})
df_group_total.plot(ax=ax, ls=":", color="black", legend=None)
label_vsc(df_group_total.tail(1), ax, "black")
ax, ax2 = adjust_chart(ax)
thousands(ax, ax2)
label_vsc(df[group].tail(1), ax, None)
ax.set_title(group)
ax.set_xlabel("")
return fig, ax, ax2
def label_vsc(label_positions, ax, color=None):
"""
Plot the vsc label at the vertical position and date given by label_positions
Parameters:
-----------
label_positions: dataframe
Expected to be the last row of visa_groups_by_year dataframe for relevant visa_group
ax: matplotlib axes
Returns:
--------
None
"""
# Make sure it's one row of data, if not, take the last one
if len(label_positions) != 1:
label_positions = label_positions.tail(1)
for i, col in enumerate(label_positions.columns):
# not sure why xaxis isn't dates - resorted to using tick location
# x = mpl.dates.date2num(label_positions.index).values
# x = ax.xaxis.get_ticklocs()[-1]
x = (
mpl.dates.date2num(label_positions.index[-1]) + 30
) # shift label right by 30 days
y = label_positions.iat[0, i]
if color == "black":
y = y * 1.05
if color == None:
color_ = f"C{i % 10}" # use to the modulus operator, and assumes default color scheme with 10 colors
# this ensures that i%10 only returns a value in the range [0,9] regardless of
# number of visa subclasses - ie if i > 9
else:
color_ = color
ax.text(x, y, col, color=color_) # fontsize=14,
return None
def label_vsc_stacked(label_positions, ax, color=None, left=True):
"""
Plot the vsc label at the vertical position and date given by label_positions
Parameters:
-----------
label_positions: dataframe
Expected to be the last row of visa_groups_by_year dataframe for relevant visa_group
ax: matplotlib axes
Returns:
--------
None
"""
# #Make sure it's one row of data, if not, take the last one
# if len(label_positions) != 1:
# label_positions = label_positions.tail(1)
label_positions = label_positions.sort_values(
by=label_positions.index[-1], axis=1
).cumsum(axis=1)
if left:
x = ax.get_xlim()[1] + 0
else:
x = ax.get_xlim()[1] - 0
for i, col in enumerate(label_positions.columns):
y = label_positions.iat[0, i]
if y > 0:
if color == None:
color_ = f"C{i % 10}" # use to the modulus operator, and assumes default color scheme with 10 colors
# this ensures that i%10 only returns a value in the range [0,9] regardless of
# number of visa subclasses - ie if i > 9
else:
color_ = color
ax.text(x, y, col, color=color_) # fontsize=14,
return None
def label_vsc_stacked_(label_positions, ax, color=None, left=True):
"""
Plot the vsc label at the vertical position and date given by label_positions
Parameters:
-----------
label_positions: dataframe
Expected to be the last row of visa_groups_by_year dataframe for relevant visa_group
ax: matplotlib axes
Returns:
--------
None
"""
# #Make sure it's one row of data, if not, take the last one
# if len(label_positions) != 1:
# label_positions = label_positions.tail(1)
label_positions = label_positions.sort_values(
by=label_positions.index[-1], axis=1
).cumsum(axis=1)
for i, col in enumerate(label_positions.columns):
# not sure why xaxis isn't dates - resorted to using tick location
# x = mpl.dates.date2num(label_positions.index).values
if left:
x = ax.xaxis.get_ticklocs()[-1] + 365
else:
x = ax.xaxis.get_ticklocs()[0] - 400
y = label_positions.iat[0, i]
if y > 0:
if color == None:
color_ = f"C{i % 10}" # use to the modulus operator, and assumes default color scheme with 10 colors
# this ensures that i%10 only returns a value in the range [0,9] regardless of
# number of visa subclasses - ie if i > 9
else:
color_ = color
ax.text(x, y, col, color=color_) # fontsize=14,
return None
def plot_vsc_nom_charts(data, ax=None, ls="-", lw=1.75, colors=["C0", "C1", "C2"], legend=True):
"""
Plot a 12 month rolling window chart of nom, arrivals and departures
Parameters:
-----------
    data: data frame with 3 columns labelled arrivals, departures & nom
"""
if ax is None:
ax = plt.gca()
chart_data = data #.copy().rolling(12).sum().dropna()
# work around for pandad datetime[ns] vs matplotlib datetime functionality
# Meant to be resolved in Matplotlib 1.2.3 - but still fails for bar charts
# chart_data.index = chart_data.index.date
l1 = chart_data.arrivals.plot(ax=ax, linewidth=lw, linestyle=ls, color=colors[0])
l2 = chart_data.departures.plot(ax=ax, linewidth=lw, linestyle=ls, color=colors[1])
l3 = chart_data.nom.plot(ax=ax, linewidth=lw, linestyle=ls, color=colors[2])
ax, ax2 = adjust_chart(ax, do_thousands=True)
if legend:
ax.legend(frameon=False, ncol=3)
return ax, ax2, l1, l2, l3
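# Illustrative sketch only: fabricated arrivals/departures series, rolled to a 12-month
# window the way the real pipeline does before plotting.
def _example_plot_nom():
    idx = pd.date_range("2016-01-31", periods=36, freq="M")
    data = pd.DataFrame({"arrivals": 50_000, "departures": 30_000}, index=idx)
    data["nom"] = data.arrivals - data.departures
    return plot_vsc_nom_charts(data.rolling(12).sum().dropna())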
def plot_visa_groups(df, visa_group, window=1, nom=False, vsc=None):
"""
plot visa group and select visa subclasses
Parameters
----------
df: dataframe
multiindex dataframe with arrivals & departures by visa subclasses and the visa group
visa_group: str
the name of the visa group, eg whm, students etc
the name will be column in the second level in the hierarchy
window: int, default=1
rolling window length, 1(default) is no window, 12 = year ending etc
nom: boolean, deault = False
if True, plot NOM
vsc: List or None, default = None
list of visa subclasses to plot. If None(default), print all
"""
if nom:
# is df.copy() defensive driving here or is Chris confused
# about whether the object passed to this function is a copy or a reference
df = append_nom_columns(df.copy())
df = df.rolling(window).sum().dropna()
linewidth = 3
A4_landscape = (11.69, 8.27)
A4_portrait = (8.27, 11.69)
fig, fig_axes = plt.subplots(
figsize=A4_portrait,
nrows=len(df.columns.levels[1]),
sharex=True,
constrained_layout=True,
)
for chart_column, direction in enumerate(df.columns.levels[0]):
# plot visa_group first
df[(direction, visa_group)].plot(ax=fig_axes[0], lw=linewidth)
if direction == "departure":
if nom:
y_axis_min = set_y_axis_min(df[("nom", visa_group)])
else:
y_axis_min = set_y_axis_min(df[(direction, visa_group)])
ax1, ax2 = adjust_chart(fig_axes[0], y_axis_min)
thousands(ax1, ax2)
fig_axes[0].set_title(visa_group, size=14)
df = df.drop((direction, visa_group), axis="columns")
for chart_row, col in enumerate(df[direction].columns):
# Since chart_row is the iterator across VSC's, but fig_axes[0] already holds visa_group plot
# need to add 1 to chart_row to plot visa sub group in subsequent rows
df[(direction, col)].plot(ax=fig_axes[chart_row + 1], lw=linewidth)
# do last otherwise grid line get removed
if direction == "departure":
if nom:
y_axis_min = set_y_axis_min(df[("nom", col)])
else:
y_axis_min = set_y_axis_min(df[(direction, col)])
ax1, ax2 = adjust_chart(fig_axes[chart_row + 1], y_axis_min)
thousands(ax1, ax2)
fig_axes[chart_row + 1].set_title(col, size=14)
return fig, fig_axes
def plot_check_for_gaps(arrivals, departures, abs_grouping, label_top_10=None):
"""
Plot the visa subgroups of a given abs group to assess aggregation requirements
    Rough code transferred from a Jupyter notebook - could be tidied up
    Parameters
    ----------
    arrivals: tidy dataframe with keys date, abs_grouping, visa_label, visa_subclass, count
    departures: tidy dataframe with keys date, abs_grouping, visa_label, visa_subclass, count
abs_grouping: the abs group
"""
def plot_it(df, direction):
ax = df.plot(legend=None)
ax.legend(loc="upper center", bbox_to_anchor=(1.6, 0.9), frameon=False, ncol=1)
ax.set_title(direction)
adjust_chart(ax)
return ax
labels_arrivals = (
arrivals[["abs_grouping", "visa_subclass", "visa_label"]]
.drop_duplicates()
.astype(str)
)
idx_a = labels_arrivals.abs_grouping == abs_grouping
a = labels_arrivals[idx_a].set_index("visa_subclass").visa_label.rename("arrivals")
labels_departures = (
departures[["abs_grouping", "visa_subclass", "visa_label"]]
.drop_duplicates()
.astype(str)
)
idx_d = labels_departures.abs_grouping == abs_grouping
d = (
labels_departures[idx_d]
.set_index("visa_subclass")
.visa_label.rename("departures")
)
display(pd.concat([a, d], axis=1, sort=False).fillna("").rename_axis(index="vsc"))
print()
idx = arrivals.abs_grouping == abs_grouping
df = (
arrivals[idx]
.groupby(["date", "visa_label"])["count"]
.sum()
.unstack("visa_label")
.rolling(12)
.sum()
)
ax_arrivals = plot_it(df, "Arrivals")
idx = departures.abs_grouping == abs_grouping
df = (
departures[idx]
.groupby(["date", "visa_label"])["count"]
.sum()
.unstack("visa_label")
.rolling(12)
.sum()
)
ax_departures = plot_it(df, "Departures")
return ax_arrivals, ax_departures
######### Retriving NOM data for analysis
def get_NOM_final_preliminary(data_folder=individual_movements_folder, arrival=True):
"""
Return dataframe of monthly data by visa subclass
Parameters:
----------
individual_movements_folder: Path or str object
filepath to locations of: 'NOM unique movement - preliminary.parquet'
'NOM unique movement - final.parquet')
arrival: Boolean
flag to get departure or arrival data
Returns
-------
dataframe
"""
    # TODO: change arrival parameter from boolean to string: direction="arrival" as default, values to be "arrival", "departure", "nom"
    # TODO: generalise to return with multiindex of abs visa group by vsc (ie call nomf.make_vsc_multiIndex)
    ## TODO: think about returning both arrivals and departures as a tidy dataset
final = pd.read_parquet(data_folder / "NOM unique movement - final.parquet")
prelim = | pd.read_parquet(data_folder / "NOM unique movement - preliminary.parquet") | pandas.read_parquet |
# -*- coding: utf-8 -*-
# Copyright 2017 <NAME> <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Python modules
import glob
import os
import warnings
# Third party modules
import gpxpy
from gpxpy.gpx import GPXBounds
import pandas as pd
from pandas import DataFrame
from tqdm import tqdm
import geopy
import sqlite3
# Own modules
from trackanimation import utils as trk_utils
from trackanimation.utils import TrackException
class DFTrack:
def __init__(self, df_points=None, columns=None):
if df_points is None:
self.df = DataFrame()
if isinstance(df_points, pd.DataFrame):
self.df = df_points
else:
if columns is None:
columns = ['CodeRoute', 'Latitude', 'Longitude', 'Altitude', 'Date',
'Speed', 'TimeDifference', 'Distance', 'FileName']
self.df = DataFrame(df_points, columns=columns)
def export(self, filename='exported_file', export_format='csv'):
"""
Export a data frame of DFTrack to JSON or CSV.
Parameters
----------
export_format: string
Format to export: JSON or CSV
filename: string
Name of the exported file
"""
if export_format.lower() == 'json':
self.df.reset_index().to_json(orient='records', path_or_buf=filename + '.json')
elif export_format.lower() == 'csv':
self.df.to_csv(path_or_buf=filename + '.csv')
else:
raise TrackException('Must specify a valid format to export', "'%s'" % export_format)
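    # Example (illustrative only, assuming `track` is a DFTrack built elsewhere):
    #   track.export('my_tracks', 'json')   # writes my_tracks.json
    #   track.export('my_tracks', 'csv')    # writes my_tracks.csv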
def getTracks(self):
"""
Makes a copy of the DFTrack.
Explanation:
http://stackoverflow.com/questions/27673231/why-should-i-make-a-copy-of-a-data-frame-in-pandas
Returns
-------
copy: DFTrack
The copy of DFTrack.
"""
warnings.warn("The getTracks function is deprecated and "
"will be removed in version 2.0.0. "
"Use the get_tracks function instead.",
FutureWarning,
stacklevel=8
)
return self.get_tracks()
def get_tracks(self):
"""
Makes a copy of the DFTrack.
Explanation:
http://stackoverflow.com/questions/27673231/why-should-i-make-a-copy-of-a-data-frame-in-pandas
Returns
-------
copy: DFTrack
The copy of DFTrack.
"""
return self.__class__(self.df.copy(), list(self.df))
def sort(self, column_name):
"""
Sorts the data frame by the specified column.
:param column_name: Column name to sort
:type column_name: string_or_list
:return: DFTrack sorted
:rtype: DFTrack
"""
if isinstance(column_name, list):
for column in column_name:
if column not in self.df:
raise TrackException('Column name not found', "'%s'" % column)
else:
if column_name not in self.df:
raise TrackException('Column name not found', "'%s'" % column_name)
return self.__class__(self.df.sort_values(column_name), list(self.df))
def getTracksByPlace(self, place, timeout=10, only_points=True):
"""
Gets the points of the specified place searching in Google's API
and, if it does not get anything, it tries with OpenStreetMap's API.
Parameters
----------
place: string
Place to get the points
timeout: int
Time, in seconds, to wait for the geocoding service to respond
before returning a None value.
only_points: boolean
True to retrieve only the points that cross a place. False to
            retrieve all the points of the tracks that cross a place.
Returns
-------
place: DFTrack
A DFTrack with the points of the specified place or
None if anything is found.
"""
warnings.warn("The getTracksByPlace function is deprecated and "
"will be removed in version 2.0.0. "
"Use the get_tracks_by_place function instead.",
FutureWarning,
stacklevel=8
)
return self.get_tracks_by_place(place, timeout, only_points)
def get_tracks_by_place(self, place, timeout=10, only_points=True, **kwargs):
"""
Gets the points of the specified place searching in Google's API
and, if it does not get anything, it tries with OpenStreetMap's API.
Parameters
----------
place: string
Place to get the points
timeout: int
Time, in seconds, to wait for the geocoding service to respond
before returning a None value.
only_points: boolean
True to retrieve only the points that cross a place. False to
            retrieve all the points of the tracks that cross a place.
Returns
-------
place: DFTrack
A DFTrack with the points of the specified place or
None if anything is found.
"""
google_api_key=kwargs.get('google_api_key', None)
osm_user_agent=kwargs.get('osm_user_agent', None)
track_place = self.get_tracks_by_place_google(place, timeout=timeout,
only_points=only_points,
api_key=google_api_key)
if track_place is not None:
return track_place
track_place = self.get_tracks_by_place_osm(place, timeout=timeout,
only_points=only_points,
user_agent=osm_user_agent)
if track_place is not None:
return track_place
return None
def getTracksByPlaceGoogle(self, place, timeout=10, only_points=True):
"""
Gets the points of the specified place searching in Google's API.
Parameters
----------
place: string
Place to get the points
timeout: int
Time, in seconds, to wait for the geocoding service to respond
before returning a None value.
only_points: boolean
True to retrieve only the points that cross a place. False to
            retrieve all the points of the tracks that cross a place.
Returns
-------
place: DFTrack
A DFTrack with the points of the specified place or
None if anything is found.
"""
warnings.warn("The getTracksByPlaceGoogle function is deprecated and "
"will be removed in version 2.0.0. "
"Use the get_tracks_by_place_google function instead.",
FutureWarning,
stacklevel=8
)
return self.get_tracks_by_place_google(place, timeout, only_points)
def get_tracks_by_place_google(self, place, timeout=10, only_points=True,
api_key=None):
"""
Gets the points of the specified place searching in Google's API.
Parameters
----------
place: string
Place to get the points
timeout: int
Time, in seconds, to wait for the geocoding service to respond
before returning a None value.
only_points: boolean
True to retrieve only the points that cross a place. False to
            retrieve all the points of the tracks that cross a place.
Returns
-------
place: DFTrack
A DFTrack with the points of the specified place or
None if anything is found.
"""
try:
#geolocator = geopy.GoogleV3()
geolocator = geopy.GoogleV3(api_key=api_key)
location = geolocator.geocode(place, timeout=timeout)
except geopy.exc.GeopyError:
return None
southwest_lat = float(location.raw['geometry']['bounds']['southwest']['lat'])
northeast_lat = float(location.raw['geometry']['bounds']['northeast']['lat'])
southwest_lng = float(location.raw['geometry']['bounds']['southwest']['lng'])
northeast_lng = float(location.raw['geometry']['bounds']['northeast']['lng'])
df_place = self.df[(self.df['Latitude'] < northeast_lat) & (self.df['Longitude'] < northeast_lng) &
(self.df['Latitude'] > southwest_lat) & (self.df['Longitude'] > southwest_lng)]
if only_points:
return self.__class__(df_place)
track_list = df_place['CodeRoute'].unique().tolist()
return self.__class__(self.df[self.df['CodeRoute'].isin(track_list)])
def getTracksByPlaceOSM(self, place, timeout=10, only_points=True):
"""
Gets the points of the specified place searching in OpenStreetMap's API.
Parameters
----------
place: string
Place to get the points
timeout: int
Time, in seconds, to wait for the geocoding service to respond
before returning a None value.
only_points: boolean
True to retrieve only the points that cross a place. False to
            retrieve all the points of the tracks that cross a place.
Returns
-------
place: DFTrack
A DFTrack with the points of the specified place or
None if anything is found.
"""
warnings.warn("The getTracksByPlaceOSM function is deprecated and "
"will be removed in version 2.0.0. "
"Use the get_tracks_by_place_osm function instead.",
FutureWarning,
stacklevel=8
)
return self.get_tracks_by_place_osm(place, timeout, only_points)
def get_tracks_by_place_osm(self, place, timeout=10, only_points=True,
**kwargs):
"""
Gets the points of the specified place searching in OpenStreetMap's API.
Parameters
----------
place: string
Place to get the points
timeout: int
Time, in seconds, to wait for the geocoding service to respond
before returning a None value.
only_points: boolean
True to retrieve only the points that cross a place. False to
            retrieve all the points of the tracks that cross a place.
Returns
-------
place: DFTrack
A DFTrack with the points of the specified place or
None if anything is found.
"""
user_agent = kwargs.get('user_agent', None)
cache_db = kwargs.get('cache_db', None)
db = None
cur = None
entry = None
if cache_db:
db = sqlite3.connect(cache_db)
cur = db.cursor()
cur.execute('''CREATE TABLE IF NOT EXISTS location_cache(
location TEXT ,
source TEXT,
southwest_lat REAL,
northeast_lat REAL,
southwest_lng REAL,
northeast_lng REAL
)''')
cur.execute(
'SELECT southwest_lat, northeast_lat, southwest_lng, northeast_lng FROM location_cache WHERE location=?',
(place,)
)
entry = cur.fetchone()
if entry is not None:
southwest_lat = entry[0]
northeast_lat = entry[1]
southwest_lng = entry[2]
northeast_lng = entry[3]
else:
try:
geolocator = geopy.Nominatim(user_agent=user_agent)
location = geolocator.geocode(place, timeout=timeout)
except geopy.exc.GeopyError:
return None
southwest_lat = float(location.raw['boundingbox'][0])
northeast_lat = float(location.raw['boundingbox'][1])
southwest_lng = float(location.raw['boundingbox'][2])
northeast_lng = float(location.raw['boundingbox'][3])
cur.execute(
("INSERT INTO location_cache "
"(location, source, southwest_lat, northeast_lat, southwest_lng, northeast_lng) "
"VALUES (?, ?, ?, ?, ?, ?)"),
(place, "osm", southwest_lat, northeast_lat, southwest_lng, northeast_lng)
)
db.commit()
else:
try:
geolocator = geopy.Nominatim(user_agent=user_agent)
location = geolocator.geocode(place, timeout=timeout)
except geopy.exc.GeopyError:
return None
southwest_lat = float(location.raw['boundingbox'][0])
northeast_lat = float(location.raw['boundingbox'][1])
southwest_lng = float(location.raw['boundingbox'][2])
northeast_lng = float(location.raw['boundingbox'][3])
#print(f"sw_lat, ne_lat, sw_lng, ne_lng = {southwest_lat}, {northeast_lat}, {southwest_lng}, {northeast_lng}")
df_place = self.df[(self.df['Latitude'] < northeast_lat) & (self.df['Longitude'] < northeast_lng) &
(self.df['Latitude'] > southwest_lat) & (self.df['Longitude'] > southwest_lng)]
if only_points:
return self.__class__(df_place)
track_list = df_place['CodeRoute'].unique().tolist()
return self.__class__(self.df[self.df['CodeRoute'].isin(track_list)])
def getTracksByDate(self, start=None, end=None, periods=None, freq='D'):
"""
Gets the points of the specified date range
using various combinations of parameters.
2 of 'start', 'end', or 'periods' must be specified.
Date format recommended: 'yyyy-mm-dd'
Parameters
----------
start: date
Date start period
end: date
Date end period
periods: int
Number of periods. If None, must specify 'start' and 'end'
freq: string
Frequency of the date range
Returns
-------
df_date: DFTrack
A DFTrack with the points of the specified date range.
"""
warnings.warn("The getTracksByDate function is deprecated and "
"will be removed in version 2.0.0. "
"Use the get_tracks_by_date function instead.",
FutureWarning,
stacklevel=8
)
return self.get_tracks_by_date(start, end, periods, freq)
def get_tracks_by_date(self, start=None, end=None, periods=None, freq='D'):
"""
Gets the points of the specified date range
using various combinations of parameters.
2 of 'start', 'end', or 'periods' must be specified.
Date format recommended: 'yyyy-mm-dd'
Parameters
----------
start: date
Date start period
end: date
Date end period
periods: int
Number of periods. If None, must specify 'start' and 'end'
freq: string
Frequency of the date range
Returns
-------
df_date: DFTrack
A DFTrack with the points of the specified date range.
"""
if trk_utils.is_time_format(start) or trk_utils.is_time_format(end):
            raise TrackException('Must specify an appropriate date format', 'Time format found')
rng = pd.date_range(start=start, end=end, periods=periods, freq=freq)
df_date = self.df.copy()
df_date['Date'] = pd.to_datetime(df_date['Date'])
df_date['ShortDate'] = df_date['Date'].apply(lambda date: date.date().strftime('%Y-%m-%d'))
df_date = df_date[df_date['ShortDate'].apply(lambda date: date in rng)]
del df_date['ShortDate']
df_date = df_date.reset_index(drop=True)
return self.__class__(df_date, list(df_date))
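    # Example (illustrative only): keep the points recorded during July 2017.
    #   july = track.get_tracks_by_date(start='2017-07-01', end='2017-07-31')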
def getTracksByTime(self, start, end, include_start=True, include_end=True):
"""
Gets the points between the specified time range.
Parameters
----------
start: datetime.time
Time start period
end: datetime.time
Time end period
include_start: boolean
include_end: boolean
Returns
-------
df_time: DFTrack
A DFTrack with the points of the specified date and time periods.
"""
warnings.warn("The getTracksByTime function is deprecated and "
"will be removed in version 2.0.0. "
"Use the get_tracks_by_time function instead.",
FutureWarning,
stacklevel=8
)
return self.get_tracks_by_time(start, end, include_start, include_end)
def get_tracks_by_time(self, start, end, include_start=True, include_end=True):
"""
Gets the points between the specified time range.
Parameters
----------
start: datetime.time
Time start period
end: datetime.time
Time end period
include_start: boolean
include_end: boolean
Returns
-------
df_time: DFTrack
A DFTrack with the points of the specified date and time periods.
"""
if not trk_utils.is_time_format(start) or not trk_utils.is_time_format(end):
            raise TrackException('Must specify an appropriate time format', trk_utils.TIME_FORMATS)
df_time = self.df.copy()
index = pd.DatetimeIndex(df_time['Date'])
df_time = df_time.iloc[index.indexer_between_time(start_time=start, end_time=end, include_start=include_start,
include_end=include_end)]
df_time = df_time.reset_index(drop=True)
return self.__class__(df_time, list(df_time))
def pointVideoNormalize(self):
warnings.warn("The pointVideoNormalize function is deprecated and "
"will be removed in version 2.0.0. "
"Use the point_video_normalize function instead.",
FutureWarning,
stacklevel=8
)
return self.point_video_normalize()
def point_video_normalize(self):
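        # Assumption from reading the code below: assigns every track a 'VideoFrame'
        # index, stretching shorter tracks so they span the same number of frames as
        # the longest track (useful when animating several tracks side by side).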
df = self.df.copy()
df_norm = pd.DataFrame()
group_size = df.groupby('CodeRoute').size()
max_value = group_size.max()
name_max_value = group_size.idxmax()
grouped = df['CodeRoute'].unique()
for name in tqdm(grouped, desc='Groups'):
df_slice = df[df['CodeRoute'] == name]
df_slice = df_slice.reset_index(drop=True)
div = int(max_value / len(df_slice)) + 1
df_index = DataFrame(df_slice.index)
df_slice['VideoFrame'] = df_index.apply(lambda x: x + 1 if name_max_value == name else x * div)
df_norm = pd.concat([df_norm, df_slice])
df_norm = df_norm.reset_index(drop=True)
return self.__class__(df_norm, list(df_norm))
def timeVideoNormalize(self, time, framerate=5):
warnings.warn("The timeVideoNormalize function is deprecated and "
"will be removed in version 2.0.0. "
"Use the time_video_normalize function instead.",
FutureWarning,
stacklevel=8
)
return self.time_video_normalize(time, framerate)
def time_video_normalize(self, time, framerate=5):
df = self.df.copy()
if time == 0:
df['VideoFrame'] = 0
df = df.reset_index(drop=True)
return self.__class__(df, list(df))
n_fps = time * framerate
df = df.sort_values('Date')
df_cum = trk_utils.calculate_cum_time_diff(df)
grouped = df_cum['CodeRoute'].unique()
df_norm = | pd.DataFrame() | pandas.DataFrame |
import os
import numpy as np
import pandas as pd
import xgboost as xgb
import lightgbm as lgb
import catboost as ctb
from tqdm import tqdm
from sklearn import preprocessing
from sklearn.model_selection import KFold, StratifiedKFold,train_test_split
from sklearn.metrics import mean_squared_error
def eval_Features(df,feature_nan= {'nan': -1}):
feature_tmp = []
for index,feature in tqdm(enumerate(df['Features'].values)):
feature_tmp.append(eval(feature,feature_nan))
feature_tmp = pd.DataFrame(feature_tmp)
feature_tmp.columns = ['feature_'+str(i) for i in range(feature_tmp.shape[1])]
return feature_tmp
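# Illustrative sketch only: the 'Features' column holds stringified lists, which
# eval_Features expands into numeric feature_0..feature_n columns ('nan' maps to -1).
def _example_eval_features():
    df = pd.DataFrame({'Features': ['[1.0, 2.0, nan]', '[0.5, 0.1, 3.0]']})
    return eval_Features(df)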
def get_dataset(path = './Molecule_prediction_20200312'):
    # Read in the data
df_train = | pd.read_csv(f'{path}/train_0312.csv') | pandas.read_csv |
from django.shortcuts import render
from django.conf import settings
from django.utils import timezone
from django.shortcuts import redirect
from django.db.models.functions import Cast
from django.contrib.auth import authenticate, login
from .models import InjectionLog, GSBrand, Cats, UserGS, SelectedCat
from .models import UserExtension, RelapseDate, ObservationLog, BloodWork, FixTimezone
from .forms import BloodWorkForm
from django.contrib.auth.forms import UserCreationForm
from .forms import AddGS, RegisterForm
from django.db.models.fields import DateField
from django.db.models import F, DurationField, ExpressionWrapper
from .models import WarriorTracker
import re
from django.contrib.auth import logout
from django.contrib.auth.decorators import login_required
from datetime import datetime, timedelta
import pytz
import hashlib
import csv
import decimal
from django.http import StreamingHttpResponse
import pandas as pd
import statsmodels.formula.api as smf
from django.db import connection
from plotly.offline import plot
import plotly.graph_objs as go
import plotly.express as px
import os
import json
import math
from cron_data import Database
from gdstorage.storage import GoogleDriveStorage
from django.core.files.base import ContentFile
class Echo:
"""An object that implements just the write method of the file-like
interface.
"""
def write(self, value):
"""Write the value by returning it, instead of storing in a buffer."""
return value
# Create your views here.
def selected_cat(request):
if "sharable" in request.GET:
try:
cats = Cats.objects.filter(sharable=request.GET["sharable"])
cats[0]
return cats[0]
except:
return False
if "selectedcat" not in request.GET:
try:
sc = SelectedCat.objects.filter(user=request.user)[0]
cat_name = sc.cat_name
except:
try:
cat_name = Cats.objects.filter(owner=request.user).order_by('id')[0]
except:
return False
request.GET = request.GET.copy()
request.GET["selectedcat"] = cat_name.id
try:
cat = Cats.objects.filter(owner=request.user).filter(id=request.GET["selectedcat"])[0]
except:
return False
return cat
def register(request):
if request.method == "POST":
form = RegisterForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get('username')
password = form.cleaned_data.get('<PASSWORD>')
user = authenticate(username=username, password=password)
login(request, user)
return redirect("/")
else:
form = RegisterForm()
page = "register"
return render(request, "registration/register.html", {"form":form,"page":page})
@login_required(login_url='/information')
def main_site(request):
sc = None
relapse = None
cat_quality = ""
brand_plot = ""
# Define a local timezone from the IP Address
tz = get_local_timezone(request)
timezone.activate(tz)
local_time = pytz.timezone(tz)
now = datetime.now(local_time)
date_stamp = None
unaware = datetime.now()
try:
user_defined_tz = FixTimezone.objects.get(owner=request.user).timezone
tz_list = None
except:
user_defined_tz = False
tz_list = []
for time in pytz.all_timezones:
tz_list.append(time)
validcats = Cats.objects.filter(owner=request.user)
if not SelectedCat.objects.filter(user=request.user).exists():
if Cats.objects.filter(owner=request.user).exists():
cat = Cats.objects.filter(owner=request.user).order_by('id')[0]
sc = SelectedCat(cat_name=cat, user=request.user)
sc.save()
else:
sc = SelectedCat.objects.get(user=request.user)
if "selectedcat" in request.GET and sc:
try:
cat = Cats.objects.filter(owner=request.user).filter(id=request.GET["selectedcat"])[0]
except:
cat = Cats.objects.filter(owner=request.user).order_by('id')[0]
sc = SelectedCat.objects.get(user=request.user)
sc.cat_name=cat
sc.save()
brand_plot=cat_stats(sc)
res = None
try:
ht_val = 220
if sc.cat_name.cured:
ht_val=150
quality = InjectionLog.objects.filter(cat_name=sc.cat_name).order_by("injection_time").filter(
active=True)
res = [x.cat_weight for x in quality]
wt_unit = [x.wt_units for x in quality][0]
fig = px.line(res, height=ht_val)
fig.update_xaxes(visible=False, fixedrange=True)
fig.update_layout(
showlegend=False,
plot_bgcolor="white",
yaxis_title="Cat's Weight (%s)" % wt_unit,
margin=dict(t=10,l=10,b=10,r=10)
)
fig.update_yaxes(
visible=True,
fixedrange=True,
title_text = "Cat's Weight (%s)" % wt_unit,
title_font_size = 10)
fig.update_traces(
hoverinfo='skip',
hovertemplate=None)
cat_quality = plot(fig, output_type="div", include_plotlyjs="cdn")
except:
cat_quality = ""
try:
relapse = RelapseDate.objects.filter(cat_name = sc.cat_name).order_by('-relapse_start')[0]
treatment_duration = now.date()-relapse.relapse_start
try:
inj_progress={}
inj_date = InjectionLog.objects.filter(
owner=request.user).filter(
cat_name=sc.cat_name).filter(
active=True).order_by("-injection_time")[0].injection_time
date_stamp = local_time.fromutc(inj_date.replace(tzinfo=None))
inj_date = date_stamp.date() - relapse.relapse_start
inj_progress["inj_date"] = inj_date.days+1
except:
inj_progress = None
except:
try:
treatment_duration = now.date()-sc.cat_name.treatment_start
except:
treatment_duration = now.date()-now.date()
try:
inj_progress={}
inj_date = InjectionLog.objects.filter(
owner=request.user).filter(
cat_name=sc.cat_name).filter(
active=True).order_by("-injection_time")[0].injection_time
date_stamp = local_time.fromutc(inj_date.replace(tzinfo=None))
inj_date = date_stamp.date() - sc.cat_name.treatment_start
inj_progress["inj_date"] = inj_date.days+1
except:
inj_progress = None
template ='InjectionLog/home.html'
page="home"
if request.user.groups.filter(name="WarriorAdmin").exists():
grouping="WarriorAdmin"
else:
grouping = None
injections = InjectionLog.objects.filter(owner=request.user)
return render(request, template, {"page":page, "cat_quality":cat_quality, "brand_plot":brand_plot, "date_stamp":date_stamp, "progress":inj_progress,"sc":sc, "tz":tz, "tz_list":tz_list, "user_defined_timezone":user_defined_tz, "relapse":relapse, "treatment_duration":treatment_duration.days,"grouping":grouping,"validcats":validcats,"time_info":now.utcoffset, "wt_array":res[::-1]})
def sharable_hash(cat, user):
key1 = str(cat).encode('utf-8')
key2 = str(user).encode('utf-8')
md5 = hashlib.md5(b"%s-%s" %(key1, key2))
return md5.hexdigest()
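# Illustrative sketch only: the share token is just an MD5 of "<cat>-<user>", so the
# same cat/user pair always produces the same 32-character hex string.
def _example_sharable_hash():
    return sharable_hash("Mittens", "warrior42")  # the names here are made up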
@login_required
def catinfo(request):
page="catinfo"
sharable = None
relapse = None
validcats = Cats.objects.filter(owner=request.user)
pattern="^\d{4}\-(0[1-9]|1[012])\-(0[1-9]|[12][0-9]|3[01])$"
if request.method == "POST":
if request.POST["CatID"]:
c = Cats.objects.get(id=request.POST["CatID"])
if "cured" in request.POST:
c.cured = True
c.bad = False
else:
c.cured = False
if "bad_outcome" in request.POST:
c.cured = False
c.bad = True
else:
c.bad = False
if "treatmentstart" in request.POST and re.match(pattern,request.POST["treatmentstart"]):
c.treatment_start = request.POST["treatmentstart"]
if "relapse_date" in request.POST and re.match(pattern,request.POST["relapse_date"]):
c.cured = False
relapse = RelapseDate(
cat_name = c,
relapse_start = request.POST["relapse_date"],
fip_type = request.POST["FIPTypeRelapse"],
ocular = "Ocular_Relapse" in request.POST,
neuro = "Neuro_Relapse" in request.POST,
)
relapse.save()
if "extendedtreatment" in request.POST:
if request.POST["extendedtreatment"]=="":
c.extended_treatment = 0
else:
c.extended_treatment = request.POST["extendedtreatment"]
c.relapse = "relapse" in request.POST
c.notes = request.POST["notes"]
if request.POST["warrioradmin"]:
c.WarriorAdmin = request.POST["warrioradmin"]
sharable = sharable_hash(c, request.user)
c.sharable = sharable
c.save()
return redirect("/?message=update")
else:
if "treatmentstart" in request.POST and re.match(pattern,request.POST["treatmentstart"]):
treatment_date = request.POST["treatmentstart"]
else:
treatment_date = None
if not re.match(pattern, request.POST["CatBirthday"]):
return redirect("/catinfo?error=Cat's birthday not entered. Please enter a value.&CatID=0")
cats = Cats(
owner = request.user,
name = request.POST["CatName"],
birthday = request.POST["CatBirthday"],
fip_type = request.POST["FIP Type"],
ocular = "Ocular" in request.POST,
neuro = "Neuro" in request.POST,
treatment_start = treatment_date,
relapse = "realpse" in request.POST,
WarriorAdmin = request.POST["warrioradmin"],
notes = request.POST["notes"]
)
cats.save()
sharable = sharable_hash(cats, request.user)
cats.sharable = sharable
cats.save()
return redirect("/?message=success")
else:
if "CatID" in request.GET or "sharable" in request.GET:
try:
if "CatID" in request.GET and request.GET["CatID"] != "0":
cats = Cats.objects.filter(id=request.GET["CatID"]).filter(owner=request.user)
if request.GET["CatID"] == "0":
cats = [None]
if "sharable" in request.GET:
shared = True
cats = Cats.objects.filter(sharable=request.GET["sharable"])
try:
cats[0]
except:
return redirect("/?error=Invalid Share Link")
catnum=1
try:
bw_data = BloodWork.objects.filter(cat_name=cats[0]).filter(active=True)
bloodwork = []
for result in bw_data:
bloodwork.append(result)
except:
bloodwork = None
try:
relapse = RelapseDate.objects.filter(cat_name = cats[0])
except:
relapse=None
except:
# Do Cat Sharable Logic to get the cat information
return redirect("/catinfo")
else:
cats = Cats.objects.filter(owner = request.user).all()
catnum = len(cats)
bloodwork = None
try:
cats[0]
except:
return redirect("/catinfo?CatID=0")
sharable = sharable_hash(cats[0], request.user)
form = BloodWorkForm(request.POST, request.FILES)
template = "InjectionLog/catinfo.html"
return render(request, template, {"page":page, "cats":cats, "relapse":relapse, "sharable":sharable, "catnum":catnum,'form':form,"bloodwork":bloodwork, "validcats":validcats})
def information(request):
template ='InjectionLog/information.html'
page="information"
return render(request, template,{"page":page})
def about(request):
template ='InjectionLog/about.html'
page="about"
return render(request, template,{"page":page})
@login_required
def make_test(request):
try:
account = UserExtension.objects.filter(user=request.user).get()
except:
account = UserExtension(user = request.user)
if request.method=="POST":
if "activate_test" in request.POST:
account.test_account = True
account.save()
else:
account.test_account = False
account.save()
return render(request, "InjectionLog/test_account.html", {"enabled":account})
@login_required
def calculatedosage(request):
template ='InjectionLog/dosecalc.html'
page="dose"
drugs = GSBrand.objects.all().order_by('brand')
return render(request, template, {"page":page, "dose":True, "drugs":drugs})
def logout_view(request):
logout(request)
return redirect("/")
@login_required
def delete_injection(request):
if "delete_id" not in request.GET:
return redirect("/?error=No record ID was specified")
try:
cat = Cats.objects.filter(owner=request.user).filter(id=request.GET["selectedcat"])[0]
except:
try:
cat = Cats.objects.filter(owner=request.user).order_by('id')[0]
except:
return redirect("/?error=Problem finding cat for your account")
try:
log_type = request.GET["log"]
except:
return redirect("/?error=No log specified")
try:
if log_type == "observationlog":
q = ObservationLog.objects.filter(owner=request.user).filter(cat_name=cat).filter(id=request.GET["delete_id"]).get()
if log_type == "log":
q = InjectionLog.objects.filter(owner=request.user).filter(cat_name=cat).filter(id=request.GET["delete_id"]).get()
if log_type == "bloodwork":
q = BloodWork.objects.filter(cat_name=cat).filter(id=request.GET["delete_id"]).get()
q.bloodwork.delete()
if log_type == "tracker":
q = WarriorTracker.objects.filter(user=request.user).filter(id=request.GET["delete_id"]).delete()
return redirect("/trackwarrior")
except:
return redirect("/?error=You do not have access to this resource")
q.active=False
q.save()
if log_type == "bloodwork":
return redirect("/catinfo/?message=update&CatID=%s" % (cat.id))
return redirect("/%s/?message=update&selectedcat=%s&q=%s" % (log_type, cat.id,q.id))
@login_required
def add_gs(request):
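    """Add a GS brand: WarriorAdmin group members write to the global GSBrand table,
    all other users write to their personal UserGS table."""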
page="add_gs"
if request.method == "POST":
form = AddGS(request.POST)
if form.is_valid():
if request.user.groups.filter(name="WarriorAdmin").exists():
user_gs = GSBrand(
brand = request.POST["GSBrand"],
concentration = request.POST["GSConcentration"],
admin_method = request.POST["GSAdmin"],
price = request.POST["GSPrice"])
else:
user_gs = UserGS(
user = request.user,
brand = request.POST["GSBrand"],
concentration = request.POST["GSConcentration"],
admin_method = request.POST["GSAdmin"],
price = request.POST["GSPrice"])
user_gs.save()
return redirect("/?message=success")
else:
form = AddGS()
template ='InjectionLog/injlog.html'
return render(request, 'InjectionLog/add_gs.html',{'page':page,'form':form})
@login_required
def recordinjection(request):
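    """Record a daily injection. The form is pre-filled from the cat's most recent entry, inputs are
    sanity-checked (weight, dose, date format), and entries within 12 hours of an existing injection
    are rejected unless 'multi_entry' is set."""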
template ='InjectionLog/dosecalc.html'
page="injection"
ns=False
drugs = GSBrand.objects.all().order_by('brand')
userGS = UserGS.objects.filter(user=request.user)
local_time = get_local_timezone(request)
tz = pytz.timezone(local_time)
cat_name = selected_cat(request)
if not cat_name:
return redirect("/?error=Please add a cat to your account first")
try:
latest_data = InjectionLog.objects.filter(owner=request.user).filter(cat_name=cat_name).order_by('-injection_time')[0]
try:
gs_brand = GSBrand.objects.get(brand=latest_data.gs_brand)
except:
gs_brand = UserGS.objects.get(brand=latest_data.gs_brand, user=request.user)
conc = gs_brand.concentration
dose = round(conc*latest_data.injection_amount/latest_data.cat_weight*decimal.Decimal(2.204),0)
request.GET = request.GET.copy()
request.GET["selectedcat"] = cat_name.id
request.GET["CatWeight"] = latest_data.cat_weight
request.GET["brand_value"] = gs_brand.brand
if latest_data.wt_units == "kg":
request.GET["weight_units"]=True
dose = int(round(conc*latest_data.injection_amount/latest_data.cat_weight,0))
request.GET["GSDose"] = dose
except:
pass
validcats = Cats.objects.filter(owner=request.user)
if request.method == "POST":
user = request.user
cat = Cats.objects.get(id=request.POST["selectedcat"])
weight = request.POST["CatWeight"]
brand = request.POST["brand_value"]
date = request.POST["inj_date"]
try:
i_date = tz.localize(datetime.strptime(date,"%Y-%m-%d %I:%M %p"))
except:
return render(request, template, {"page":page,"dose":True,"local_time":local_time, "drugs":drugs,"userGS":userGS, "validcats":validcats,"error":"Invalid Date Time Format Entered. Must be YYYY-MM-DD HH:MM AM/PM"})
rating = request.POST["cat_rating"]
try:
int(rating)
except:
return render(request, template, {"page":page,"dose":True,"local_time":local_time, "drugs":drugs,"userGS":userGS, "validcats":validcats,"error":"There was an error with the value entered for how your cat is doing."})
amount = request.POST["calculateddose"]
i_note = request.POST["injectionnotes"]
o_note = request.POST["othernotes"]
if float(weight) > 30:
return render(request, template, {"page":page,"dose":True,"local_time":local_time, "drugs":drugs,"userGS":userGS, "validcats":validcats,"error":"You entered a weight for your cat that appears unrealistic. Please check your weight and units, and try again"})
if float(amount) > 30:
return render(request, template, {"page":page,"dose":True,"local_time":local_time, "drugs":drugs,"userGS":userGS, "validcats":validcats,"error":"The calculated dose appears to be too large or incorrect. Please check that you have entered a correct weight, and press 'calculate'. If this problem persists, report a bug."})
if "new_symptom" in request.POST:
newsymptom = request.POST["symptom_details"]
if newsymptom!="":
ns = True
else:
newsymptom = ""
ns = False
if "gabadose" in request.POST:
try:
gabadose = int(request.POST["gabadose"])
except:
gabadose = None
else:
gabadose = None
if isinstance(gabadose, str):
gabadose = None
unit = "lb"
if "weight_units" in request.POST:
unit="kg"
q = InjectionLog.objects.filter(owner = user).filter(cat_name = cat).filter(active=True)
for row in q:
difference = i_date - row.injection_time
if difference.total_seconds() < 12*60*60 and difference.total_seconds() > 0 and "multi_entry" not in request.POST:
request.GET = request.POST
return render(request, template, {"page":page,"dose":True,"local_time":local_time, "drugs":drugs,"userGS":userGS, "validcats":validcats,"error":"The injection date and time entered is too close to a previous injection. Select 'allow multiplie entires' if this is correct"})
log = InjectionLog(
owner = user,
gs_brand = brand,
cat_name = cat,
cat_weight= weight,
injection_time = i_date,
injection_amount = amount,
cat_behavior_today = rating,
injection_notes = i_note,
gaba_dose = gabadose,
new_symptom=newsymptom,
other_notes = o_note,
wt_units = unit)
log.save()
if cat.treatment_start is None:
cat.treatment_start = i_date.date()
cat.save()
return redirect("/?message=success&weight=%s&unit=%s&ns=%s" % (weight,unit,ns))
return render(request, template, {"page":page, "local_time":local_time, "dose":True,"drugs":drugs,"userGS":userGS, "validcats":validcats})
@login_required
def injectionlog(request):
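    """Show the injection log for the selected cat, optionally via a sharable (read-only) link,
    and stream it as a CSV download when 'export' is requested."""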
validcats = Cats.objects.filter(owner=request.user)
cat = selected_cat(request)
local_time = get_local_timezone(request)
if not cat:
return redirect("/?error=No data has been recorded...")
template ='InjectionLog/injlog.html'
page="log"
treatment_start = cat.treatment_start
if "sharable" not in request.GET:
sharable = False
injections = InjectionLog.objects.filter(
owner=request.user).filter(
cat_name=cat).filter(
active=True).annotate(
inj_date = ExpressionWrapper(Cast(F('injection_time'), DateField())-F('cat_name__treatment_start'),output_field=DurationField())).order_by('injection_time')
else:
sharable = True
injections = InjectionLog.objects.filter(
cat_name=cat).filter(
active=True).annotate(
inj_date = ExpressionWrapper(Cast(F('injection_time'), DateField())-F('cat_name__treatment_start'),output_field=DurationField())).order_by('injection_time')
if "export" in request.GET:
"""
Export the log files as a csv
"""
        # Build the CSV contents in memory: a header row followed by one row per injection record.
csv_file = [[
"GS Brand",
"Cat Weight",
"Units",
"Injection Date",
"Injection Amount (mL or pills)",
"Cat Behavior (1-5)",
"Injection Notes",
"Gaba Dose (mL)",
"Other Notes",
"New Symptoms"]]
for row in injections:
csv_file.append([
row.gs_brand,
row.cat_weight,
row.wt_units,
row.injection_time,
row.injection_amount,
row.cat_behavior_today,
row.injection_notes,
row.gaba_dose,
row.other_notes,
row.new_symptom
])
date_format = datetime.now()
filename="InjectionLog_%s_%s.csv" % (cat.name, date_format.strftime("%Y-%m-%d"))
pseudo_buffer = Echo()
writer = csv.writer(pseudo_buffer)
response = StreamingHttpResponse((writer.writerow(row) for row in csv_file),
content_type="text/csv")
response['Content-Disposition'] = 'attachment; filename=%s' % filename
return response
return render(request, template, {"page":page, "treatment_start":treatment_start, "injections":injections, "local_time":local_time, "validcats":validcats, "cat":cat, "sharable":sharable})
def change_record(request):
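    """Update an existing injection record in place from the inline edit form."""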
if request.method == "POST":
local_time = get_local_timezone(request)
tz = pytz.timezone(local_time)
i_date = request.POST["inj_date"]
try:
i_date = datetime.strptime(i_date,"%Y-%m-%d %H:%M %p")
except:
return redirect("/log/?error=Invalid Date Format Entered:%s&selectedcat=%s" % (i_date, request.POST["cat_name"]))
record = InjectionLog.objects.get(id=request.POST["inj_id"])
record.injection_time = timezone.make_aware(i_date,tz,True)
record.cat_behavior_today = request.POST["cat_rating"]
record.injection_amount = request.POST["injection_amount"]
record.new_symptom = request.POST["new_symptom"]
record.injection_notes = request.POST["injection_notes"]
record.other_notes = request.POST["other_notes"]
record.gaba_dose = request.POST["gaba_dose"]
record.save()
return redirect("/log/?message=update&selectedcat=%s" % request.POST["cat_name"])
@login_required
def observation_log(request):
# Define a local timezone from the IP Address
tz = get_local_timezone(request)
timezone.activate(tz)
now = timezone.make_aware(datetime.now())
validcats = Cats.objects.filter(owner=request.user).filter(treatment_start__lte=(now-timedelta(days=84)))
cat = selected_cat(request)
if not cat:
return redirect("/?error=No data has been recorded...")
template ='InjectionLog/observation_log.html'
page="log"
try:
relapse = RelapseDate.objects.filter(cat_name = cat).order_by('-relapse_start')[0]
treatment_duration = now.date()-relapse.relapse_start
relapse = relapse.relapse_start
except:
relapse = cat.treatment_start
treatment_duration = now.date()-cat.treatment_start
observations = ObservationLog.objects.filter(
owner=request.user).filter(
cat_name=cat).filter(
active=True)
return render(request, template, {"page":page,"relapse":relapse, "treatment_duration":treatment_duration, "observations":observations,"validcats":validcats, "cat":cat})
@login_required
def record_observation(request):
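    """Record a post-treatment observation (weight, temperature, behaviour rating, notes) for the selected cat."""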
validcats = Cats.objects.filter(owner=request.user)
template = "InjectionLog/record_observation.html"
if request.method == "POST":
user = request.user
cat = Cats.objects.get(id=request.POST["selectedcat"])
weight = request.POST["CatWeight"]
wt_units = request.POST["weight_units"]
obs_date = request.POST["observation_date"]
rating = request.POST["cat_rating"]
temperature = request.POST["temperature"]
temp_units = request.POST["temp_units"]
notes = request.POST["notes"]
if temperature == "":
temperature = None
if weight == "":
weight = None
log = ObservationLog(
owner = user,
cat_name = cat,
cat_weight= weight,
wt_units = wt_units,
observation_date = obs_date,
temperature = temperature,
temp_units = temp_units,
cat_behavior_today = rating,
notes = notes)
log.save()
return redirect("/?message=success")
cat = selected_cat(request)
if not cat:
return redirect("/?error=Please add a cat to your account first")
request.GET = request.GET.copy()
request.GET["selectedcat"] = cat.id
return render(request,template, {"validcats":validcats})
def signup(request):
if request.method == 'POST':
form = UserCreationForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get('username')
raw_password = form.cleaned_data.get('password1')
user = authenticate(username=username, password=raw_password)
login(request, user)
return redirect('home')
else:
form = UserCreationForm()
return render(request, 'signup.html', {'form': form})
@login_required
def upload_file(request):
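    """Handle chunked AJAX uploads of bloodwork files: chunks are written to a per-user temporary
    directory, recombined once all chunks have arrived, then pushed to Google Drive and saved as a
    BloodWork record before the temporary files are cleaned up."""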
if request.method == 'POST':
targetDir = "/var/www/fip/SlayFIP/temporary_uploads/"+request.user.username
if not os.path.exists(targetDir):
os.makedirs(targetDir)
if 'ajax_call' in request.POST:
fileName = request.POST['fileName'] # you receive the file name as a separate post data
fileSize = request.POST['fileSize'] # you receive the file size as a separate post data
fileId = request.POST['fileId'] # you receive the file identifier as a separate post data
index = request.POST['chunkIndex'] # the current file chunk index
totalChunks = int(request.POST['chunkCount']) # the total number of chunks for this file
file_chunk = request.FILES['fileBlob']
target_file = targetDir +"/"+fileName
outfile = targetDir+"/"+fileName
target_file = target_file + "_" +str(index)
if(chunk_handler(file_chunk,target_file)):
chunks = get_chunk_list(targetDir,fileName+"_")
allChunksUploaded = len(chunks) == totalChunks
if allChunksUploaded:
combineChunks(chunks, outfile, cleanup=True)
request.session['fileName'] = fileName
return_values = {
'chunkIndex': index,
'initialPreviewConfig':
{
'type':'other',
'caption': fileName,
'key':fileId,
'fileId': fileId,
'size': fileSize,
},
'append': True}
return StreamingHttpResponse(json.dumps(return_values))
if "google_drive_upload" in request.POST:
if not request.session["fileName"]:
return redirect("/catinfo/?error=Unable to find file to upload")
fileName = request.session["fileName"]
outfile = targetDir+"/"+fileName
file_blob = ContentFile(open(outfile,'rb').read())
storage = GoogleDriveStorage()
path = storage.save('FIPlog/'+fileName,file_blob)
cat = Cats.objects.get(id=request.POST["cat_name"])
foo = BloodWork(
bloodname = request.POST["bloodname"],
cat_name = cat,
bloodwork_date = request.POST["bloodwork_date"],
notes = request.POST["notes"],
bloodwork = path,
)
foo.save()
# Cleanup Temp Files in User's upload folder
for filename in os.listdir(targetDir):
file_path = os.path.join(targetDir, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
except Exception as e:
print('Failed to delete %s. Reason: %s' % (file_path, e))
return redirect("/catinfo/?message=success&CatID="+request.POST["cat_name"])
else:
form = BloodWorkForm()
return redirect('/catinfo')
def get_chunk_list(mypath, slug):
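    """Return the full paths of all chunk files in mypath whose names contain the given slug."""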
from os import listdir
from os.path import isfile, join
onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
return [mypath+"/"+x for x in onlyfiles if slug in x]
def chunk_handler(data,targetfile):
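    """Write a single uploaded chunk to targetfile."""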
with open(targetfile, 'wb+') as destination:
for chunk in data.chunks():
destination.write(chunk)
return True
def combineChunks(chunk_list, outFile, cleanup=False):
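    """Concatenate the chunk files into outFile, optionally deleting the chunks afterwards."""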
with open(outFile, 'wb+') as destination:
for chunk in chunk_list:
content = open(chunk,'rb').read()
destination.write(content)
if cleanup:
for chunk in chunk_list:
os.remove(chunk)
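# Example use of the chunk helpers (hypothetical user directory and filename):
#   chunks = get_chunk_list("/var/www/fip/SlayFIP/temporary_uploads/alice", "results.pdf_")
#   combineChunks(chunks, "/var/www/fip/SlayFIP/temporary_uploads/alice/results.pdf", cleanup=True)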
def load_file(request):
cat = Cats.objects.get(id=8)
data = BloodWork.objects.filter(cat_name=cat)
bloodwork = []
for result in data:
bloodwork.append(result)
django_file = bloodwork[0]
t ='InjectionLog/view_file.html'
gd_storage = None
return render(request, t, {'file':django_file, "data": bloodwork, 'storage':gd_storage})
def parse_sharable_url(url):
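    """Extract the trailing 32-character hash from a share link and return it if a cat exists
    for that hash, otherwise return False."""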
pattern = "^(.*)([a-z0-9]{32})$"
try:
share_hash = re.match(pattern, url).groups()[1]
cat = Cats.objects.get(sharable = share_hash)
return share_hash
except:
return False
@login_required
def track_warrior(request):
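    """Follow another user's cat via its sharable link; duplicate links and short identifiers are rejected."""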
if request.method == "POST":
share_hash = parse_sharable_url(request.POST["share_link"])
share_name = request.POST["identifier"]
exist = WarriorTracker.objects.filter(user = request.user).filter(md5hash=share_hash).count()
if exist > 0:
return redirect("/trackwarrior/?error=You already following this cat")
if len(share_name)<=1:
return redirect("/trackwarrior/?error=Please enter a longer name to identify this cat")
if share_hash:
sh = WarriorTracker(
user = request.user,
md5hash = share_hash,
identifier = share_name)
sh.save()
return redirect("/trackwarrior/?message=success")
else:
return redirect("/trackwarrior/?error=Invalid Sharing Link")
tracking = WarriorTracker.objects.filter(user = request.user)
return render(request, "InjectionLog/tracker.html",{"tracking":tracking})
def data_analysis(request):
"""
Generate weight and dosing tables for cats
"""
filename = os.path.dirname(settings.DATABASES['default']['NAME'])+"/data_output.txt"
with open(filename) as json_file:
data = json.load(json_file)
fip_div = data["fip_stats"]["graph"]
total_cats = data["fip_stats"]["total_cats"]
wt_div = data["weight"]["graph"]
age_div = data["summary"]["graph"]
duration_div = data["distribution"]["graph"]
past_initial_treatment = data["distribution"]["total_cats"]
cured_cats = data["distribution"]["cured_cats"]
cat_quality = data["quality"]["graph"]
brand_div = data["brands"]["graph"]
treatment_stats = data["treatment_stats"]
return render(request, "InjectionLog/data_analysis.html",{"page":"data","brand_stats":brand_div, "duration_fig":duration_div, "cat_quality":cat_quality,"treatment_stats":treatment_stats, "cured_cats":cured_cats, "84_cats":past_initial_treatment,"age_fig":age_div,"fip_fig":fip_div,"wt_fig":wt_div, "dry_cases":total_cats["0"],"wet_cases":total_cats["1"]})
def vet_info(request):
return render(request,"InjectionLog/vet_info.html",{"page":"vetinfo"})
def brands(request):
return render(request,"InjectionLog/brands.html",{"page":"brands"})
def error_create(requests):
return render(requests, "InjectionLog/error_create.html")
def costs(request):
"""
View to do the number crunching
"""
filename = os.path.dirname(settings.DATABASES['default']['NAME'])+"/data_output.txt"
with open(filename) as json_file:
data = json.load(json_file)
model = data["weight"]["model"]
ints = model["ints"]
daily_price = model["daily_price"]
stwt = model["stwt"]
mult = model["mult"]
age = model["age"]
total_cost = 0
total_vol = 0
table = []
if "CatWeight" in request.GET and "FIPType" in request.GET and "CatAge" in request.GET:
if request.GET["FIPType"]=="dry":
dosage=10
else:
dosage=6
try:
wt = float(request.GET["CatWeight"])
ct_age = float(request.GET["CatAge"])
except:
return redirect("/costs/?error=Error: Please enter a Cat's Weight and Age")
for i in range(85):
calc_wt = round((1+mult*i+stwt*wt+ints+age*ct_age)*wt,1)
if calc_wt>wt:
use_wt = calc_wt
else:
use_wt = wt
if i>0:
amount = round(use_wt/2.2*dosage/15,2)
price = round(daily_price*use_wt/2.2*dosage,0)
else:
price = 0
amount = 0
total_vol = total_vol + amount
total_cost = total_cost + price
table.append(
{"price": "$%.2f" % price,
"weight":use_wt,
"amount":"%.2f mL" % amount
})
params = "Percent Change from Start = %f x [treatment day] + %f x [starting weight] + %f [cat age] " % (mult,stwt, age)
return render(request, "InjectionLog/costs.html",{"page":"costs", "model":mult, "amount":math.ceil(total_vol/5/.96), "loop":table,"total_cost":"${:.2f}".format(round(total_cost/100,0)*100), "params":params})
def get_local_timezone(request):
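    """Resolve the request's timezone: cached session value, then a user-defined FixTimezone record,
    then IP geolocation via ip-api.com, falling back to UTC."""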
import requests
if "tz" in request.session:
return request.session["tz"]
try:
user_defined_tz = FixTimezone.objects.get(owner=request.user).timezone
request.session["tz"] = user_defined_tz
return request.session["tz"]
except:
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
if ip == "127.0.0.1":
ip = "172.16.58.3"
try:
ipinfo = requests.get('http://ip-api.com/json/%s' % ip)
ipinfo = ipinfo.json()
tz = ipinfo["timezone"]
except:
tz = "UTC"
request.session["tz"] = tz
return tz
def cat_stats(sc):
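    """Aggregate the cat-quality data by treatment week, computing the mean and standard deviation
    of the behaviour score."""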
import numpy as np
data = Database()
quality = data.get_cat_quality()
this_cat = pd.DataFrame()
results = quality.groupby(['week']).agg(
behavior = pd.NamedAgg(column='cat_behavior_today', aggfunc=np.mean),
stdev = | pd.NamedAgg(column='cat_behavior_today',aggfunc=np.std) | pandas.NamedAgg |
import pandas as pd, numpy as np, pickle, os
def filter_cpx(data, k):
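    """Keep only complexes whose most frequent atom-pair type occurs at most k times."""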
data_red = []
for idx in range(len(data)):
unique = {}
for adx, a in enumerate(data[idx][0]):
if a[0] not in unique:
unique[a[0]] = [a[1]]
else:
unique[a[0]].append(a[1])
max_unique = max([len(unique[i]) for i in unique])
if max_unique <=k:
data_red.append(data[idx])
return data_red
def tables(name, atom_pairs, atom_pair_types, k):
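    """Build k distance tables (one per occurrence rank m): for each complex, take the m-th distance of
    every atom-pair type (100.0 if absent), writing intermediate 500-row chunks to disk and then
    concatenating them into '<name><m>.csv'."""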
ranges = list(range(0, len(atom_pairs), 500))
ranges = [[ranges[i], ranges[i + 1]] for i in range(0, len(ranges) - 1)] + [[ranges[-1], len(atom_pairs)]]
for m in range(k):
#lens = {}
print(m)
load_files = []
for rdx, r in enumerate(ranges):
load_file = name + str(m) + '_' + str(rdx) + '.txt'
load_files.append(load_file)
df = pd.DataFrame(columns=atom_pair_types)
for i in atom_pairs[r[0]:r[1]]:
unique = {}
for adx, a in enumerate(i):
if a[0] not in unique:
unique[a[0]] = [a[1]]
else:
unique[a[0]].append(a[1])
row_input = {}
for a in atom_pair_types:
if (a in unique) and (len(unique[a]) > m):
row_input[a] = unique[a][m]
else:
row_input[a] = 100.0
df = df.append(row_input, ignore_index=True)
print(m, rdx)
df.to_csv(load_file, sep=' ', mode='w')
frames = [pd.read_csv(load_file, sep=' ') for load_file in load_files]
result = pd.concat(frames)
result = result.drop(['Unnamed: 0'], axis=1)
result.to_csv(name + str(m) + '.csv', sep=' ', mode='w')
for load_file in load_files:
os.remove(load_file)
def tables2(name, cp):
df = | pd.DataFrame(columns=['num_ligand_atoms', 'exp_binding_energy', 'complex_name']) | pandas.DataFrame |
#!/usr/bin/env python
import json
import os
import glob
import time
import httpagentparser
from itertools import combinations
import numpy as np
import pandas as pd
from scipy.sparse import hstack
from tqdm import tqdm
from lib.parsers.udgerWrapper import is_crawler, get_ua
class LogParser:
"""
Prepare data for learning
"""
def __init__(self, log_folder):
"""
:param log_folder: Log folder
"""
self.__log_folder = log_folder
self.__main_table = []
self.__value_table = []
self.__order_table = []
@staticmethod
def __line_handler(line, filter_crawlers, parse_ua, start_log_time, finish_log_time):
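        """Parse one 'timestamp,ip,headers' log line into (main_row, value_row, ordered_row);
        returns False for out-of-range timestamps or (optionally) crawler traffic."""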
elements = line.split(',', 2)
if int(elements[0]) < start_log_time or int(elements[0]) > finish_log_time:
return False
if filter_crawlers:
if not elements[1]:
return False
main_row = {'timestamp': elements[0], 'ip': elements[1]}
value_row = {}
ordered_row = {}
if elements[2][0] == "'":
elements[2] = elements[2].strip()[1:][:-1].translate(str.maketrans("'", '"'))
if bool(elements[2] and elements[2].strip()):
# noinspection PyBroadException
try:
value_row.update(json.loads(elements[2].translate(str.maketrans("'", "\n"))))
order = -2 # ip and timestamp
for header, _ in value_row.items():
order += 1
if order > 0:
ordered_row.update({header: order})
if not parse_ua:
if filter_crawlers:
if is_crawler(elements[1], value_row['User-Agent']):
return False
if 'bot' in httpagentparser.detect(value_row['User-Agent']):
if httpagentparser.detect(value_row['User-Agent'])['bot']:
return False
main_row['User_Agent'] = value_row['User-Agent']
else:
if filter_crawlers:
if is_crawler(elements[1], value_row['User-Agent']):
return False
if 'bot' in httpagentparser.detect(value_row['User-Agent']):
if httpagentparser.detect(value_row['User-Agent'])['bot']:
return False
ua_obj = get_ua(value_row['User-Agent'])
main_row = {
'timestamp': elements[0],
'ip': elements[1],
'User_Agent': value_row['User-Agent'],
'ua_family_code': ua_obj['ua_family_code'],
'ua_version': ua_obj['ua_family_code'] + ua_obj['ua_version'],
'ua_class_code': ua_obj['ua_class_code'],
'device_class_code': ua_obj['device_class_code'],
'os_family_code': ua_obj['os_family_code'],
'os_code': ua_obj['os_code']
}
except:
pass
return main_row, value_row, ordered_row
def __parse_single_log(self, path_to_log, filter_crawlers, parse_ua, start_log_time, finish_log_time):
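        """Parse a single log file line by line and return the accumulated main, value and order tables."""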
with open(path_to_log, 'r') as input_stream:
main_table = []
value_table = []
ordered_table = []
for line in input_stream:
line = self.__line_handler(line, filter_crawlers, parse_ua, start_log_time, finish_log_time)
if line:
main_row, value_row, ordered_row = line
main_table.append(main_row)
value_table.append(value_row)
ordered_table.append(ordered_row)
return main_table, value_table, ordered_table
def __parse_logs_from_folder(self, start_log_index, finish_log_index, filter_crawlers, parse_ua,
start_log_time=0, finish_log_time=time.time()):
files_in_folder = list(glob.iglob(self.__log_folder + '/**/*.log', recursive=True))
# files_in_folder = os.listdir(path=self.__log_folder)
if finish_log_index > len(files_in_folder) - 1:
finish_log_index = len(files_in_folder) - 1
for i in tqdm(range(start_log_index, finish_log_index)):
if files_in_folder[i].endswith('.log'):
main_sample, value_sample, order_sample = self.__parse_single_log(files_in_folder[i],
filter_crawlers, parse_ua, start_log_time, finish_log_time)
self.__value_table.extend(value_sample)
self.__order_table.extend(order_sample)
self.__main_table.extend(main_sample)
return self.__main_table, self.__value_table, self.__order_table
def parse_train_sample(self, start_log_index=0, finish_log_index=1,
filter_crawlers=False, parse_ua=False):
"""
        :param start_log_index: index of the first log file to parse
        :param finish_log_index: index at which to stop parsing (exclusive)
        :param filter_crawlers: use the udger.com crawler filter to drop bot traffic
        :param parse_ua: parse the user agent; the main table then contains the parsed UA fields rather than only the raw value
:return: tuple main_table, value_table, order_table
"""
self.__parse_logs_from_folder(start_log_index, finish_log_index, filter_crawlers, parse_ua)
return self.__main_table, self.__value_table, self.__order_table
def parse_train_sample_by_time(self, start_log_time=0, finish_log_time=time.time(),
filter_crawlers=False, parse_ua=False):
"""
        :param start_log_time: lower limit for log timestamps
:param finish_log_time: upper limit for log timestamp
:param filter_crawlers: Use crawler filter udger.com
:param parse_ua: Parse user agent. Main Table will contain tuple rather than the value
:return: tuple main_table, value_table, order_table
"""
self.__parse_logs_from_folder(0, 10000, filter_crawlers, parse_ua, start_log_time, finish_log_time)
return self.__main_table, self.__value_table, self.__order_table
def parse_bot_sample(self, distr_start_log_index, distr_finish_log_index, base_start_log_index,
base_finish_log_index, filter_crawlers=False, parse_ua=False):
"""
        :param distr_start_log_index: first log index used to build the user-agent distribution
        :param distr_finish_log_index: log index at which the distribution sample stops (exclusive)
        :param base_start_log_index: first log index used for the header values sample
        :param base_finish_log_index: log index at which the values sample stops (exclusive)
        :param filter_crawlers: use the udger.com crawler filter to drop bot traffic
        :param parse_ua: parse the user agent; the main table then contains the parsed UA fields rather than only the raw value
:return: tuple main_table, value_table, order_table
"""
print('Start parsing logs for distribution')
main_data, _, _ = self.__parse_logs_from_folder(distr_start_log_index, distr_finish_log_index,
filter_crawlers, parse_ua)
main_frame = pd.DataFrame(main_data)
distribution_frame = main_frame.User_Agent.value_counts() / main_frame.shape[0]
cumulative_frame = np.cumsum(distribution_frame)
print('Start parsing logs for values')
main_data, self.__value_table, self.__order_table = self.__parse_logs_from_folder(
base_start_log_index, base_finish_log_index, filter_crawlers, parse_ua)
self.__main_table = pd.DataFrame(main_data)
sample_user_agents = []
norm_coefficient = distribution_frame.sum()
print('Bots Generation')
for _ in tqdm(range(self.__main_table.shape[0])):
uniform_value = np.random.rand() * norm_coefficient
sample_user_agents.append(
distribution_frame[cumulative_frame > uniform_value].index[0])
sample_user_agents = | pd.Series(sample_user_agents) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 16 09:04:46 2017
@author: <NAME>
pygemfxns_output_postprocessing.py collects post-processing routines: plots, relationships between variables,
and other comparisons between model output and input data.
"""
# Built-in Libraries
import os
import collections
# External Libraries
import numpy as np
import pandas as pd
import netCDF4 as nc
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import scipy
import cartopy
import xarray as xr
# Local Libraries
import pygem_input as input
import pygemfxns_modelsetup as modelsetup
import pygemfxns_massbalance as massbalance
import class_mbdata
import run_simulation
# Script options
option_plot_futuresim = 0
option_mb_shean_analysis = 0
option_mb_shean_regional = 0
option_geodeticMB_loadcompare = 0
option_check_biasadj = 0
option_parameter_relationships = 0
option_MCMC_ensembles = 0
option_calcompare_w_geomb = 0
option_add_metadata2netcdf = 0
option_var_mon2annual = 0
#%% SUBSET RESULTS INTO EACH VARIABLE NAME SO EASIER TO TRANSFER
if option_var_mon2annual == 1:
netcdf_fp_prefix = input.output_filepath + 'simulations/spc/20181108_vars/'
vns = ['acc_glac_monthly', 'melt_glac_monthly', 'refreeze_glac_monthly', 'frontalablation_glac_monthly',
'massbaltotal_glac_monthly', 'temp_glac_monthly', 'prec_glac_monthly', 'runoff_glac_monthly']
# vns = ['runoff_glac_monthly']
def coords_attrs_dict(ds, vn):
"""
Retrieve dictionaries containing coordinates, attributes, and encoding for the dataset and variable name
Parameters
----------
ds : xr.Dataset
dataset of a variable of interest
vn : str
variable name
Returns
-------
output_coords_dict : dictionary
            coordinates for the modified variable
output_attrs_dict: dictionary
attributes to add to the modified variable
encoding : dictionary
encoding used with exporting xarray dataset to netcdf
"""
# Variable coordinates dictionary
output_coords_dict = {
'temp_glac_annual': collections.OrderedDict(
[('glac', ds.glac.values), ('year', ds.year.values), ('stats', ds.stats.values)]),
'prec_glac_annual': collections.OrderedDict(
[('glac', ds.glac.values), ('year', ds.year.values), ('stats', ds.stats.values)]),
'runoff_glac_annual': collections.OrderedDict(
[('glac', ds.glac.values), ('year', ds.year.values), ('stats', ds.stats.values)]),
'acc_glac_annual': collections.OrderedDict(
[('glac', ds.glac.values), ('year', ds.year.values), ('stats', ds.stats.values)]),
'acc_glac_summer': collections.OrderedDict(
[('glac', ds.glac.values), ('year', ds.year.values), ('stats', ds.stats.values)]),
'acc_glac_winter': collections.OrderedDict(
[('glac', ds.glac.values), ('year', ds.year.values), ('stats', ds.stats.values)]),
'melt_glac_annual': collections.OrderedDict(
[('glac', ds.glac.values), ('year', ds.year.values), ('stats', ds.stats.values)]),
'melt_glac_summer': collections.OrderedDict(
[('glac', ds.glac.values), ('year', ds.year.values), ('stats', ds.stats.values)]),
'melt_glac_winter': collections.OrderedDict(
[('glac', ds.glac.values), ('year', ds.year.values), ('stats', ds.stats.values)]),
'refreeze_glac_annual': collections.OrderedDict(
[('glac', ds.glac.values), ('year', ds.year.values), ('stats', ds.stats.values)]),
'refreeze_glac_summer': collections.OrderedDict(
[('glac', ds.glac.values), ('year', ds.year.values), ('stats', ds.stats.values)]),
'refreeze_glac_winter': collections.OrderedDict(
[('glac', ds.glac.values), ('year', ds.year.values), ('stats', ds.stats.values)]),
'frontalablation_glac_annual': collections.OrderedDict(
[('glac', ds.glac.values), ('year', ds.year.values), ('stats', ds.stats.values)]),
'frontalablation_glac_summer': collections.OrderedDict(
[('glac', ds.glac.values), ('year', ds.year.values), ('stats', ds.stats.values)]),
'frontalablation_glac_winter': collections.OrderedDict(
[('glac', ds.glac.values), ('year', ds.year.values), ('stats', ds.stats.values)]),
'massbaltotal_glac_annual': collections.OrderedDict(
[('glac', ds.glac.values), ('year', ds.year.values), ('stats', ds.stats.values)]),
'massbaltotal_glac_summer': collections.OrderedDict(
[('glac', ds.glac.values), ('year', ds.year.values), ('stats', ds.stats.values)]),
'massbaltotal_glac_winter': collections.OrderedDict(
[('glac', ds.glac.values), ('year', ds.year.values), ('stats', ds.stats.values)])
}
# Attributes dictionary
output_attrs_dict = {
'temp_glac_annual': {
'long_name': 'glacier-wide mean air temperature',
'units': 'degC',
'temporal_resolution': 'annual',
'comment': (
                'annual mean weights each month equally, each elevation bin is weighted equally'
' to compute the mean temperature, and bins where the glacier no longer exists due to '
'retreat have been removed')},
'prec_glac_annual': {
'long_name': 'glacier-wide precipitation (liquid)',
'units': 'm',
'temporal_resolution': 'annual',
'comment': 'only the liquid precipitation, solid precipitation excluded'},
'acc_glac_annual': {
'long_name': 'glacier-wide accumulation',
'units': 'm w.e.',
'temporal_resolution': 'annual',
'comment': 'only the solid precipitation'},
'acc_glac_summer': {
'long_name': 'glacier-wide accumulation',
'units': 'm w.e.',
'temporal_resolution': 'annual summer',
'comment': 'only the solid precipitation'},
'acc_glac_winter': {
'long_name': 'glacier-wide accumulation',
'units': 'm w.e.',
'temporal_resolution': 'annual winter',
'comment': 'only the solid precipitation'},
'melt_glac_annual': {
'long_name': 'glacier-wide melt',
'units': 'm w.e.',
'temporal_resolution': 'annual'},
'melt_glac_summer': {
'long_name': 'glacier-wide melt',
'units': 'm w.e.',
'temporal_resolution': 'annual summer'},
'melt_glac_winter': {
'long_name': 'glacier-wide melt',
'units': 'm w.e.',
'temporal_resolution': 'annual winter'},
'refreeze_glac_annual': {
'long_name': 'glacier-wide refreeze',
'units': 'm w.e.',
'temporal_resolution': 'annual'},
'refreeze_glac_summer': {
'long_name': 'glacier-wide refreeze',
'units': 'm w.e.',
'temporal_resolution': 'annual summer'},
'refreeze_glac_winter': {
'long_name': 'glacier-wide refreeze',
'units': 'm w.e.',
'temporal_resolution': 'annual winter'},
'frontalablation_glac_annual': {
'long_name': 'glacier-wide frontal ablation',
'units': 'm w.e.',
'temporal_resolution': 'annual',
'comment': (
'mass losses from calving, subaerial frontal melting, sublimation above the '
'waterline and subaqueous frontal melting below the waterline')},
'frontalablation_glac_summer': {
'long_name': 'glacier-wide frontal ablation',
'units': 'm w.e.',
'temporal_resolution': 'annual summer',
'comment': (
'mass losses from calving, subaerial frontal melting, sublimation above the '
'waterline and subaqueous frontal melting below the waterline')},
'frontalablation_glac_winter': {
'long_name': 'glacier-wide frontal ablation',
'units': 'm w.e.',
'temporal_resolution': 'annual winter',
'comment': (
'mass losses from calving, subaerial frontal melting, sublimation above the '
'waterline and subaqueous frontal melting below the waterline')},
'massbaltotal_glac_annual': {
'long_name': 'glacier-wide total mass balance',
'units': 'm w.e.',
'temporal_resolution': 'annual',
'comment': 'total mass balance is the sum of the climatic mass balance and frontal ablation'},
'massbaltotal_glac_summer': {
'long_name': 'glacier-wide total mass balance',
'units': 'm w.e.',
'temporal_resolution': 'annual summer',
'comment': 'total mass balance is the sum of the climatic mass balance and frontal ablation'},
'massbaltotal_glac_winter': {
'long_name': 'glacier-wide total mass balance',
'units': 'm w.e.',
'temporal_resolution': 'annual winter',
'comment': 'total mass balance is the sum of the climatic mass balance and frontal ablation'},
'runoff_glac_annual': {
'long_name': 'glacier-wide runoff',
'units': 'm**3',
'temporal_resolution': 'annual',
'comment': 'runoff from the glacier terminus, which moves over time'},
}
encoding = {}
noencoding_vn = ['stats', 'glac_attrs']
# Encoding (specify _FillValue, offsets, etc.)
if vn not in noencoding_vn:
encoding[vn] = {'_FillValue': False}
return output_coords_dict, output_attrs_dict, encoding
for vn in vns:
netcdf_fp = netcdf_fp_prefix + vn + '/'
for i in os.listdir(netcdf_fp):
if i.endswith('.nc'):
print(i)
# Open dataset and extract annual values
ds = xr.open_dataset(netcdf_fp + i)
ds_mean = ds[vn].values[:,:,0]
ds_std = ds[vn].values[:,:,1]
ds_var = ds_std**2
# Compute annual/seasonal mean/sum and standard deviation for the variable of interest
                if vn == 'temp_glac_monthly':
output_list = ['annual']
vn_annual = 'temp_glac_annual'
# Mean annual temperature, standard deviation, and variance
ds_mean_annual = ds_mean.reshape(-1,12).mean(axis=1).reshape(-1,int(ds_mean.shape[1]/12))
ds_var_annual = ds_var.reshape(-1,12).mean(axis=1).reshape(-1,int(ds_std.shape[1]/12))
ds_std_annual = ds_var_annual**0.5
ds_values_annual = np.concatenate((ds_mean_annual[:,:,np.newaxis], ds_std_annual[:,:,np.newaxis]),
axis=2)
elif vn in ['prec_glac_monthly', 'runoff_glac_monthly']:
output_list = ['annual']
vn_annual = 'prec_glac_annual'
# Total annual precipitation, standard deviation, and variance
ds_sum_annual = ds_mean.reshape(-1,12).sum(axis=1).reshape(-1,int(ds_mean.shape[1]/12))
ds_var_annual = ds_var.reshape(-1,12).sum(axis=1).reshape(-1,int(ds_std.shape[1]/12))
ds_std_annual = ds_var_annual**0.5
ds_values_annual = np.concatenate((ds_sum_annual[:,:,np.newaxis], ds_std_annual[:,:,np.newaxis]),
axis=2)
elif vn in ['acc_glac_monthly', 'melt_glac_monthly', 'refreeze_glac_monthly',
'frontalablation_glac_monthly', 'massbaltotal_glac_monthly']:
output_list = ['annual', 'summer', 'winter']
# Annual total, standard deviation, and variance
ds_sum_annual = ds_mean.reshape(-1,12).sum(axis=1).reshape(-1,int(ds_mean.shape[1]/12))
ds_var_annual = ds_var.reshape(-1,12).sum(axis=1).reshape(-1,int(ds_std.shape[1]/12))
ds_std_annual = ds_var_annual**0.5
ds_values_annual = np.concatenate((ds_sum_annual[:,:,np.newaxis], ds_std_annual[:,:,np.newaxis]),
axis=2)
# Seasonal total, standard deviation, and variance
if ds.time.year_type == 'water year':
option_wateryear = 1
elif ds.time.year_type == 'calendar year':
option_wateryear = 2
else:
option_wateryear = 3
dates_table = modelsetup.datesmodelrun(startyear=ds.year.values[0], endyear=ds.year.values[-1],
spinupyears=0, option_wateryear=option_wateryear)
# For seasonal calculations copy monthly values and remove the other season's values
ds_mean_summer = ds_mean.copy()
ds_var_summer = ds_var.copy()
ds_mean_summer[:,dates_table.season.values == 'winter'] = 0
ds_sum_summer = ds_mean_summer.reshape(-1,12).sum(axis=1).reshape(-1, int(ds_mean.shape[1]/12))
ds_var_summer = ds_var_summer.reshape(-1,12).sum(axis=1).reshape(-1,int(ds_std.shape[1]/12))
ds_std_summer = ds_var_summer**0.5
ds_values_summer = np.concatenate((ds_sum_summer[:,:,np.newaxis], ds_std_summer[:,:,np.newaxis]),
axis=2)
ds_mean_winter = ds_mean.copy()
ds_var_winter = ds_var.copy()
ds_mean_winter[:,dates_table.season.values == 'summer'] = 0
ds_sum_winter = ds_mean_winter.reshape(-1,12).sum(axis=1).reshape(-1, int(ds_mean.shape[1]/12))
ds_var_winter = ds_var_winter.reshape(-1,12).sum(axis=1).reshape(-1,int(ds_std.shape[1]/12))
ds_std_winter = ds_var_winter**0.5
ds_values_winter = np.concatenate((ds_sum_winter[:,:,np.newaxis], ds_std_winter[:,:,np.newaxis]),
axis=2)
# Create modified dataset
for temporal_res in output_list:
vn_new = vn.split('_')[0] + '_glac_' + temporal_res
output_fp = netcdf_fp_prefix + vn_new + '/'
output_fn = i.split('.nc')[0][:-7] + temporal_res + '.nc'
output_coords_dict, output_attrs_dict, encoding = coords_attrs_dict(ds, vn_new)
                    if temporal_res == 'annual':
ds_new = xr.Dataset({vn_new: (list(output_coords_dict[vn_new].keys()), ds_values_annual)},
coords=output_coords_dict[vn_new])
                    elif temporal_res == 'summer':
ds_new = xr.Dataset({vn_new: (list(output_coords_dict[vn_new].keys()), ds_values_summer)},
coords=output_coords_dict[vn_new])
                    elif temporal_res == 'winter':
ds_new = xr.Dataset({vn_new: (list(output_coords_dict[vn_new].keys()), ds_values_winter)},
coords=output_coords_dict[vn_new])
ds_new[vn_new].attrs = output_attrs_dict[vn_new]
# Merge new dataset into the old to retain glacier table and other attributes
output_ds = xr.merge((ds, ds_new))
output_ds = output_ds.drop(vn)
# Export netcdf
if not os.path.exists(output_fp):
os.makedirs(output_fp)
output_ds.to_netcdf(output_fp + output_fn, encoding=encoding)
# Remove file
os.remove(netcdf_fp + i)
#%%===== PLOT FUNCTIONS =============================================================================================
def plot_latlonvar(lons, lats, variable, rangelow, rangehigh, title, xlabel, ylabel, colormap, east, west, south, north,
xtick=1,
ytick=1,
marker_size=2,
option_savefig=0,
fig_fn='Samplefig_fn.png',
output_filepath = input.main_directory + '/../Output/'):
"""
Plot a variable according to its latitude and longitude
"""
# Create the projection
ax = plt.axes(projection=cartopy.crs.PlateCarree())
# Add country borders for reference
ax.add_feature(cartopy.feature.BORDERS)
# Set the extent
ax.set_extent([east, west, south, north], cartopy.crs.PlateCarree())
# Label title, x, and y axes
plt.title(title)
ax.set_xticks(np.arange(east,west+1,xtick), cartopy.crs.PlateCarree())
ax.set_yticks(np.arange(south,north+1,ytick), cartopy.crs.PlateCarree())
plt.xlabel(xlabel)
plt.ylabel(ylabel)
# Plot the data
plt.scatter(lons, lats, s=marker_size, c=variable, cmap='RdBu', marker='o', edgecolor='black', linewidths=0.25)
# plotting x, y, size [s=__], color bar [c=__]
plt.clim(rangelow,rangehigh)
# set the range of the color bar
plt.colorbar(fraction=0.02, pad=0.04)
# fraction resizes the colorbar, pad is the space between the plot and colorbar
if option_savefig == 1:
plt.savefig(output_filepath + fig_fn)
plt.show()
def plot_caloutput(data):
"""
Plot maps and histograms of the calibration parameters to visualize results
"""
# Set extent
east = int(round(data['CenLon'].min())) - 1
west = int(round(data['CenLon'].max())) + 1
south = int(round(data['CenLat'].min())) - 1
north = int(round(data['CenLat'].max())) + 1
xtick = 1
ytick = 1
# Select relevant data
lats = data['CenLat'][:]
lons = data['CenLon'][:]
precfactor = data['precfactor'][:]
tempchange = data['tempchange'][:]
ddfsnow = data['ddfsnow'][:]
calround = data['calround'][:]
massbal = data['MB_geodetic_mwea']
# Plot regional maps
plot_latlonvar(lons, lats, massbal, 'Geodetic mass balance [mwea]', 'longitude [deg]', 'latitude [deg]', east, west,
south, north, xtick, ytick)
plot_latlonvar(lons, lats, precfactor, 'precipitation factor', 'longitude [deg]', 'latitude [deg]', east, west,
south, north, xtick, ytick)
plot_latlonvar(lons, lats, tempchange, 'Temperature bias [degC]', 'longitude [deg]', 'latitude [deg]', east, west,
south, north, xtick, ytick)
plot_latlonvar(lons, lats, ddfsnow, 'DDF_snow [m w.e. d-1 degC-1]', 'longitude [deg]', 'latitude [deg]', east, west,
south, north, xtick, ytick)
plot_latlonvar(lons, lats, calround, 'Calibration round', 'longitude [deg]', 'latitude [deg]', east, west,
south, north, xtick, ytick)
# Plot histograms
data.hist(column='MB_difference_mwea', bins=50)
plt.title('Mass Balance Difference [mwea]')
data.hist(column='precfactor', bins=50)
plt.title('Precipitation factor [-]')
data.hist(column='tempchange', bins=50)
plt.title('Temperature bias [degC]')
data.hist(column='ddfsnow', bins=50)
plt.title('DDFsnow [mwe d-1 degC-1]')
plt.xticks(rotation=60)
data.hist(column='calround', bins = [0.5, 1.5, 2.5, 3.5])
plt.title('Calibration round')
plt.xticks([1, 2, 3])
#%% ===== PARAMETER RELATIONSHIPS ======
if option_parameter_relationships == 1:
# Load csv
ds = pd.read_csv(input.main_directory + '/../Output/20180710_cal_modelparams_opt1_R15_ERA-Interim_1995_2015.csv',
index_col=0)
property_cn = 'Zmed'
# Relationship between model parameters and glacier properties
plt.figure(figsize=(6,10))
plt.subplots_adjust(wspace=0.05, hspace=0.05)
plt.suptitle('Model parameters vs. ' + property_cn, y=0.94)
# Temperature change
slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(ds[property_cn], ds['tempchange'])
xplot = np.arange(4000,6500)
line = slope*xplot+intercept
plt.subplot(4,1,1)
plt.plot(ds[property_cn], ds['tempchange'], 'o', mfc='none', mec='black')
plt.plot(xplot, line)
plt.xlabel(property_cn + ' [masl]', size=10)
plt.ylabel('tempchange \n[degC]', size=12)
equation = 'tempchange = ' + str(round(slope,7)) + ' * ' + property_cn + ' + ' + str(round(intercept,5))
plt.text(0.15, 0.85, equation, fontsize=12, transform=plt.gcf().transFigure,
bbox=dict(facecolor='white', edgecolor='none', alpha=0.85))
print(equation, ' , R2 =', round(r_value**2,2))
# Precipitation factor
slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(ds[property_cn], ds['precfactor'])
xplot = np.arange(4000,6500)
line = slope*xplot+intercept
plt.subplot(4,1,2)
plt.plot(ds[property_cn], ds['precfactor'], 'o', mfc='none', mec='black')
plt.plot(xplot, line)
plt.xlabel(property_cn + ' [masl]', size=12)
plt.ylabel('precfactor \n[-]', size=12)
equation = 'precfactor = ' + str(round(slope,7)) + ' * ' + property_cn + ' + ' + str(round(intercept,5))
plt.text(0.15, 0.65, equation, fontsize=12, transform=plt.gcf().transFigure,
bbox=dict(facecolor='white', edgecolor='none', alpha=0.85))
print(equation, ' , R2 =', round(r_value**2,2))
# Degree day factor of snow
slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(ds[property_cn], ds['ddfsnow'])
xplot = np.arange(4000,6500)
line = slope*xplot+intercept
plt.subplot(4,1,3)
plt.plot(ds[property_cn], ds['ddfsnow'], 'o', mfc='none', mec='black')
plt.plot(xplot, line)
plt.xlabel(property_cn + ' [masl]', size=12)
plt.ylabel('ddfsnow \n[mwe d-1 degC-1]', size=12)
# plt.legend()
equation = 'ddfsnow = ' + str(round(slope,12)) + ' * ' + property_cn + ' + ' + str(round(intercept,5))
plt.text(0.15, 0.45, equation, fontsize=12, transform=plt.gcf().transFigure,
bbox=dict(facecolor='white', edgecolor='none', alpha=0.85))
print(equation, ' , R2 =', round(r_value**2,2))
# Precipitation gradient
slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(ds[property_cn], ds['precgrad'])
xplot = np.arange(4000,6500)
line = slope*xplot+intercept
plt.subplot(4,1,4)
plt.plot(ds[property_cn], ds['precgrad'], 'o', mfc='none', mec='black')
plt.plot(xplot, line)
plt.xlabel(property_cn + ' [masl]', size=12)
plt.ylabel('precgrad \n[% m-1]', size=12)
# plt.legend()
equation = 'precgrad = ' + str(round(slope,12)) + ' * ' + property_cn + ' + ' + str(round(intercept,5))
plt.text(0.15, 0.25, equation, fontsize=12, transform=plt.gcf().transFigure,
bbox=dict(facecolor='white', edgecolor='none', alpha=0.85))
print(equation, ' , R2 =', round(r_value**2,2))
# Plot and save figure
if option_savefigs == 1:
plt.savefig(input.output_filepath + 'figures/' + 'modelparameters_vs_' + property_cn + '.png',
bbox_inches='tight')
plt.show()
#%% ===== PLOTTING: Future simulations =====
if option_plot_futuresim == 1:
output_fp = input.output_filepath + 'R15_sims_20180530/'
gcm_list = ['MPI-ESM-LR', 'GFDL-CM3', 'CanESM2', 'GISS-E2-R']
# gcm_list = ['NorESM1-M']
# gcm_list = ['MPI-ESM-LR']
rcp_scenario = 'rcp26'
rgi_regionO1 = [15]
output_all = []
gcm = gcm_list[0]
for gcm in gcm_list:
# for rcp_scenario in ['rcp26', 'rcp85']:
print(gcm)
output_fn = 'PyGEM_R' + str(rgi_regionO1[0]) + '_' + gcm + '_' + rcp_scenario + '_biasadj_opt1_1995_2100.nc'
output = nc.Dataset(output_fp + output_fn)
# Select relevant data
main_glac_rgi = pd.DataFrame(output['glacier_table'][:], columns=output['glacier_table_header'][:])
main_glac_rgi['RGIId'] = 'RGI60-' + main_glac_rgi['RGIId_float'].astype(str)
lats = main_glac_rgi['CenLat']
lons = main_glac_rgi['CenLon']
months = nc.num2date(output['time'][:], units=output['time'].units, calendar=output['time'].calendar).tolist()
years = output['year'][:]
years_plus1 = output['year_plus1'][:]
massbal_total = output['massbaltotal_glac_monthly'][:]
massbal_total_mwea = massbal_total.sum(axis=1)/(massbal_total.shape[1]/12)
volume_glac_annual = output['volume_glac_annual'][:]
volume_glac_annual[volume_glac_annual[:,0] == 0] = np.nan
volume_glac_annualnorm = volume_glac_annual / volume_glac_annual[:,0][:,np.newaxis] * 100
volchange_glac_perc_15yrs = (volume_glac_annual[:,16] - volume_glac_annual[:,0]) / volume_glac_annual[:,0] * 100
volchange_glac_perc_15yrs[np.isnan(volchange_glac_perc_15yrs)==True] = 0
volume_reg_annual = output['volume_glac_annual'][:].sum(axis=0)
volume_reg_annualnorm = volume_reg_annual / volume_reg_annual[0] * 100
slr_reg_annual_mm = ((volume_reg_annual[0] - volume_reg_annual) * input.density_ice / input.density_water /
input.area_ocean * 10**6)
runoff_glac_monthly = output['runoff_glac_monthly'][:]
runoff_reg_monthly = runoff_glac_monthly.mean(axis=0)
acc_glac_monthly = output['acc_glac_monthly'][:]
acc_reg_monthly = acc_glac_monthly.mean(axis=0)
acc_reg_annual = np.sum(acc_reg_monthly.reshape(-1,12), axis=1)
refreeze_glac_monthly = output['refreeze_glac_monthly'][:]
refreeze_reg_monthly = refreeze_glac_monthly.mean(axis=0)
refreeze_reg_annual = np.sum(refreeze_reg_monthly.reshape(-1,12), axis=1)
melt_glac_monthly = output['melt_glac_monthly'][:]
melt_reg_monthly = melt_glac_monthly.mean(axis=0)
melt_reg_annual = np.sum(melt_reg_monthly.reshape(-1,12), axis=1)
massbaltotal_glac_monthly = output['massbaltotal_glac_monthly'][:]
massbaltotal_reg_monthly = massbaltotal_glac_monthly.mean(axis=0)
massbaltotal_reg_annual = np.sum(massbaltotal_reg_monthly.reshape(-1,12), axis=1)
# PLOT OF ALL GCMS
# use subplots to plot all the GCMs on the same figure
# Plot: Regional volume change [%]
plt.subplot(2,1,1)
plt.plot(years_plus1, volume_reg_annualnorm, label=gcm)
plt.title('Region ' + str(rgi_regionO1[0]))
plt.ylabel('Volume [%]')
plt.xlim(2000,2101)
plt.legend()
# Plot: Regional sea-level rise [mm]
plt.subplot(2,1,2)
plt.plot(years_plus1, slr_reg_annual_mm, label=gcm)
plt.ylabel('Sea-level rise [mm]')
plt.xlim(2000,2101)
plt.show()
# PLOTS FOR LAST GCM
# Plot: Regional mass balance [mwe]
plt.plot(years, massbaltotal_reg_annual, label='massbal_total')
plt.plot(years, acc_reg_annual, label='accumulation')
plt.plot(years, refreeze_reg_annual, label='refreeze')
plt.plot(years, -1*melt_reg_annual, label='melt')
plt.ylabel('Region 15 annual mean [m.w.e.]')
plt.title(gcm)
plt.legend()
plt.show()
# Plot: Regional map of volume change by glacier
volume_change_glac_perc = output['volume_glac_annual'][:][:,0]
volume_change_glac_perc[volume_change_glac_perc > 0] = (
(volume_glac_annual[volume_change_glac_perc > 0,-1] -
volume_glac_annual[volume_change_glac_perc > 0, 0])
/ volume_glac_annual[volume_change_glac_perc > 0, 0] * 100)
# Set extent
east = int(round(lons.min())) - 1
west = int(round(lons.max())) + 1
south = int(round(lats.min())) - 1
north = int(round(lats.max())) + 1
xtick = 1
ytick = 1
# Plot regional maps
plot_latlonvar(lons, lats, volume_change_glac_perc, -100, 100, gcm + ' Volume [%]',
'longitude [deg]', 'latitude [deg]', 'jet_r', east, west, south, north, xtick, ytick,
marker_size=20)
#%% ===== MASS BALANCE ANALYSIS =====
if option_mb_shean_analysis == 1:
# Set parameters within this little batch script
option_nearestneighbor_export = 0
# Load csv
ds = pd.read_csv(input.main_directory + '/../Output/calibration_R15_20180403_Opt02solutionspaceexpanding.csv',
index_col='GlacNo')
# Select data of interest
data_all = ds[['RGIId', 'Area', 'CenLon', 'CenLat', 'mb_mwea', 'mb_mwea_sigma', 'lrgcm', 'lrglac', 'precfactor',
'precgrad', 'ddfsnow', 'ddfice', 'tempsnow', 'tempchange']].copy()
# Drop nan data to retain only glaciers with calibrated parameters
data = data_all.dropna()
# Compute statistics
mb_mean = data['mb_mwea'].mean()
mb_std = data['mb_mwea'].std()
mb_95 = [mb_mean - 1.96 * mb_std, mb_mean + 1.96 * mb_std]
# Remove data outside of 95% confidence bounds
data_95 = data[(data['mb_mwea'] >= mb_95[0]) & (data['mb_mwea'] <= mb_95[1])]
mb_1std = [mb_mean - 1 * mb_std, mb_mean + 1 * mb_std]
# Remove data outside of 95% confidence bounds
data_1std = data[(data['mb_mwea'] >= mb_1std[0]) & (data['mb_mwea'] <= mb_1std[1])]
# Plot Glacier Area vs. MB
plt.scatter(data['Area'], data['mb_mwea'], facecolors='none', edgecolors='black', label='Region 15')
plt.ylabel('MB 2000-2015 [mwea]', size=12)
plt.xlabel('Glacier area [km2]', size=12)
plt.legend()
plt.show()
# Only 95% confidence
plt.scatter(data_95['Area'], data_95['mb_mwea'], facecolors='none', edgecolors='black', label='Region 15')
plt.ylabel('MB 2000-2015 [mwea]', size=12)
plt.xlabel('Glacier area [km2]', size=12)
plt.ylim(-3,1.5)
plt.legend()
plt.show()
# Only 1 std
plt.scatter(data_1std['Area'], data_1std['mb_mwea'], facecolors='none', edgecolors='black', label='Region 15')
plt.ylabel('MB 2000-2015 [mwea]', size=12)
plt.xlabel('Glacier area [km2]', size=12)
plt.ylim(-3,1.5)
plt.legend()
plt.show()
# Bar plot
bins = np.array([0.1, 0.25, 0.5, 1, 2.5, 5, 10, 20, 200])
hist, bin_edges = np.histogram(data.Area,bins) # make the histogram
fig, ax = plt.subplots()
# Plot the histogram heights against integers on the x axis
ax.bar(range(len(hist)),hist,width=1)
    # Set the ticks to the middle of the bars
    ax.set_xticks([i for i,j in enumerate(hist)])
    # Set the xticklabels to a string that tells us what the bin edges were
ax.set_xticklabels(['{} - {}'.format(bins[i],bins[i+1]) for i,j in enumerate(hist)], rotation=45)
plt.show()
# Compute max/min for the various bins
mb = data_1std['mb_mwea']
area = data_1std['Area']
mb_envelope = np.zeros((bins.shape[0]-1,3))
for n in range(bins.shape[0] - 1):
mb_envelope[n,0] = bins[n+1]
mb_subset = mb[(area > bins[n]) & (area <= bins[n+1])]
mb_envelope[n,1] = mb_subset.min()
mb_envelope[n,2] = mb_subset.max()
# zoomed in
plt.scatter(data['Area'], data['mb_mwea'], facecolors='none', edgecolors='black', label='Region 15')
plt.ylabel('MB 2000-2015 [mwea]', size=12)
plt.xlabel('Glacier area [km2]', size=12)
plt.xlim(0.1,2)
plt.legend()
plt.show()
# Plot Glacier Area vs. MB
plt.scatter(data['mb_mwea'], data['Area'], facecolors='none', edgecolors='black', label='Region 15')
plt.ylabel('Glacier area [km2]', size=12)
plt.xlabel('MB 2000-2015 [mwea]', size=12)
plt.legend()
plt.show()
# Plot Glacier Area vs. MB
plt.scatter(data_95['mb_mwea'], data_95['Area'], facecolors='none', edgecolors='black', label='Region 15')
plt.ylabel('Glacier area [km2]', size=12)
plt.xlabel('MB 2000-2015 [mwea]', size=12)
plt.xlim(-3,1.75)
plt.legend()
plt.show()
# Histogram of MB data
plt.hist(data['mb_mwea'], bins=50)
plt.show()
main_glac_rgi = modelsetup.selectglaciersrgitable(rgi_glac_number='all')
# Select calibration data from geodetic mass balance from <NAME>
main_glac_calmassbal = modelsetup.selectcalibrationdata(main_glac_rgi)
# Concatenate massbal data to the main glacier
main_glac_rgi = pd.concat([main_glac_rgi, main_glac_calmassbal], axis=1)
# Drop those with nan values
main_glac_calmassbal = main_glac_calmassbal.dropna()
main_glac_rgi = main_glac_rgi.dropna()
main_glac_rgi[['lrgcm', 'lrglac', 'precfactor', 'precgrad', 'ddfsnow', 'ddfice', 'tempsnow', 'tempchange']] = (
data[['lrgcm', 'lrglac', 'precfactor', 'precgrad', 'ddfsnow', 'ddfice', 'tempsnow', 'tempchange']])
# Mass balance versus various parameters
# Median elevation
plt.scatter(main_glac_rgi['mb_mwea'], main_glac_rgi['Zmed'], facecolors='none', edgecolors='black',
label='Region 15')
plt.ylabel('Median Elevation [masl]', size=12)
plt.xlabel('MB 2000-2015 [mwea]', size=12)
plt.legend()
plt.show()
# Elevation range
main_glac_rgi['elev_range'] = main_glac_rgi['Zmax'] - main_glac_rgi['Zmin']
plt.scatter(main_glac_rgi['mb_mwea'], main_glac_rgi['elev_range'], facecolors='none', edgecolors='black',
label='Region 15')
plt.ylabel('Elevation range [m]', size=12)
plt.xlabel('MB 2000-2015 [mwea]', size=12)
plt.legend()
plt.show()
plt.scatter(main_glac_rgi['Area'], main_glac_rgi['elev_range'], facecolors='none', edgecolors='black',
label='Region 15')
plt.ylabel('Elevation range [m]', size=12)
plt.xlabel('Area [km2]', size=12)
plt.legend()
plt.show()
# Length
plt.scatter(main_glac_rgi['mb_mwea'], main_glac_rgi['Lmax'], facecolors='none', edgecolors='black',
label='Region 15')
plt.ylabel('Length [m]', size=12)
plt.xlabel('MB 2000-2015 [mwea]', size=12)
plt.legend()
plt.show()
# Slope
plt.scatter(main_glac_rgi['mb_mwea'], main_glac_rgi['Slope'], facecolors='none', edgecolors='black',
label='Region 15')
plt.ylabel('Slope [deg]', size=12)
plt.xlabel('MB 2000-2015 [mwea]', size=12)
plt.legend()
plt.show()
# Aspect
plt.scatter(main_glac_rgi['mb_mwea'], main_glac_rgi['Aspect'], facecolors='none', edgecolors='black',
label='Region 15')
plt.ylabel('Aspect [deg]', size=12)
plt.xlabel('MB 2000-2015 [mwea]', size=12)
plt.legend()
plt.show()
plt.scatter(main_glac_rgi['Aspect'], main_glac_rgi['precfactor'], facecolors='none', edgecolors='black',
label='Region 15')
plt.ylabel('precfactor [-]', size=12)
plt.xlabel('Aspect [deg]', size=12)
plt.legend()
plt.show()
# tempchange
# Line of best fit
slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(main_glac_rgi['mb_mwea'],
main_glac_rgi['tempchange'])
xplot = np.arange(-3,1.5)
line = slope*xplot+intercept
plt.plot(main_glac_rgi['mb_mwea'], main_glac_rgi['tempchange'], 'o', mfc='none', mec='black')
plt.plot(xplot, line)
plt.ylabel('tempchange [deg]', size=12)
plt.xlabel('MB 2000-2015 [mwea]', size=12)
plt.legend()
plt.show()
# precfactor
# Line of best fit
slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(main_glac_rgi['mb_mwea'],
main_glac_rgi['precfactor'])
xplot = np.arange(-3,1.5)
line = slope*xplot+intercept
plt.plot(main_glac_rgi['mb_mwea'], main_glac_rgi['precfactor'], 'o', mfc='none', mec='black')
plt.plot(xplot, line)
plt.ylabel('precfactor [-]', size=12)
plt.xlabel('MB 2000-2015 [mwea]', size=12)
plt.legend()
plt.show()
#%% ===== ALL GEODETIC MB DATA LOAD & COMPARE (Shean, <NAME>) =====
if option_geodeticMB_loadcompare == 1:
# rgi_regionsO1 = [15]
rgi_regionsO1 = ['13, 14, 15'] # 13, 14, 15 - load data from csv
rgi_glac_number = 'all'
if rgi_regionsO1[0] == '13, 14, 15':
# Note: this file was created by manually copying the main_glac_rgi for regions 13, 14, 15 into a csv
main_glac_rgi = pd.read_csv(input.main_directory +
'/../DEMs/geodetic_glacwide_Shean_Maurer_Brun_HMA_20180807.csv')
else:
# Mass balance column name
massbal_colname = 'mb_mwea'
# Mass balance uncertainty column name
massbal_uncertainty_colname = 'mb_mwea_sigma'
# Mass balance date 1 column name
massbal_t1 = 't1'
# Mass balance date 1 column name
massbal_t2 = 't2'
# Mass balance tolerance [m w.e.a]
massbal_tolerance = 0.1
# Calibration optimization tolerance
main_glac_rgi = modelsetup.selectglaciersrgitable(rgi_regionsO1=rgi_regionsO1, rgi_regionsO2='all',
rgi_glac_number=rgi_glac_number)
# SHEAN DATA
# Load all data
ds_all_shean = pd.read_csv(input.main_directory + '/../DEMs/Shean_2018_0806/hma_mb_20180803_1229.csv')
ds_all_shean['RegO1'] = ds_all_shean[input.shean_rgi_glacno_cn].values.astype(int)
ds_all_shean['glacno'] = ((ds_all_shean[input.shean_rgi_glacno_cn] % 1) * 10**5).round(0).astype(int)
ds_all_shean['RGIId'] = ('RGI60-' + ds_all_shean['RegO1'].astype(str) + '.' +
(ds_all_shean['glacno'] / 10**5).apply(lambda x: '%.5f' % x).str.split('.').str[1])
# Select glaciers included in main_glac_rgi
ds_shean = (ds_all_shean.iloc[np.where(ds_all_shean['RGIId'].isin(main_glac_rgi['RGIId']) == True)[0],:]).copy()
ds_shean.sort_values(['glacno'], inplace=True)
ds_shean.reset_index(drop=True, inplace=True)
ds_shean['O1Index'] = np.where(main_glac_rgi['RGIId'].isin(ds_shean['RGIId']))[0]
# Select data for main_glac_rgi
main_glac_calmassbal_shean = np.zeros((main_glac_rgi.shape[0],4))
ds_subset_shean = ds_shean[[input.rgi_O1Id_colname, massbal_colname, massbal_uncertainty_colname, massbal_t1,
massbal_t2]].values
rgi_O1Id = main_glac_rgi[input.rgi_O1Id_colname].values
for glac in range(rgi_O1Id.shape[0]):
try:
# Grab the mass balance based on the RGIId Order 1 glacier number
main_glac_calmassbal_shean[glac,:] = (
ds_subset_shean[np.where(np.in1d(ds_subset_shean[:,0],rgi_O1Id[glac])==True)[0][0],1:])
# np.in1d searches if there is a match in the first array with the second array provided and returns an
# array with same length as first array and True/False values. np.where then used to identify the
# index where there is a match, which is then used to select the massbalance value
# Use of numpy arrays for indexing and this matching approach is much faster than looping through;
# however, need the for loop because np.in1d does not order the values that match; hence, need to do
# it 1 at a time
except:
# If there is no mass balance data available for the glacier, then set as NaN
main_glac_calmassbal_shean[glac,:] = np.empty(4)
main_glac_calmassbal_shean[glac,:] = np.nan
main_glac_calmassbal_shean = pd.DataFrame(main_glac_calmassbal_shean,
columns=[massbal_colname, massbal_uncertainty_colname, massbal_t1,
massbal_t2])
main_glac_rgi['Shean_MB_mwea'] = main_glac_calmassbal_shean[input.massbal_colname]
main_glac_rgi['Shean_MB_mwea_sigma'] = main_glac_calmassbal_shean[input.massbal_uncertainty_colname]
main_glac_rgi['Shean_MB_year1'] = main_glac_calmassbal_shean[massbal_t1]
main_glac_rgi['Shean_MB_year2'] = main_glac_calmassbal_shean[massbal_t2]
# ===== BRUN DATA =====
# Load all data
cal_rgi_colname = 'GLA_ID'
    ds_all_raw_brun = pd.read_csv(input.brun_fp + input.brun_fn)
import os
import pandas as pd
import argparse
import ujson
from bs4 import BeautifulSoup
import re
import datetime
import warnings
warnings.simplefilter("ignore")
""" GETS INPUT READY FOR INDICIO """
def parse_arguments():
parser = argparse.ArgumentParser(description="reads in original data files / directories")
parser.add_argument('orgdata', type=str, default='')
parser.add_argument('savedir', type=str, default='')
parser.add_argument('reldata', type=str, default='')
parser.add_argument('outputfile', type=str, default='')
args = parser.parse_args()
return args
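def convertunixtodate(ts):
    # Assumed helper (its definition is not shown here): load_file() below expects a
    # function that converts a Unix timestamp into a datetime; this is a minimal,
    # plausible sketch only.
    if ts is None:
        return None
    return datetime.datetime.utcfromtimestamp(int(float(ts)))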
def load_file(filePath, saveData, relData, outputFile):
# file = pd.read_csv()
fname, ext = os.path.splitext(filePath)
dictionary = {}
if ext == '.json':
data = ujson.loads(open(filePath).read())
rel = ujson.loads(open(relData).read())
for d1 in data:
sid = d1.get('SubmissionID')
title = d1.get('SubmissionTitle')
cleantext = BeautifulSoup(title).get_text()
cleantext = re.sub("[^a-zA-Z]", " ", cleantext)
dictionary[sid] = [cleantext, convertunixtodate(d1.get('SubmissionTime')), rel.get(sid)]
com = d1.get("Comments")
for d2 in com:
cid = d2.get("CommentID")
comtext = d2.get('CommentText')
comtext = BeautifulSoup(comtext).get_text()
comtext = re.sub("'", "", comtext)
comtext = re.sub("[^a-zA-Z]", " ", comtext)
dictionary[cid] = [comtext, convertunixtodate(d2.get('CommentTime')), rel.get(cid)]
elif ext == '.csv' or ext == '.tsv':
data = pd.read_csv(filePath, header=0, index_col=[], delimiter=",", quoting=1, encoding='latin1')
for row in data.itertuples():
            # Minimal sketch of the CSV-row handling (mirroring the JSON branch above).
            if not (pd.isnull(row.id) or pd.isnull(row.text)):
                cleantext = re.sub("[^a-zA-Z]", " ", BeautifulSoup(str(row.text)).get_text())
                dictionary[row.id] = [cleantext]
import numpy as np
import pandas as pd
import datetime
import argparse
def readCSV(dt):
"""
Read the CSV file into a dataframe for a YYYY-MM (dt)
Do preliminary cleaning
arg: dt -- string with format YYYY-MM
return df: dataframe containing data from csv
"""
folder = 'raw_data/'
filename = 'output-' + str(dt) + '-01T00_00_00+00_00.csv'
df = pd.read_csv(folder+filename)
df.when_captured = pd.to_datetime(df.when_captured)
# Need to change the format of the Time Stamp for all the measurements in the raw data
df.service_uploaded = df.service_uploaded.apply(lambda x: \
datetime.datetime.strptime(x, '%b %d, %Y @ %H:%M:%S.%f')\
.replace(tzinfo=datetime.timezone.utc))
#### Add a column for the year
df['year'] = pd.DatetimeIndex(df['when_captured']).year
#### Need to correct for the format of the PM numeric values.
df['pms_pm01_0'] = df['pms_pm01_0'].astype(str).str.replace(',', '').astype(float)
df['pms_pm10_0'] = df['pms_pm10_0'].astype(str).str.replace(',', '').astype(float)
df['pms_pm02_5'] = df['pms_pm02_5'].astype(str).str.replace(',', '').astype(float)
return df
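def _example_read_one_month():
    # Illustrative usage sketch (not part of the original script): load one month of
    # raw data and inspect its time span. The '2018-10' month label and the
    # 'raw_data/output-YYYY-MM-01T00_00_00+00_00.csv' layout assumed by readCSV()
    # must exist on disk.
    df = readCSV('2018-10')
    print(df.shape, df['when_captured'].min(), df['when_captured'].max())
    return df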
def findBadData(df):
'''
return the badRecords, i.e. (device, whenCaptured) key for records that have more than one
records for the same key (as this is not possible physically)
'''
temp_df = df.groupby(['device','when_captured']).size().to_frame('size').\
reset_index().sort_values('size', ascending=False)
print("bad device data counts: ")
badRecords = temp_df[(temp_df['size']>1)]
print(badRecords)
print("all bad device list: ")
# Devices that have misbehaved at some point - more than one data values per time stamp
print(np.unique(temp_df[temp_df['size']>1]['device'].values)) # devices that have misbehaved
return badRecords
def rmInvalidTimeStamps(df):
"""
    Remove invalid time-stamped records:
    ## remove records with NULL `when_captured`
    ## remove records where `when_captured` is an invalid date
    ## remove records where the gap between `service_uploaded` and `when_captured` is > 7 days
"""
## remove records with NULL `when_captured`
print("Null date records to remove: ", df['when_captured'].isna().sum())
df = df[df['when_captured'].notna()]
print("df shape after remove records with NULL `when_captured` : ",df.shape)
    ## remove records where `when_captured` is an invalid date (before 2000-01-19)
    boolean_condition = df['when_captured'] > pd.to_datetime('2000/1/19', infer_datetime_format=True).tz_localize('UTC')
    print("Valid `when_captured` entries: ", boolean_condition.sum())
    df = df[boolean_condition]
print("df shape after remove records where `when_captured` is an invalid : ",df.shape)
## remove records where gap of `service_uploaded` and `when_captured` > 7 days
boolean_condition = abs(df['when_captured'].subtract(df['service_uploaded'])).astype('timedelta64[D]') < 7
boolean_condition.shape
print("Lag 7 days to remove: ",df.shape[0] - (boolean_condition).sum())
df = df[boolean_condition]
print("df shape after records where gap of `service_uploaded` and `when_captured` > 7 days : ",df.shape)
return df
def imputeInaccurateRH(df):
"""
impute data with NaN(missing) for inaccurate values of RH
"""
boolean_condition = (df['env_humid']<0) | (df['env_humid']>100)
column_name = 'env_humid'
new_value = np.nan
df.loc[boolean_condition, column_name] = new_value
print("Inaccurate RH records imputed: ", boolean_condition.sum())
return df
def dropServiceUploaded(df):
"""
Inplace dropping of the 'service_uploaded' column
"""
df.drop('service_uploaded', axis=1, inplace=True)
def rmDuplicates(df):
"""
Inplace dropping of duplicates
preserve a single copy of duplicative rows
"""
incoming = df.shape[0]
df.drop_duplicates(subset=df.columns[0:df.shape[1]], inplace=True, keep='first') # args: subset=[df.columns[0:df.shape[1]]], keep = 'first'
print("Number of duplicative entries removed : ", -df.shape[0]+incoming)
def dataAggWithKey(df):
"""
Aggregate the df based on key: ('device','when_captured')
arg: df - incoming dataframe
return: dataframe with COUNTS and COUNT-DISTINCTS for each key
"""
# STEP 1: Aggregate the dataframe based on key
temp_df = df.groupby(['device','when_captured']).agg(['count','nunique'])
# temp_df.info()
num_groups = temp_df.shape[0]
print("num_groups is : ", num_groups)
# STEP 2: Merge Counts and Count-Distincts to check for duplicative records and multiplicities
even = list(range(0,26,2))
odd = list(range(1,26,2))
tmp_df1 = temp_df.iloc[:,even].max(axis=1).to_frame('COUNTS').reset_index()
tmp_df2 = temp_df.iloc[:,odd].max(axis=1).to_frame('DISTINCTS').reset_index()
print(tmp_df1.shape, tmp_df2.shape)
merged = pd.merge(tmp_df1, tmp_df2, left_on = ['device', 'when_captured'], \
right_on=['device', 'when_captured'])
return merged, num_groups
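def _example_inspect_duplicate_keys():
    # Illustrative sketch (not in the original script): after loading a month of raw
    # data, inspect how many (device, when_captured) keys carry duplicated or
    # conflicting rows. COUNTS > 1 with DISTINCTS == 1 means pure duplicates, while
    # DISTINCTS > 1 means conflicting measurements for the same key.
    df = readCSV('2018-10')  # assumed month label
    merged, num_groups = dataAggWithKey(df)
    print(num_groups, (merged.COUNTS > 1).sum(), (merged.DISTINCTS > 1).sum())
    return merged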
def identifyALLNanRecs(merged):
"""
Actionable: Records of useless data with all NaNs
args: incoming datframe with COUNTS and COUNT-DISTINCTS for each key
return : keys dataframe ('device', 'when_captured') to remove later
"""
bool1 = (merged.COUNTS >1) & (merged.DISTINCTS==0)
sum1 = bool1.sum()
print(sum1)
toDiscard1 = merged.loc[:,['device', 'when_captured']][bool1]
toDiscard1.shape
return sum1, toDiscard1
def identifyMultivaluedTimeStamps(merged):
"""
Actionable: Records that are a mix of duplicates and non-duplicate rows
for a given (`device`, `when_captured`) [must be all discarded]
args: incoming datframe with COUNTS and COUNT-DISTINCTS for each key
return : keys dataframe ('device', 'when_captured') to remove later
"""
bool3 = (merged.COUNTS >1) & (merged.DISTINCTS>1)
sum3 = bool3.sum()
print(sum3)
toDiscard3 = merged.loc[:,['device', 'when_captured']][bool3]
toDiscard3.shape
return sum3, toDiscard3
def identifyRemainingDupl(merged):
"""
Actionable: even though duplicates were dropped, there can still be records for which (merged.COUNTS >1) & (merged.DISTINCTS==1)
: consider the case where one of the records for the key under consideration has meaningful values
: but the other record has all NaNs for the same key. Ex. (Oct 18, 2018 @ 10:36:24.000 , 2299238163): row 22618
Records where all rows are purely duplicates [preserve only 1 later]
args: incoming datframe with COUNTS and COUNT-DISTINCTS for each key
"""
bool2 = (merged.COUNTS >1) & (merged.DISTINCTS==1)
sum2 = bool2.sum()
print("remaining duplicates check : " ,merged.COUNTS[bool2].sum() - merged.DISTINCTS[bool2].sum())
toDiscard2 = merged.loc[:,['device', 'when_captured']][bool2]
toDiscard2.shape
return sum2, toDiscard2
def goodTimeStamps(merged):
"""
Records that are good
"""
bool4 = (merged.COUNTS ==1) & (merged.DISTINCTS==1)
sum4 = bool4.sum()
print('good records : ', sum4)
return sum4
def writeDF(dt, dframe, descrpt):
"""
write multivalued timestamps' keys to a csv
args: dframe to write
descrpt: string with description to append to file
"""
# dframe.info()
print("written records shape : ", dframe.shape)
dframe.to_csv('cleaned_data/' + str(dt) + '-01_' + str(descrpt) + '.csv')
def filterRows(toDiscard1, toDiscard2, toDiscard3, df):
"""
Inplace discarding of rows based on allNaN record keys (in df : toDiscard1)
and rows based on MultivaluedTimeStamps keys (in df : toDiscard3)
from original dataframe: df
args:
toDiscard1: allNaN record keys
        toDiscard2: keys from identifyRemainingDupl: records where (merged.COUNTS > 1) & (merged.DISTINCTS == 1)
toDiscard3: MultivaluedTimeStamps keys
df: original dataframe
"""
# STEP 1 :
# all tuples of keys to be discarded
discard = pd.concat([toDiscard1, toDiscard2, toDiscard3], ignore_index=True)
discard['KEY_Dev_WhenCapt'] = list(zip(discard.device, discard.when_captured))
print(df.shape, discard.shape)
# STEP 2 :
# tuples of all keys in the dataframe
df['KEY_Dev_WhenCapt'] = list(zip(df.device, df.when_captured))
df.shape
# STEP 3 :
# discard the rows
rows_to_discard = df['KEY_Dev_WhenCapt'].isin(discard['KEY_Dev_WhenCapt'])
print("these many rows to discard: ", rows_to_discard.sum())
incoming = df.shape[0]
df = df[~rows_to_discard]
print(incoming - df.shape[0])
return df
def cleanSolarCastData(dt):
"""
Master Function to clean all the data with the helper functions in `Data_Cleansing_Single_file`
arg: dt: The function returns the cleaned data frame for the YYYY-MM corresponding to "dt"
return : df: cleaned dataframe
"""
df = readCSV(dt)
findBadData(df)
df = rmInvalidTimeStamps(df)
print("new df: ", df.shape)
df = imputeInaccurateRH(df)
print("new df: ", df.shape)
dropServiceUploaded(df)
print("new df after dropping service_uploaded col: ", df.shape)
rmDuplicates(df)
print("new df after removing duplicates: ", df.shape)
merged,num_groups = dataAggWithKey(df)
print("merged: ", merged.shape)
print("num_groups : ", num_groups)
sum1, toDiscard1 = identifyALLNanRecs(merged)
sum3, toDiscard3 = identifyMultivaluedTimeStamps(merged)
sum2, toDiscard2 = identifyRemainingDupl(merged)
sum4 = goodTimeStamps(merged)
print("toDiscard1 shape: ",toDiscard1.shape)
print("toDiscard2 shape: ",toDiscard2.shape)
print("toDiscard3 shape: ",toDiscard3.shape)
# sanityCheck(): ensure you have all records covered by 1 of the 4 conditions
assert(num_groups == sum1+sum2+sum3+sum4)
writeDF(dt, toDiscard3, 'MultivaluedTimeStamps')
df = filterRows(toDiscard1, toDiscard2, toDiscard3, df)
print("final df shape: ", df.shape)
### Now check to make sure no garbage data is left
badRecordsLeft = findBadData(df)
if not badRecordsLeft.empty:
print("Still bad records remaining:", badRecordsLeft)
assert(badRecordsLeft.empty)
return df
def cleanAndWriteDF(dt):
df = cleanSolarCastData(dt)
print(df.shape)
# Check how many devices there are in the dataset
devices = np.unique(df.device.values)
print(len(devices))
print(devices)
# *Sort the time series -- it's unsorted.*
df.sort_values(by=['when_captured'], inplace=True)
# Write the files
descrpt = 'cleaned'
writeDF(dt, df, descrpt)
return df
def readCleanedDF(dt, descrpt):
"""
Read the cleaned & pre-sorted CSV file into a dataframe for a YYYY-MM (dt)
Do preliminary cleaning
arg: dt -- string with format YYYY-MM
return df: dataframe containing data from csv
"""
folder = './'
filename = str(dt) + '-01_' + str(descrpt) + '.csv'
df = pd.read_csv(folder+filename)
return df
def cleanAndWriteMainDF(start_yyyymm, end_yyyymm):
"""
Cleans each month's data and saves it; also concatenate all the data into a single DataFrame,
sort, and then save
arg: start_yyyymm -- string with format YYYY-MM; earliest month for which data is available
end_yyyymm -- string with format YYYY-MM; latest month for which data is available
"""
dfList = []
for dt in pd.date_range(start_yyyymm, end_yyyymm, freq='MS').strftime("%Y-%m").tolist():
print("========================")
print("========================", dt, "========================")
print("========================")
df = cleanAndWriteDF(dt)
dfList.append(df)
mainDF = pd.concat(dfList, ignore_index=True)
    mainDF.when_captured = pd.to_datetime(mainDF.when_captured)
    mainDF.sort_values(by=['when_captured'], inplace=True)
    # Write the concatenated data; the file label is an assumed convention that mirrors
    # the monthly writeDF() calls above.
    writeDF(start_yyyymm, mainDF, 'cleaned_all')
    return mainDF
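def _example_full_clean():
    # Illustrative sketch (not in the original script): clean every month from
    # October 2018 through March 2019 and build the concatenated, sorted DataFrame.
    # The date range is an assumption for illustration only.
    main_df = cleanAndWriteMainDF('2018-10', '2019-03')
    print(main_df.shape)
    return main_df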
"""
This file is part of the accompanying code to our paper
<NAME>., <NAME>., <NAME>., & <NAME>. (2021). Uncovering flooding mechanisms
across the contiguous United States through interpretive deep learning on
representative catchments. Water Resources Research, 57, e2021WR030185.
https://doi.org/10.1029/2021WR030185.
Copyright (c) 2021 <NAME>. All rights reserved.
You should have received a copy of the MIT license along with the code. If not,
see <https://opensource.org/licenses/MIT>
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.pylab as mpl
import matplotlib.patches as mpatches
import matplotlib.dates as mdates
def plot_peaks(Q, peak_dates, plot_range=[None, None], linecolor="tab:brown", markercolor="tab:red", figsize=(7.5, 2.0)):
"""
Plot the identified flood peaks.
Parameters
----------
Q: pandas series of streamflow observations.
peak_dates: a sequence of flood peaks' occurrence dates.
plot_range: the date range of the plot, it can be a pair of date strings (default: [None, None]).
linecolor: the color of the line (default: 'tab:brown').
markercolor: the color of the marker (default: 'tab:red').
figsize: the width and height of the figure in inches (default: (7.5, 2.0)).
"""
fig, ax = plt.subplots(figsize=figsize)
fig.tight_layout()
plot_range[0] = Q.index[0] if plot_range[0] == None else plot_range[0]
plot_range[1] = Q.index[-1] if plot_range[1] == None else plot_range[1]
ax.plot(Q["flow"].loc[plot_range[0]:plot_range[1]], color=linecolor, lw=1.0)
ax.plot(
Q.loc[peak_dates, "flow"].loc[plot_range[0]:plot_range[1]],
"*",
c=markercolor,
markersize=8,
)
ax.set_title(f"Identified flood peaks from {plot_range[0]} to {plot_range[1]}")
ax.set_ylabel("flow(mm)")
plt.show()
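def _example_plot_peaks():
    # Illustrative sketch (not part of the paper code): call plot_peaks() on synthetic
    # daily flow for one water year. The gamma-distributed flows and the three largest
    # values used as "peaks" are assumptions for illustration only.
    flow = pd.DataFrame(
        {"flow": np.random.gamma(2.0, 1.0, 365)},
        index=pd.date_range("2000-10-01", periods=365, freq="D"),
    )
    peaks = flow["flow"].nlargest(3).index.sort_values()
    plot_peaks(flow, peaks, plot_range=["2000-10-01", "2001-09-30"])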
def plot_eg_individual(dataset, peak_eg_dict, peak_eg_var_dict, peak_date, title_suffix=None, linewidth=1.5, figsize=(10, 3)):
    """
    Plot the expected-gradient (EG) attributions of precipitation and temperature
    for a single flood peak, alongside the corresponding forcing time series.

    Parameters
    ----------
    dataset: pandas dataframe of daily forcings with 'prcp' and 'tmean' columns.
    peak_eg_dict: dict mapping peak dates to EG attribution arrays.
    peak_eg_var_dict: dict mapping peak dates to EG attribution variance arrays.
    peak_date: occurrence date of the flood peak to plot.
    title_suffix: optional string appended to the figure title (default: None).
    linewidth: the width of the plotted lines (default: 1.5).
    figsize: the width and height of the figure in inches (default: (10, 3)).
    """
eg_plot = dataset.loc[pd.date_range(end=peak_date, periods=list(peak_eg_dict.values())[0].shape[1]+1, freq='d')[:-1]]
eg_plot.loc[:, "prcp_eg"] = abs(peak_eg_dict[pd.to_datetime(peak_date)][0, :, 0])
eg_plot.loc[:, "temp_eg"] = abs(peak_eg_dict[pd.to_datetime(peak_date)][0, :, 1])
eg_plot.loc[:, "prcp_eg_val"] = abs(peak_eg_var_dict[pd.to_datetime(peak_date)][0, :, 0])
eg_plot.loc[:, "temp_eg_val"] = abs(peak_eg_var_dict[pd.to_datetime(peak_date)][0, :, 1])
fig = plt.figure(constrained_layout=False, figsize=figsize)
gs1 = fig.add_gridspec(nrows=2, ncols=1, hspace=0, left=0.00, right=0.45, height_ratios=[2.5, 1.5])
ax1 = fig.add_subplot(gs1[0, 0])
ax2 = fig.add_subplot(gs1[1, 0])
gs2 = fig.add_gridspec(nrows=2, ncols=1, hspace=0, left=0.55, right=1.00, height_ratios=[2.5, 1.5])
ax3 = fig.add_subplot(gs2[0, 0])
ax4 = fig.add_subplot(gs2[1, 0])
for ax in [ax1, ax3]:
ax.spines["bottom"].set_visible(False)
ax.axes.get_xaxis().set_visible(False)
for ax in [ax2, ax4]:
ax.set_ylabel(r'$\phi^{EG}_{i}$')
ax.spines["top"].set_visible(False)
ax.yaxis.tick_right()
ax.yaxis.set_label_position("right")
ax.set_ylim(bottom=np.min(peak_eg_dict[pd.to_datetime(peak_date)]),
top=np.max(peak_eg_dict[pd.to_datetime(peak_date)]))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%m/%Y'))
ax.xaxis.set_major_locator(mdates.MonthLocator(interval=1))
ax1.plot(eg_plot['prcp'], color='k', lw=linewidth)
ax1.set_ylabel('P [mm]', ha='center', y=0.5)
ax2.plot(eg_plot['prcp_eg'], color='blue', lw=linewidth)
ax2.fill_between(eg_plot['prcp_eg'].index,
eg_plot['prcp_eg']-eg_plot.loc[:, "prcp_eg_val"],
eg_plot['prcp_eg']+eg_plot.loc[:, "prcp_eg_val"], color='blue', alpha=0.3)
ax2.yaxis.label.set_color('blue')
ax2.tick_params(axis='y', colors='blue')
ax3.plot(eg_plot['tmean'], color='k', lw=linewidth)
ax3.set_ylabel('T [\u2103]', ha='center', y=0.5)
ax4.plot(eg_plot['temp_eg'], color='red', lw=linewidth)
ax4.fill_between(eg_plot['temp_eg'].index,
eg_plot['temp_eg']-eg_plot.loc[:, "temp_eg_val"],
eg_plot['temp_eg']+eg_plot.loc[:, "temp_eg_val"], color='red', alpha=0.3)
ax4.yaxis.label.set_color('red')
ax4.tick_params(axis='y', colors='red')
ax1.set_title(f"Flood on { | pd.to_datetime(peak_date) | pandas.to_datetime |
import importlib.resources
from typing import Any, Optional
import numpy as np
import pandas as pd
from scipy.stats import pearsonr
from scipy.stats.mstats import rankdata
def from_file(data_file: str, data_file2: str, learn_options: dict[str, Any]) -> tuple:
if learn_options["V"] == 1: # from Nature Biotech paper
print(f"loading V{learn_options['V']} data")
if learn_options["weighted"] is not None:
raise AssertionError("not supported for V1 data")
_, gene_position, target_genes, x_df, y_df = read_V1_data(
data_file, learn_options
)
learn_options["binary target name"] = "average threshold"
learn_options["rank-transformed target name"] = "average rank"
learn_options["raw target name"] = "average activity"
elif learn_options["V"] == 2: # from Nov 2014, hot off the machines
x_df, _, target_genes, y_df, gene_position = read_V2_data(
data_file, learn_options
)
# check that data is consistent with sgRNA score
xx = x_df["sgRNA Score"].values
yy = y_df["score_drug_gene_rank"].values
rr, _ = pearsonr(xx, yy)
if rr <= 0:
raise AssertionError(
"data processing has gone wrong as correlation with previous "
"predictions is negative"
)
elif (
learn_options["V"] == 3
): # merge of V1 and V2--this is what is used for the final model
# these are relative to the V2 data, and V1 will be made to automatically match
learn_options["binary target name"] = "score_drug_gene_threshold"
learn_options["rank-transformed target name"] = "score_drug_gene_rank"
learn_options["raw target name"] = None
x_df, y_df, gene_position, target_genes = mergeV1_V2(
data_file, data_file2, learn_options
)
elif learn_options["V"] == 4: # merge of V1 and V2 and the Xu et al data
# these are relative to the V2 data, and V1 and Xu et al. will be made
# to automatically match
learn_options["binary target name"] = "score_drug_gene_threshold"
learn_options["rank-transformed target name"] = "score_drug_gene_rank"
learn_options["raw target name"] = None
x_df, y_df, gene_position, target_genes = merge_all(
data_file, data_file2, learn_options
)
elif learn_options["V"] == 5:
raise Exception(
"The from_file() function is attempting to learn using the xu_et_al data. "
"This data is no longer available with Azimuth."
)
# truncate down to 30--some data sets gave us more.
x_df["30mer"] = x_df["30mer"].apply(lambda x: x[0:30])
return x_df, y_df, gene_position, target_genes
def set_V2_target_names(learn_options: dict) -> dict:
if "binary target name" not in learn_options:
learn_options["binary target name"] = "score_drug_gene_threshold"
if "rank-transformed target name" not in learn_options:
learn_options["rank-transformed target name"] = "score_drug_gene_rank"
learn_options["raw target name"] = "score"
return learn_options
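def _example_set_v2_target_names():
    # Illustrative sketch (not in the original module): defaults are only inserted for
    # keys that are missing, while choices already present are preserved.
    opts = set_V2_target_names({"binary target name": "my_threshold"})
    print(opts)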
def get_ranks(
y: pd.Series, thresh: float = 0.8, prefix: Optional[str] = None, flip: bool = False
) -> tuple[pd.Series, pd.Series, pd.Series, pd.Series]:
"""
    y should be a DataFrame with one column (i.e. effectively a Series)
thresh is the threshold at which to call it a knock-down or not
col_name = 'score' is only for V2 data
flip should be FALSE for both V1 and V2
\f
Parameters
----------
y : :class:`pd.Series`
thresh : float
prefix : :class:`Optional[str]`
flip : bool
Return
------
y_rank : :class:`pd.Series`
y_rank_raw : :class:`pd.Series`
y_threshold : :class:`pd.Series`
y_quantized : :class:`pd.Series`
"""
if prefix is not None:
prefix = prefix + "_"
else:
prefix = ""
# y_rank = y.apply(ranktrafo)
y_rank = y.apply(rankdata)
y_rank /= y_rank.max()
if flip:
y_rank = (
1.0 - y_rank
) # before this line, 1-labels where associated with low ranks, this flips it around
# (hence the y_rank > thresh below)
# we should NOT flip (V2), see README.txt in ./data
y_rank.columns = [prefix + "rank"]
y_threshold = (y_rank > thresh) * 1
y_threshold.columns = [prefix + "threshold"]
# JL: undo the log2 transform (not sure this matters?)
y_rank_raw = (2 ** y).apply(rankdata)
y_rank_raw /= y_rank_raw.max()
if flip:
y_rank_raw = 1.0 - y_rank_raw
y_rank_raw.columns = [prefix + "rank raw"]
if np.any(np.isnan(y_rank)):
raise AssertionError("found NaN in ranks")
y_quantized = y_threshold.copy()
y_quantized.columns = [prefix + "quantized"]
return y_rank, y_rank_raw, y_threshold, y_quantized
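def _example_get_ranks():
    # Illustrative sketch (not part of the original Azimuth module): get_ranks() on a
    # toy one-column DataFrame of log2 activities; with thresh=0.8 only the top ~20%
    # of guides receive threshold == 1.
    toy = pd.DataFrame({"activity": [-1.2, 0.3, 1.8, 0.9, -0.5]})
    y_rank, y_rank_raw, y_threshold, y_quantized = get_ranks(toy, thresh=0.8, flip=False)
    print(y_rank.values.ravel(), y_threshold.values.ravel())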
def get_data(
data: pd.DataFrame,
y_names: list[str],
organism: str = "human",
target_gene: str = None,
) -> tuple[pd.DataFrame, pd.DataFrame]:
"""
this is called once for each gene (aggregating across cell types)
y_names are cell types
e.g. call: X_CD13, Y_CD13 = get_data(cd13, y_names=['NB4 CD13', 'TF1 CD13'])
\f
Parameters
----------
data : pd.DataFrame
y_names : List[str]
organism : str = "human"
target_gene : str = None
Return
------
features : :class:`pd.DataFrame`
output : :class:`pd.DataFrame`
"""
outputs = pd.DataFrame()
# generate ranks for each cell type before aggregating to match what is in Doench et al
thresh = 0.8
for y_name in y_names: # for each cell type
y = pd.DataFrame(data[y_name])
# these thresholds/quantils are not used:
y_rank, y_rank_raw, y_threshold, _ = get_ranks(y, thresh=thresh, flip=False)
y_rank.columns = [y_name + " rank"]
y_rank_raw.columns = [y_name + " rank raw"]
y_threshold.columns = [y_name + " threshold"]
outputs = pd.concat([outputs, y, y_rank, y_threshold, y_rank_raw], axis=1)
# aggregated rank across cell types
average_activity = pd.DataFrame(outputs[[y_name for y_name in y_names]].mean(1))
average_activity.columns = ["average activity"]
average_rank_from_avg_activity = get_ranks(
average_activity, thresh=thresh, flip=False
)[0]
average_rank_from_avg_activity.columns = ["average_rank_from_avg_activity"]
average_threshold_from_avg_activity = (average_rank_from_avg_activity > thresh) * 1
average_threshold_from_avg_activity.columns = [
"average_threshold_from_avg_activity"
]
average_rank = pd.DataFrame(
outputs[[y_name + " rank" for y_name in y_names]].mean(1)
)
average_rank.columns = ["average rank"]
# higher ranks are better (when flip=False as it should be)
average_threshold = (average_rank > thresh) * 1
average_threshold.columns = ["average threshold"]
# undo the log2 trafo on the reads per million, apply rank trafo right away
average_rank_raw = pd.DataFrame(
outputs[[y_name + " rank raw" for y_name in y_names]].mean(1)
)
average_rank_raw.columns = ["average rank raw"]
outputs = pd.concat(
[
outputs,
average_rank,
average_threshold,
average_activity,
average_rank_raw,
average_rank_from_avg_activity,
average_threshold_from_avg_activity,
],
axis=1,
)
# import pdb; pdb.set_trace()
# sequence-specific computations
# features = featurize_data(data)
# strip out featurization to later
features = pd.DataFrame(data["30mer"])
if organism == "human":
target_gene = y_names[0].split(" ")[1]
outputs["Target gene"] = target_gene
outputs["Organism"] = organism
features["Target gene"] = target_gene
features["Organism"] = organism
features["Strand"] = pd.DataFrame(data["Strand"])
return features, outputs
def combine_organisms(human_data: pd.DataFrame, mouse_data: pd.DataFrame) -> tuple:
# 'Target' is the column name, 'CD13' are some rows in that column
# xs slices through the pandas data frame to return another one
cd13 = human_data.xs("CD13", level="Target", drop_level=False)
# y_names are column names, cd13 is a pd object
x_cd13, y_cd13 = get_data(cd13, y_names=["NB4 CD13", "TF1 CD13"])
cd33 = human_data.xs("CD33", level="Target", drop_level=False)
x_cd33, y_cd33 = get_data(cd33, y_names=["MOLM13 CD33", "TF1 CD33", "NB4 CD33"])
cd15 = human_data.xs("CD15", level="Target", drop_level=False)
x_cd15, y_cd15 = get_data(cd15, y_names=["MOLM13 CD15"])
mouse_x = pd.DataFrame()
mouse_y = pd.DataFrame()
for k in mouse_data.index.levels[1]:
# is k the gene
x_df, y_df = get_data(
mouse_data.xs(k, level="Target", drop_level=False),
["On-target Gene"],
target_gene=k,
organism="mouse",
)
mouse_x = pd.concat([mouse_x, x_df], axis=0)
mouse_y = pd.concat([mouse_y, y_df], axis=0)
x_df = pd.concat([x_cd13, x_cd15, x_cd33, mouse_x], axis=0, sort=True)
y_df = pd.concat([y_cd13, y_cd15, y_cd33, mouse_y], axis=0, sort=True)
return x_df, y_df
def impute_gene_position(gene_position: pd.DataFrame) -> pd.DataFrame:
"""
Some amino acid cut position and percent peptide are blank because of stop codons, but
we still want a number for these, so just set them to 101 as a proxy
\f
Parameters
----------
Return
------
"""
gene_position["Percent Peptide"] = gene_position["Percent Peptide"].fillna(101.00)
if "Amino Acid Cut position" in gene_position.columns:
gene_position["Amino Acid Cut position"] = gene_position[
"Amino Acid Cut position"
].fillna(gene_position["Amino Acid Cut position"].mean())
return gene_position
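def _example_impute_gene_position():
    # Illustrative sketch (not in the original module): missing 'Percent Peptide'
    # values (e.g. from stop codons) are replaced with the 101 sentinel, and missing
    # cut positions with the column mean.
    toy = pd.DataFrame(
        {"Percent Peptide": [12.5, np.nan], "Amino Acid Cut position": [40.0, np.nan]}
    )
    print(impute_gene_position(toy))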
def read_V1_data(
data_file: Optional[str] = None,
learn_options: Optional[dict] = None,
aml_file: Optional[str] = None,
) -> tuple:
if data_file is None:
data_file = importlib.resources.files("azimuth").joinpath(
"data", "V1_data.xlsx"
)
with importlib.resources.as_file(data_file) as data_file:
human_data = pd.read_excel(data_file, sheet_name=0, index_col=[0, 1])
mouse_data = pd.read_excel(data_file, sheet_name=1, index_col=[0, 1])
else:
human_data = pd.read_excel(data_file, sheet_name=0, index_col=[0, 1])
mouse_data = pd.read_excel(data_file, sheet_name=1, index_col=[0, 1])
x_df, y_df = combine_organisms(human_data, mouse_data)
# get position within each gene, then join and re-order
# note that 11 missing guides we were told to ignore
annotations = pd.read_csv(aml_file, delimiter="\t", index_col=[0, 4])
annotations.index.names = x_df.index.names
gene_position = pd.merge(
x_df, annotations, how="inner", left_index=True, right_index=True
)
gene_position = impute_gene_position(gene_position)
gene_position = gene_position[
["Amino Acid Cut position", "Nucleotide cut position", "Percent Peptide"]
]
y_df = y_df.loc[gene_position.index]
x_df = x_df.loc[gene_position.index]
    # for bookkeeping, to keep consistent with V2 which uses this for "extra pairs"
    y_df["test"] = 1
target_genes = y_df["Target gene"].unique()
y_df.index.names = ["Sequence", "Target gene"]
if not x_df.index.equals(y_df.index):
raise AssertionError(
"The index of x_df is different from the index of y_df "
"(this can cause inconsistencies/random performance later on)"
)
if learn_options is not None and learn_options["flipV1target"]:
print(
"************************************************************************\n"
"*****************MATCHING DOENCH CODE (DEBUG MODE)**********************\n"
"************************************************************************"
)
# normally it is:y_df['average threshold'] =y_df['average rank'] > 0.8, where
# 1s are good guides, 0s are not
y_df["average threshold"] = y_df["average rank"] < 0.2 # 1s are bad guides
print("press c to continue")
import pdb
pdb.set_trace()
return annotations, gene_position, target_genes, x_df, y_df
def read_V2_data(
data_file: str = None, learn_options: dict = None, verbose: bool = True
) -> tuple:
if data_file is None:
data_file = importlib.resources.files("azimuth").joinpath(
"data", "V2_data.xlsx"
)
with importlib.resources.as_file(data_file) as df:
data = pd.read_excel(
df,
sheet_name="ResultsFiltered",
skiprows=range(0, 6 + 1),
index_col=[0, 4],
)
else:
data = pd.read_excel(
data_file,
sheet_name="ResultsFiltered",
skiprows=range(0, 6 + 1),
index_col=[0, 4],
)
    # grab data relevant to each of three drugs, which excludes some genes
# note gene MED12 has two drugs, all others have at most one
x_df = pd.DataFrame()
# This comes from the "Pairs" tab in their excel sheet,
# note HPRT/HPRT1 are same thing, and also PLX_2uM/PLcX_2uM
known_pairs = {
"AZD_200nM": ["CCDC101", "MED12", "TADA2B", "TADA1"],
"6TG_2ug/mL": ["HPRT1"],
"PLX_2uM": ["CUL3", "NF1", "NF2", "MED12"],
}
drugs_to_genes = {
"AZD_200nM": ["CCDC101", "MED12", "TADA2B", "TADA1"],
"6TG_2ug/mL": ["HPRT1"],
"PLX_2uM": ["CUL3", "NF1", "NF2", "MED12"],
}
if learn_options is not None:
if learn_options["extra pairs"] or learn_options["all pairs"]:
raise AssertionError(
"extra pairs and all pairs options (in learn_options) can't be "
"active simultaneously."
)
if learn_options["extra pairs"]:
drugs_to_genes["AZD_200nM"].extend(["CUL3", "NF1", "NF2"])
elif learn_options["all pairs"]:
drugs_to_genes["AZD_200nM"].extend(["HPRT1", "CUL3", "NF1", "NF2"])
drugs_to_genes["PLX_2uM"].extend(["HPRT1", "CCDC101", "TADA2B", "TADA1"])
drugs_to_genes["6TG_2ug/mL"].extend(
["CCDC101", "MED12", "TADA2B", "TADA1", "CUL3", "NF1", "NF2"]
)
count = 0
for drug in drugs_to_genes:
genes = drugs_to_genes[drug]
for gene in genes:
xtmp = data.copy().xs(gene, level="Target gene", drop_level=False)
xtmp["drug"] = drug
xtmp["score"] = xtmp[
drug
].copy() # grab the drug results that are relevant for this gene
if gene in known_pairs[drug]:
xtmp["test"] = 1.0
else:
xtmp["test"] = 0.0
count = count + xtmp.shape[0]
x_df = pd.concat([x_df, xtmp], axis=0)
if verbose:
print(
f"Loaded {xtmp.shape[0]} samples for gene {gene} "
f"\ttotal number of samples: {count}"
)
# create new index that includes the drug
x_df = x_df.set_index("drug", append=True)
y_df = pd.DataFrame(x_df.pop("score"))
y_df.columns.names = ["score"]
test_gene = pd.DataFrame(x_df.pop("test"))
target = pd.DataFrame(
x_df.index.get_level_values("Target gene").values,
index=y_df.index,
columns=["Target gene"],
)
y_df = pd.concat((y_df, target, test_gene), axis=1)
target_genes = y_df["Target gene"].unique()
gene_position = x_df[["Percent Peptide", "Amino Acid Cut position"]].copy()
# convert to ranks for each (gene, drug combo)
# flip = True
y_rank = pd.DataFrame()
y_threshold = pd.DataFrame()
y_quant = pd.DataFrame()
for drug in drugs_to_genes:
gene_list = drugs_to_genes[drug]
for gene in gene_list:
ytmp = pd.DataFrame(
y_df.xs((gene, drug), level=["Target gene", "drug"], drop_level=False)[
"score"
]
)
y_ranktmp, _, y_thresholdtmp, y_quanttmp = get_ranks(
ytmp, thresh=0.8, prefix="score_drug_gene", flip=False
)
# np.unique(y_rank.values-y_rank_raw.values)
y_rank = pd.concat((y_rank, y_ranktmp), axis=0)
y_threshold = pd.concat((y_threshold, y_thresholdtmp), axis=0)
y_quant = pd.concat((y_quant, y_quanttmp), axis=0)
yall = pd.concat((y_rank, y_threshold, y_quant), axis=1)
y_df = pd.merge(y_df, yall, how="inner", left_index=True, right_index=True)
# convert also by drug only, irrespective of gene
y_rank = pd.DataFrame()
y_threshold = pd.DataFrame()
y_quant = pd.DataFrame()
for drug in drugs_to_genes:
ytmp = pd.DataFrame(y_df.xs(drug, level="drug", drop_level=False)["score"])
y_ranktmp, _, y_thresholdtmp, y_quanttmp = get_ranks(
ytmp, thresh=0.8, prefix="score_drug", flip=False
)
# np.unique(y_rank.values-y_rank_raw.values)
y_rank = pd.concat((y_rank, y_ranktmp), axis=0)
y_threshold = pd.concat((y_threshold, y_thresholdtmp), axis=0)
y_quant = pd.concat((y_quant, y_quanttmp), axis=0)
yall = pd.concat((y_rank, y_threshold, y_quant), axis=1)
y_df = pd.merge(y_df, yall, how="inner", left_index=True, right_index=True)
gene_position = impute_gene_position(gene_position)
if learn_options is not None and learn_options["weighted"] == "variance":
print("computing weights from replicate variance...")
# compute the variance across replicates so can use it as a weight
data = pd.read_excel(
data_file,
sheet_name="Normalized",
skiprows=range(0, 6 + 1),
index_col=[0, 4],
)
data.index.names = ["Sequence", "Target gene"]
experiments = {
"AZD_200nM": ["Deep 25", "Deep 27", "Deep 29 ", "Deep 31"],
"6TG_2ug/mL": ["Deep 33", "Deep 35", "Deep 37", "Deep 39"],
"PLX_2uM": ["Deep 49", "Deep 51", "Deep 53", "Deep 55"],
}
variance = None
for drug in drugs_to_genes:
data_tmp = data.iloc[
data.index.get_level_values("Target gene").isin(drugs_to_genes[drug])
][experiments[drug]]
data_tmp["drug"] = drug
data_tmp = data_tmp.set_index("drug", append=True)
data_tmp["variance"] = np.var(data_tmp.values, axis=1)
if variance is None:
variance = data_tmp["variance"].copy()
else:
variance = pd.concat((variance, data_tmp["variance"]), axis=0)
orig_index = y_df.index.copy()
y_df = pd.merge(
y_df, pd.DataFrame(variance), how="inner", left_index=True, right_index=True
)
        y_df = y_df.loc[orig_index]  # .ix was removed from pandas; .loc restores the original row order
print("done.")
# Make sure to keep this check last in this function
if not x_df.index.equals(y_df.index):
raise AssertionError(
"The index of x_df is different from the index of y_df "
"(this can cause inconsistencies/random performance later on)"
)
return x_df, drugs_to_genes, target_genes, y_df, gene_position
def merge_all(data_file: str, data_file2: str, learn_options: dict) -> tuple:
x_df, y_df, gene_position, target_genes = mergeV1_V2(
data_file, data_file2, learn_options
)
return x_df, y_df, gene_position, target_genes
def mergeV1_V2(data_file: str, data_file2: str, learn_options: dict) -> tuple:
"""
ground_truth_label, etc. are taken to correspond to the V2 data,
and then the V1 is appropriately matched based on semantics
"""
if learn_options["include_strand"]:
raise AssertionError("don't currently have 'Strand' column in V1 data")
_, gene_position1, target_genes1, x_df1, y_df1 = read_V1_data(
data_file, learn_options
)
x_df2, _, target_genes2, y_df2, gene_position2 = read_V2_data(data_file2)
y_df1.rename(
columns={"average rank": learn_options["rank-transformed target name"]},
inplace=True,
)
y_df1.rename(
columns={"average threshold": learn_options["binary target name"]}, inplace=True
)
# rename columns, and add a dummy "drug" to V1 so can join the data sets
y_df1["drug"] = ["nodrug" for _ in range(y_df1.shape[0])]
y_df1 = y_df1.set_index("drug", append=True)
y_df1.index.names = ["Sequence", "Target gene", "drug"]
y_cols_to_keep = np.unique(
["Target gene", "test", "score_drug_gene_rank", "score_drug_gene_threshold"]
)
y_df1 = y_df1[y_cols_to_keep]
y_df2 = y_df2[y_cols_to_keep]
x_df1["drug"] = ["nodrug" for _ in range(x_df1.shape[0])]
x_df1 = x_df1.set_index("drug", append=True)
x_cols_to_keep = ["30mer", "Strand"]
x_df1 = x_df1[x_cols_to_keep]
x_df2 = x_df2[x_cols_to_keep]
gene_position1["drug"] = ["nodrug" for _ in range(gene_position1.shape[0])]
gene_position1 = gene_position1.set_index("drug", append=True)
gene_position1.index.names = ["Sequence", "Target gene", "drug"]
cols_to_keep = ["Percent Peptide", "Amino Acid Cut position"]
gene_position1 = gene_position1[cols_to_keep]
gene_position2 = gene_position2[cols_to_keep]
y_df = pd.concat((y_df1, y_df2), axis=0)
x_df = pd.concat((x_df1, x_df2), axis=0)
    gene_position = pd.concat((gene_position1, gene_position2))
    # Combine the gene lists and return in the order expected by merge_all() above.
    target_genes = np.concatenate((target_genes1, target_genes2))
    return x_df, y_df, gene_position, target_genes
import itertools
import os
import psutil
import time
from datetime import datetime
from os.path import join, splitext, basename, exists, isfile
import logging
from copy import deepcopy
from typing import Optional
from collections import OrderedDict
import numpy as np
import pandas as pd
from compress_pickle import dump, load
import torch
from torch import nn
import torch.nn.functional as F
from detectron2.data import MetadataCatalog
from detectron2.structures import Instances, Boxes
from detectron2.utils import comm
from tqdm import tqdm
from fsdet.engine import DefaultTrainer
from fsdet.evaluation import (COCOEvaluator, DatasetEvaluators, LVISEvaluator, PascalVOCDetectionEvaluator,
DatasetEvaluator, print_csv_format, inference_context)
from laplacianshot.images_manipulation import normalize_image, apply_random_augmentation
from laplacianshot.inference import laplacian_shot
from laplacianshot.plotting import plot_detections, plot_supports, plot_supports_augmentations, plot_distribution
def get_available_ram_gb():
memory = psutil.virtual_memory().available * (1024.0 ** -3)
return memory
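def _example_log_available_ram(tag: str = "support features") -> float:
    # Illustrative helper sketch (not part of the original trainer): log the currently
    # available system RAM around the heavy feature-collection phases below, since the
    # aggregated support/query embeddings can grow large.
    gb = get_available_ram_gb()
    print(f"[{tag}] available RAM: {gb:.1f} GB")
    return gb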
class LaplacianTrainer(DefaultTrainer):
@classmethod
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
"""
Create evaluator(s) for a given dataset.
This uses the special metadata "evaluator_type" associated with each builtin dataset.
For your own dataset, you can simply create an evaluator manually in your
script and do not have to worry about the hacky if-else logic here.
"""
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
evaluator_list = []
evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
if evaluator_type == "coco":
evaluator_list.append(
COCOEvaluator(dataset_name, cfg, True, output_folder)
)
if evaluator_type == "pascal_voc":
return PascalVOCDetectionEvaluator(dataset_name)
if evaluator_type == "lvis":
return LVISEvaluator(dataset_name, cfg, True, output_folder)
if len(evaluator_list) == 0:
raise NotImplementedError(
"no Evaluator for the dataset {} with the type {}".format(
dataset_name, evaluator_type
)
)
if len(evaluator_list) == 1:
return evaluator_list[0]
return DatasetEvaluators(evaluator_list)
@classmethod
def test(cls, cfg, model, evaluators=None,
support_augmentation: Optional[bool] = True,
use_laplacianshot: bool = True,
use_classification_layer: bool = True,
rectify_prototypes: Optional[bool] = True,
leverage_classification: Optional[bool] = True,
embeddings_type: Optional[str] = "embeddings",
do_pca: Optional[bool] = False,
remove_possibly_duplicates: Optional[bool] = False,
knn: Optional[int] = 3,
lambda_factor: Optional[float] = 0.1,
max_iters: Optional[int] = None,
laplacianshot_logs: bool = True,
plots: bool = False,
save_checkpoints: bool = True):
"""
Args:
cfg (CfgNode):
model (nn.Module):
evaluators (list[DatasetEvaluator] or None): if None, will call
:meth:`build_evaluator`. Otherwise, must have the same length as
`cfg.DATASETS.TEST`.
Returns:
dict: a dict of result metrics
"""
assert isinstance(use_laplacianshot, bool)
assert embeddings_type in {None, "embeddings", "probabilities"}
logger = logging.getLogger(__name__)
if isinstance(evaluators, DatasetEvaluator):
evaluators = [evaluators]
if evaluators is not None:
assert len(cfg.DATASETS.TEST) == len(
evaluators
), "{} != {}".format(len(cfg.DATASETS.TEST), len(evaluators))
results = OrderedDict()
for idx, dataset_name in enumerate(cfg.DATASETS.TEST):
dataloader_support = cls.build_train_loader(cfg)
dataloader_query = cls.build_test_loader(cfg, dataset_name)
if evaluators is not None:
evaluator = evaluators[idx]
else:
try:
evaluator = cls.build_evaluator(cfg, dataset_name)
except NotImplementedError:
logger.warn(
"No evaluator found. Use `DefaultTrainer.test(evaluators=)`, "
"or implement its `build_evaluator` method."
)
results[dataset_name] = {}
continue
results_i = inference_on_dataset(model=model,
dataloader_support=dataloader_support,
dataloader_query=dataloader_query,
evaluator=evaluator,
support_augmentation=support_augmentation,
use_laplacianshot=use_laplacianshot,
use_classification_layer=use_classification_layer,
rectify_prototypes=rectify_prototypes,
leverage_classification=leverage_classification,
embeddings_type=embeddings_type,
do_pca=do_pca,
remove_possibly_duplicates=remove_possibly_duplicates,
knn=knn,
lambda_factor=lambda_factor,
max_iters=max_iters,
cfg=cfg,
save_checkpoints=save_checkpoints,
laplacianshot_logs=laplacianshot_logs,
plots=plots)
results[dataset_name] = results_i
if comm.is_main_process():
assert isinstance(
results_i, dict
), "Evaluator must return a dict on the main process. Got {} instead.".format(
results_i
)
logger.info(
"Evaluation results for {} in csv format:".format(
dataset_name
)
)
print_csv_format(results_i)
if len(results) == 1:
results = list(results.values())[0]
return results
def inference_on_dataset(model, dataloader_support, dataloader_query, evaluator,
support_augmentation: bool = True,
use_laplacianshot: bool = True,
use_classification_layer: bool = True,
rectify_prototypes: Optional[bool] = True,
leverage_classification: Optional[bool] = True,
embeddings_type: Optional[str] = "embeddings",
do_pca: Optional[bool] = False,
remove_possibly_duplicates: Optional[bool] = False,
knn: Optional[int] = 3,
lambda_factor: Optional[int] = 0.1,
max_iters: Optional[int] = None,
cfg=None,
save_checkpoints: bool = True,
plots: bool = False,
laplacianshot_logs: bool = True):
assert not use_laplacianshot or isinstance(use_laplacianshot, bool)
assert not use_classification_layer or isinstance(use_classification_layer, bool)
assert not support_augmentation or isinstance(support_augmentation, bool)
assert embeddings_type in {None, "embeddings", "probabilities"}
assert use_laplacianshot or use_classification_layer
if use_laplacianshot:
assert isinstance(laplacianshot_logs, bool)
n_query_images = max_iters if max_iters \
else len(dataloader_query)
evaluator.reset()
inputs_agg, outputs_agg = [], []
X_s_embeddings, X_s_probabilities, X_s_labels, X_s_imgs = [], [], [], []
test_score_thresh_original, test_detections_per_img_original = model.roi_heads.test_score_thresh, \
model.roi_heads.test_detections_per_img
datasets_names = "_".join(cfg.DATASETS.TRAIN)
model_name = basename(
splitext(cfg.MODEL.WEIGHTS)[0]
)
checkpoint_filename = join("checkpoints", "_".join([model_name, datasets_names]) + ".bz")
times = pd.DataFrame()
with inference_context(model), torch.no_grad():
# =======#=======#=======#=======#=======#=======#=======#=======#=======
# ======= S U P P O R T
# =======#=======#=======#=======#=======#=======#=======#=======#=======
if use_laplacianshot:
starting_time = time.time()
# sets the model for support predictions
model.roi_heads.test_score_thresh, model.roi_heads.test_detections_per_img = 0, 1
original_images_indices = []
for img_data in tqdm(dataloader_support.dataset.dataset, desc=f"Getting support data"):
# print(img_data)
# retrieves data about found image and boxes
img_original = img_data["image"] \
.to(model.device) # torch.Size([3, H, W])
boxes_labels = img_data["instances"].get_fields()["gt_classes"] \
.to(model.device) # torch.Size([N])
boxes_coords = img_data["instances"].get_fields()["gt_boxes"].tensor \
.to(model.device) # torch.Size([N, 4])
# gt_boxes = torch.zeros_like(boxes_coords)
gt_boxes = []
# loops over found boxes
for i_instance, (box, label) in enumerate(zip(boxes_coords, boxes_labels)):
imgs, boxes = [img_original], [box]
# tracks non-augmented images
original_images_indices += [len(X_s_embeddings)]
# eventually data augments the image
if support_augmentation is None or support_augmentation:
for _ in range(5):
img_augmented, box_augmented = apply_random_augmentation(img=img_original, box=box)
imgs += [img_augmented]
boxes += [box_augmented]
for i_img, (img, box) in enumerate(zip(imgs, boxes)):
# print(f"Shape and box before: {img.shape}\t\t{box}")
# normalizes the image
img_normalized = normalize_image(img=img, model=model)
# resizes the box according to the new size
box_normalized = deepcopy(box)
box_normalized[0] = (box_normalized[0] * img_normalized.shape[2]) / img.shape[2]
box_normalized[2] = (box_normalized[2] * img_normalized.shape[2]) / img.shape[2]
box_normalized[1] = (box_normalized[1] * img_normalized.shape[1]) / img.shape[1]
box_normalized[3] = (box_normalized[3] * img_normalized.shape[1]) / img.shape[1]
# adjusts img_data
img_data_normalized = deepcopy(img_data)
img_data_normalized["image"] = (
(
(img_normalized + abs(img_normalized.min()))
/ img_normalized.max()
) * 255).byte()
img_data_normalized["instances"]._image_size = img_normalized.shape[1:]
gt_boxes = img_data_normalized["instances"].get("gt_boxes")
gt_boxes.tensor[i_instance] = box_normalized
img_data_normalized["instances"].set("gt_boxes", [box for box in gt_boxes.tensor])
# creates the box proposal query for the classification
proposal = [
Instances(image_size=img_normalized.shape[-2:],
# objectness_logits=[torch.tensor([1], device=model.device)],
proposal_boxes=Boxes(box_normalized.unsqueeze(0)))
]
features = model.backbone(img_normalized.unsqueeze(0))
result = model.roi_heads(img_data, features, proposal)[0][0]
if len(result.get("box_features")) == 0:
continue
features = result.get("box_features")[0].type(torch.half)
scores = result.get("pred_class_logits")[0].type(torch.half)
# keeps relevant infos
X_s_imgs += [
img[:,
int(box[1]): int(box[3]),
int(box[0]): int(box[2]),
].cpu()]
if X_s_imgs[-1].shape[1] == 0 or X_s_imgs[-1].shape[2] == 0:
print(X_s_imgs[-1].shape, img.shape)
print(box, box_normalized)
exit()
X_s_embeddings += [features.cpu()]
X_s_probabilities += [scores.cpu()]
X_s_labels += [label.cpu()]
X_s_embeddings, X_s_probabilities, X_s_labels = torch.stack(X_s_embeddings, dim=0), \
torch.stack(X_s_probabilities, dim=0), \
torch.stack(X_s_labels, dim=0)
if plots:
plot_supports(imgs=[X_s_img for i, X_s_img in enumerate(X_s_imgs)
if i in original_images_indices],
labels=X_s_labels[original_images_indices],
folder="plots")
if plots and (support_augmentation is None or support_augmentation):
plot_supports_augmentations(imgs=X_s_imgs,
labels=X_s_labels,
original_images_indices=original_images_indices,
folder="plots")
# resets the model
model.roi_heads.test_score_thresh, model.roi_heads.test_detections_per_img = test_score_thresh_original, \
test_detections_per_img_original
# records the times
times = times.append(
{
"phase": "support features retrieval",
"time_from": int(starting_time),
"time_to": int(time.time()),
"time_elapsed": int(time.time() - starting_time)
},
ignore_index=True)
# =======#=======#=======#=======#=======#=======#=======#=======#=======
# ======= Q U E R Y
# =======#=======#=======#=======#=======#=======#=======#=======#=======
# eventually loads the checkpoints from memory
if exists(checkpoint_filename) and not max_iters:
starting_time = time.time()
evaluator._logger.info(f"Loading inputs and outputs from {checkpoint_filename}")
inputs_agg, outputs_agg = load(checkpoint_filename)
# records the times
times = times.append(
{
"phase": "query checkpoint loading",
"time_from": int(starting_time),
"time_to": int(time.time()),
"time_elapsed": int(time.time() - starting_time)
},
ignore_index=True)
else:
starting_time = time.time()
for i_query, inputs in tqdm(enumerate(dataloader_query), desc=f"Predicting query data",
total=n_query_images):
# eventually early stops the computation
if max_iters and i_query >= max_iters:
break
outputs = model(inputs)
torch.cuda.synchronize()
# cleanses inputs and outputs before collection
for i_output, (input, output) in enumerate(zip(inputs, outputs)):
for k, v in input.items():
if isinstance(v, torch.Tensor):
inputs[i_output][k] = v.to("cpu")
for k, v in output.items():
if isinstance(v, torch.Tensor) or isinstance(v, Instances):
outputs[i_output][k] = v.to("cpu")
# plots a sample of detection
if plots and i_query == 0:
plot_detections(img=inputs[0]["image"],
boxes=outputs[0]["instances"].get_fields()["pred_boxes"].tensor,
confidences=outputs[0]["instances"].get_fields()["scores"],
labels=outputs[0]["instances"].get_fields()["pred_classes"],
folder="plots")
# slims inputs
inputs = [
{
k: v
for k, v in input.items()
if k != "image"
}
for input in inputs
]
# collects predictions
inputs_agg += inputs
outputs_agg += outputs
# records the times
times = times.append(
{
"phase": "query features retrieval",
"time_from": int(starting_time),
"time_to": int(time.time()),
"time_elapsed": int(time.time() - starting_time)
},
ignore_index=True)
if save_checkpoints and not max_iters:
starting_time = time.time()
evaluator._logger.info(f"Compressing checkpoint in {checkpoint_filename}")
dump((inputs_agg, outputs_agg), checkpoint_filename)
# records the times
times = times.append(
{
"phase": "query checkpoint saving",
"time_from": int(starting_time),
"time_to": int(time.time()),
"time_elapsed": int(time.time() - starting_time)
},
ignore_index=True)
if plots:
plot_distribution(distribution=[
len(output["instances"].get_fields()["scores"])
for output in outputs_agg
], label_x="detections", title=f"number of detections", folder="plots")
    final_results = pd.DataFrame()
from datetime import (
datetime,
timedelta,
timezone,
)
import numpy as np
import pytest
import pytz
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
Timestamp,
date_range,
isna,
)
import pandas._testing as tm
class TestSeriesFillNA:
def test_fillna_nat(self):
series = Series([0, 1, 2, NaT.value], dtype="M8[ns]")
filled = series.fillna(method="pad")
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="pad")
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
series = Series([NaT.value, 0, 1, 2], dtype="M8[ns]")
filled = series.fillna(method="bfill")
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="bfill")
filled2 = df.fillna(value=series[1])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
def test_fillna_value_or_method(self, datetime_series):
msg = "Cannot specify both 'value' and 'method'"
with pytest.raises(ValueError, match=msg):
datetime_series.fillna(value=0, method="ffill")
def test_fillna(self):
ts = Series([0.0, 1.0, 2.0, 3.0, 4.0], index=tm.makeDateIndex(5))
tm.assert_series_equal(ts, ts.fillna(method="ffill"))
ts[2] = np.NaN
exp = Series([0.0, 1.0, 1.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(method="ffill"), exp)
exp = Series([0.0, 1.0, 3.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(method="backfill"), exp)
exp = Series([0.0, 1.0, 5.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(value=5), exp)
msg = "Must specify a fill 'value' or 'method'"
with pytest.raises(ValueError, match=msg):
ts.fillna()
def test_fillna_nonscalar(self):
# GH#5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.0])
tm.assert_series_equal(result, expected)
result = s1.fillna({})
tm.assert_series_equal(result, s1)
result = s1.fillna(Series((), dtype=object))
tm.assert_series_equal(result, s1)
result = s2.fillna(s1)
tm.assert_series_equal(result, s2)
result = s1.fillna({0: 1})
tm.assert_series_equal(result, expected)
result = s1.fillna({1: 1})
tm.assert_series_equal(result, Series([np.nan]))
result = s1.fillna({0: 1, 1: 1})
tm.assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}))
tm.assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5]))
tm.assert_series_equal(result, s1)
def test_fillna_aligns(self):
s1 = Series([0, 1, 2], list("abc"))
s2 = Series([0, np.nan, 2], list("bac"))
result = s2.fillna(s1)
expected = Series([0, 0, 2.0], list("bac"))
tm.assert_series_equal(result, expected)
def test_fillna_limit(self):
ser = Series(np.nan, index=[0, 1, 2])
result = ser.fillna(999, limit=1)
expected = Series([999, np.nan, np.nan], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
result = ser.fillna(999, limit=2)
expected = Series([999, 999, np.nan], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
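    def test_fillna_limit_backfill_sketch(self):
        # Illustrative sketch (not from the upstream pandas suite): 'limit' also caps
        # method-based filling, so only the NaN adjacent to a valid value is filled.
        ser = Series([np.nan, np.nan, 5.0], index=[0, 1, 2])
        result = ser.fillna(method="backfill", limit=1)
        expected = Series([np.nan, 5.0, 5.0], index=[0, 1, 2])
        tm.assert_series_equal(result, expected)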
def test_fillna_dont_cast_strings(self):
# GH#9043
# make sure a string representation of int/float values can be filled
# correctly without raising errors or being converted
vals = ["0", "1.5", "-0.3"]
for val in vals:
ser = Series([0, 1, np.nan, np.nan, 4], dtype="float64")
result = ser.fillna(val)
expected = Series([0, 1, val, val, 4], dtype="object")
tm.assert_series_equal(result, expected)
def test_fillna_consistency(self):
# GH#16402
# fillna with a tz aware to a tz-naive, should result in object
ser = Series([Timestamp("20130101"), NaT])
result = ser.fillna(Timestamp("20130101", tz="US/Eastern"))
expected = Series(
[Timestamp("20130101"), Timestamp("2013-01-01", tz="US/Eastern")],
dtype="object",
)
tm.assert_series_equal(result, expected)
msg = "The 'errors' keyword in "
with tm.assert_produces_warning(FutureWarning, match=msg):
# where (we ignore the errors=)
result = ser.where(
[True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore"
)
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning, match=msg):
result = ser.where(
[True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore"
)
tm.assert_series_equal(result, expected)
# with a non-datetime
result = ser.fillna("foo")
expected = Series([Timestamp("20130101"), "foo"])
tm.assert_series_equal(result, expected)
# assignment
ser2 = ser.copy()
ser2[1] = "foo"
tm.assert_series_equal(ser2, expected)
def test_fillna_downcast(self):
# GH#15277
# infer int64 from float64
ser = Series([1.0, np.nan])
result = ser.fillna(0, downcast="infer")
expected = Series([1, 0])
tm.assert_series_equal(result, expected)
# infer int64 from float64 when fillna value is a dict
ser = Series([1.0, np.nan])
result = ser.fillna({1: 0}, downcast="infer")
expected = Series([1, 0])
tm.assert_series_equal(result, expected)
def test_timedelta_fillna(self, frame_or_series):
# GH#3371
ser = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130102"),
Timestamp("20130103 9:01:01"),
]
)
td = ser.diff()
obj = frame_or_series(td)
# reg fillna
result = obj.fillna(Timedelta(seconds=0))
expected = Series(
[
timedelta(0),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# interpreted as seconds, no longer supported
msg = "value should be a 'Timedelta', 'NaT', or array of those. Got 'int'"
with pytest.raises(TypeError, match=msg):
obj.fillna(1)
result = obj.fillna(Timedelta(seconds=1))
expected = Series(
[
timedelta(seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(timedelta(days=1, seconds=1))
expected = Series(
[
timedelta(days=1, seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(np.timedelta64(10 ** 9))
expected = Series(
[
timedelta(seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(NaT)
expected = Series(
[
NaT,
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
],
dtype="m8[ns]",
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# ffill
td[2] = np.nan
obj = frame_or_series(td)
result = obj.ffill()
expected = td.fillna(Timedelta(seconds=0))
expected[0] = np.nan
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# bfill
td[2] = np.nan
obj = frame_or_series(td)
result = obj.bfill()
expected = td.fillna(Timedelta(seconds=0))
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
def test_datetime64_fillna(self):
ser = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130102"),
Timestamp("20130103 9:01:01"),
]
)
ser[2] = np.nan
# ffill
result = ser.ffill()
expected = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130103 9:01:01"),
]
)
tm.assert_series_equal(result, expected)
# bfill
result = ser.bfill()
expected = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130103 9:01:01"),
Timestamp("20130103 9:01:01"),
]
)
tm.assert_series_equal(result, expected)
def test_datetime64_fillna_backfill(self):
# GH#6587
# make sure that we are treating as integer when filling
msg = "containing strings is deprecated"
with tm.assert_produces_warning(FutureWarning, match=msg):
# this also tests inference of a datetime-like with NaT's
ser = Series([NaT, NaT, "2013-08-05 15:30:00.000001"])
expected = Series(
[
"2013-08-05 15:30:00.000001",
"2013-08-05 15:30:00.000001",
"2013-08-05 15:30:00.000001",
],
dtype="M8[ns]",
)
result = ser.fillna(method="backfill")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("tz", ["US/Eastern", "Asia/Tokyo"])
def test_datetime64_tz_fillna(self, tz):
# DatetimeLikeBlock
ser = Series(
[
Timestamp("2011-01-01 10:00"),
NaT,
Timestamp("2011-01-03 10:00"),
NaT,
]
)
null_loc = Series([False, True, False, True])
result = ser.fillna(Timestamp("2011-01-02 10:00"))
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00"),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-02 10:00"),
]
)
tm.assert_series_equal(expected, result)
# check s is not changed
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(Timestamp("2011-01-02 10:00", tz=tz))
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00", tz=tz),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-02 10:00", tz=tz),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna("AAA")
expected = Series(
[
Timestamp("2011-01-01 10:00"),
"AAA",
Timestamp("2011-01-03 10:00"),
"AAA",
],
dtype=object,
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(
{
1: Timestamp("2011-01-02 10:00", tz=tz),
3: Timestamp("2011-01-04 10:00"),
}
)
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00", tz=tz),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-04 10:00"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(
{1: Timestamp("2011-01-02 10:00"), 3: Timestamp("2011-01-04 10:00")}
)
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00"),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-04 10:00"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
# DatetimeTZBlock
idx = DatetimeIndex(["2011-01-01 10:00", NaT, "2011-01-03 10:00", NaT], tz=tz)
ser = Series(idx)
assert ser.dtype == f"datetime64[ns, {tz}]"
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(Timestamp("2011-01-02 10:00"))
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2011-01-02 10:00"),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2011-01-02 10:00"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(Timestamp("2011-01-02 10:00", tz=tz))
idx = DatetimeIndex(
[
"2011-01-01 10:00",
"2011-01-02 10:00",
"2011-01-03 10:00",
"2011-01-02 10:00",
],
tz=tz,
)
expected = Series(idx)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(Timestamp("2011-01-02 10:00", tz=tz).to_pydatetime())
idx = DatetimeIndex(
[
"2011-01-01 10:00",
"2011-01-02 10:00",
"2011-01-03 10:00",
"2011-01-02 10:00",
],
tz=tz,
)
expected = Series(idx)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna("AAA")
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
"AAA",
Timestamp("2011-01-03 10:00", tz=tz),
"AAA",
],
dtype=object,
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(
{
1: Timestamp("2011-01-02 10:00", tz=tz),
3: Timestamp("2011-01-04 10:00"),
}
)
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2011-01-02 10:00", tz=tz),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2011-01-04 10:00"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(
{
1: Timestamp("2011-01-02 10:00", tz=tz),
3: Timestamp("2011-01-04 10:00", tz=tz),
}
)
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2011-01-02 10:00", tz=tz),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2011-01-04 10:00", tz=tz),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
# filling with a naive/other zone, coerce to object
result = ser.fillna(Timestamp("20130101"))
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2013-01-01"),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2013-01-01"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
with tm.assert_produces_warning(FutureWarning, match="mismatched timezone"):
result = ser.fillna(Timestamp("20130101", tz="US/Pacific"))
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2013-01-01", tz="US/Pacific"),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2013-01-01", tz="US/Pacific"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
def test_fillna_dt64tz_with_method(self):
# with timezone
# GH#15855
ser = Series([Timestamp("2012-11-11 00:00:00+01:00"), NaT])
exp = Series(
[
Timestamp("2012-11-11 00:00:00+01:00"),
Timestamp("2012-11-11 00:00:00+01:00"),
]
)
tm.assert_series_equal(ser.fillna(method="pad"), exp)
ser = Series([NaT, Timestamp("2012-11-11 00:00:00+01:00")])
exp = Series(
[
Timestamp("2012-11-11 00:00:00+01:00")
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 5 21:40:43 2021
@author: Pierce
"""
#import required packagaes
import pandas as pd
import numpy as np
import catboost as cb
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
def CleanWeatherData(*args):
"""
Function to clean and prep weather data.
Merges the data files into one data frame.
Files should be entered in order, starting with the oldest.
Parameters
----------
*args : TYPE- String
File Path to csv with weather data
Returns
-------
df : DataFrame
Cleaned dataframe with missing values removed or calculated
Only Features for training left
"""
df = pd.DataFrame()
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Library to execute an exploratory data analysis (EDA). It is an approach to
analyzing data sets to summarize their main characteristics, often with visual
methods. Primarily EDA is for seeing what the data can tell us beyond the
formal modeling or hypothesis testing task.
@author: ucaiado
Created on 10/20/2016
"""
###########################################
# Suppress matplotlib user warnings
# Necessary for newer version of matplotlib
try:
import warnings
from IPython import get_ipython
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
from collections import defaultdict
import json
import numpy as np
import pandas as pd
import StringIO
warnings.filterwarnings('ignore', category=UserWarning,
module='matplotlib')
# Display inline matplotlib plots with IPython
get_ipython().run_line_magic('matplotlib', 'inline')
# aesthetics
sns.set_palette('deep', desat=.6)
sns.set_context(rc={'figure.figsize': (8, 4)})
sns.set_style('whitegrid')
sns.set_palette(sns.color_palette('Set2', 10))
# loading style sheet
get_ipython().run_cell('from IPython.core.display import HTML')
get_ipython().run_cell('HTML(open("ipython_style.css").read())')
except:
pass
###########################################
'''
Begin help functions
'''
def func_estimator(x):
'''
pseudo estimator to be used by poinplot
'''
return x[0]
'''
End help functions
'''
def read_logs(i_desired_trial, s_fname):
'''
Return a dictionary with information for the passed log file and trial and
the number of trades in the main instrument of the strategy
:param i_desired_trial: integer. the trial ID to collect data
:param s_fname: string. the name of the log file analyzed
'''
with open(s_fname) as fr:
# initiate the returned dictionary and other control variables
d_rtn = {'pnl': defaultdict(dict),
'position': defaultdict(dict),
'duration': defaultdict(dict),
'mid': defaultdict(dict)}
f_reward = 0.
f_count_step = 0
last_reward = 0.
i_trial = 0
i_trades = 0
for idx, row in enumerate(fr):
if row == '\n':
continue
# extract desired information
# count the number of trials
if ' New Trial will start!' in row:
i_trial += 1
f_count_step = 0
f_reward = 0
elif '.update():' in row and i_trial == i_desired_trial:
s_aux = row.strip().split(';')[1]
s_x = row.split('time = ')[1].split(',')[0]
s_date_all = s_x
s_x = s_date_all[:-7]
s_date = s_x
ts_date_all = pd.to_datetime(s_date_all)
ts_date = pd.to_datetime(s_date)
last_reward = float(s_aux.split('reward = ')[1].split(',')[0])
f_duration = float(s_aux.split('duration = ')[1].split(',')[0])
f_reward += last_reward
f_count_step += 1.
# extract some data
d_rtn['duration'][i_trial][ts_date_all] = f_duration
if ', position = ' in s_aux:
s_pos = s_aux.split(', position = ')[1].split('}')[0][1:]
s_pos = s_pos.replace("'", "")
l_pos = [(a.strip(), float(b)) for a, b in
[s.split(': ')for s in s_pos.split(',')]]
d_rtn['position'][i_trial][ts_date_all] = dict(l_pos)
if ', pnl = ' in s_aux:
s_action = s_aux.split(', pnl = ')[1].split(',')[0]
f_aux = float(s_action)
d_rtn['pnl'][i_trial][ts_date_all] = f_aux
if 'crossed_prices' in s_aux or 'correction_by_trade' in s_aux:
i_trades += 1
if ', inputs = ' in s_aux:
s_mid = s_aux.split(', inputs = ')[1].split("{'midPrice':")
s_mid = s_mid[1][1:].split('}}')[0]
s_mid = s_mid.replace("'", "")[1:]
l_pos = [(a.strip(), float(b)) for a, b in
[s.split(': ')for s in s_mid.split(',')]]
d_rtn['mid'][i_trial][ts_date_all] = dict(l_pos)
# finish the loop as soon as the trial is analyzed
if i_trial > i_desired_trial:
break
return d_rtn, i_trades
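# Illustrative usage sketch, added for clarity (not in the original library;
# the log path is hypothetical). read_logs returns the metrics dictionary and
# the trade count, and the per-trial PnL can be turned into a time-indexed
# Series in the same way plot_trial does:
#   d_data, i_trades = read_logs(1, 'log/sim_strategy.log')
#   df_pnl = pd.Series(d_data['pnl'][1])
#   df_pnl.plot()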
def read_logs2(i_desired_trial, s_fname):
'''
Return a dictionary with information for the passed log file and trial and
the number of trades in the main instrument of the strategy
:param i_desired_trial: integer. the trial ID to collect data
:param s_fname: string. the name of the log file analyzed
'''
with open(s_fname) as fr:
# initiate the returned dictionary and other control variables
d_rtn = {'pnl': defaultdict(dict),
'position': defaultdict(dict),
'duration': defaultdict(dict),
'mid': defaultdict(dict)}
f_reward = 0.
f_count_step = 0
last_reward = 0.
i_trial = 0
i_trades = 0
for idx, row in enumerate(fr):
if row == '\n':
continue
# extract desired information
# count the number of trials
if ' New Trial will start!' in row:
i_trial += 1
f_count_step = 0
f_reward = 0
elif '.update():' in row and i_trial == i_desired_trial:
s_aux = row.strip().split(';')[1]
s_x = row.split('time = ')[1].split(',')[0]
s_date_all = s_x
s_x = s_date_all[:-7]
s_date = s_x
ts_date_all = pd.to_datetime(s_date_all)
ts_date = pd.to_datetime(s_date)
last_reward = float(s_aux.split('reward = ')[1].split(',')[0])
f_duration = float(s_aux.split('duration = ')[1].split(',')[0])
f_reward += last_reward
f_count_step += 1.
# extract some data
d_rtn['duration'][i_trial][ts_date_all] = f_duration
if ', position = ' in s_aux:
s_pos = s_aux.split(', position = ')[1].split('}')[0][1:]
s_pos = s_pos.replace("'", "")
l_pos = [(a.strip(), float(b)) for a, b in
[s.split(': ')for s in s_pos.split(',')]]
d_rtn['position'][i_trial][ts_date_all] = dict(l_pos)
if ', pnl = ' in s_aux:
s_action = s_aux.split(', pnl = ')[1].split(',')[0]
f_aux = float(s_action)
d_rtn['pnl'][i_trial][ts_date_all] = f_aux
if 'crossed_prices' in s_aux or 'correction_by_trade' in s_aux:
i_trades += 1
if ', inputs = ' in s_aux:
s_mid = s_aux.split(', inputs = ')[1].split("{'midPrice':")
s_mid = s_mid[0].split(', position =')[0]
s_mid = s_mid.replace("'", '"').replace('None', '0')
l_mid = json.loads(s_mid)
s_mid = s_mid.replace("'", "")[1:]
l_mid = [(s_key, (float(x))) for s_key, x
in l_mid['midPrice'].iteritems()]
d_rtn['mid'][i_trial][ts_date_all] = dict(l_mid)
# finish the loop as soon as the trial is analyzed
if i_trial > i_desired_trial:
break
return d_rtn, i_trades
def read_logs_to_form_spread(i_desired_trial, s_fname):
'''
Return a dictionary with information for the passed log file and trial and
the number of trades in the main instrument of the strategy (just F21 and
F19)
:param i_desired_trial: integer. the trial ID to collect data
:param s_fname: string. the name of the log file analyzed
'''
with open(s_fname) as fr:
# initiate the returned dictionary and other control variables
d_rtn = {'pnl': defaultdict(dict),
'position': defaultdict(dict),
'mid': defaultdict(dict),
'duration': defaultdict(dict),
'TOB_F21': defaultdict(dict),
'TOB_F19': defaultdict(dict),
'MY_PRICES': defaultdict(dict),
'EXEC': defaultdict(dict),
'LAST_SPREAD': defaultdict(dict)}  # where I am most aggressive
f_reward = 0.
f_count_step = 0
last_reward = 0.
i_trial = 0
i_trades = 0
l_trade_actions = ['TAKE', 'crossed_prices', 'correction_by_trade',
'HIT']
for idx, row in enumerate(fr):
if row == '\n':
continue
# extract desired information
# count the number of trials
if ' New Trial will start!' in row:
i_trial += 1
f_count_step = 0
f_reward = 0
elif '.update():' in row and i_trial == i_desired_trial:
s_aux = row.strip().split(';')[1]
s_x = row.split('time = ')[1].split(',')[0]
s_date_all = s_x
s_x = s_date_all[:-7]
s_date = s_x
ts_date_all = pd.to_datetime(s_date_all)
ts_date = pd.to_datetime(s_date)
last_reward = float(s_aux.split('reward = ')[1].split(',')[0])
f_duration = float(s_aux.split('duration = ')[1].split(',')[0])
f_reward += last_reward
f_count_step += 1.
# extract some data
d_rtn['duration'][i_trial][ts_date_all] = f_duration
if ', position = ' in s_aux:
s_pos = s_aux.split(', position = ')[1].split('}')[0][1:]
s_pos = s_pos.replace("'", "")
l_pos = [(a.strip(), float(b)) for a, b in
[s.split(': ')for s in s_pos.split(',')]]
d_rtn['position'][i_trial][ts_date_all] = dict(l_pos)
if 'action = ' in s_aux:
s_action = row.split('action = ')[1].split(',')[0].strip()
d_aux2 = {'DI1F21': 0, 'DI1F19': 0}
if ts_date_all not in d_rtn['EXEC'][i_trial]:
d_rtn['EXEC'][i_trial][ts_date_all] = d_aux2.copy()
d_aux2 = d_rtn['EXEC'][i_trial][ts_date_all]
if s_action in l_trade_actions:
s_msgs = s_aux.split('msgs_to_env = ')[1]
for d_aux in json.loads(s_msgs.replace("'", '"')):
i_mult = 1 if d_aux['S'] == 'Buy' else -1
d_aux2[d_aux['C']] += float(d_aux['P']) * i_mult
if ', pnl = ' in s_aux:
s_action = s_aux.split(', pnl = ')[1].split(',')[0]
f_aux = float(s_action)
d_rtn['pnl'][i_trial][ts_date_all] = f_aux
if 'crossed_prices' in s_aux or 'correction_by_trade' in s_aux:
i_trades += 1
if ', inputs = ' in s_aux:
s_mid = s_aux.split(', inputs = ')[1].split("{'midPrice':")
s_mid = s_mid[0].split(', position =')[0]
s_mid = s_mid.replace("'", '"').replace('None', '0')
l_mid = json.loads(s_mid)
s_mid = s_mid.replace("'", "")[1:]
l_mid = [(s_key, (float(x))) for s_key, x
in l_mid['midPrice'].iteritems()]
d_rtn['mid'][i_trial][ts_date_all] = dict(l_mid)
if s_mid[0] != '{':
s_mid = '{' + s_mid
d_input = json.loads(s_mid)
d_aux = d_input['TOB']['DI1F19']
d_rtn['TOB_F19'][i_trial][ts_date_all] = d_aux
d_aux = d_input['TOB']['DI1F21']
d_rtn['TOB_F21'][i_trial][ts_date_all] = d_aux
d_aux = dict(zip(['BID', 'ASK'], d_input['last_spread']))
d_rtn['LAST_SPREAD'][i_trial][ts_date_all] = d_aux
d_aux = dict(zip(['BID', 'ASK'],
[d_input['agentOrders']['agentBid'],
d_input['agentOrders']['agentAsk']]))
d_rtn['MY_PRICES'][i_trial][ts_date_all] = d_aux
# finish the loop as soon as the trial is analyzed
if i_trial > i_desired_trial:
break
return d_rtn, i_trades
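# Illustrative sketch, added for clarity (not in the original library; the log
# path is hypothetical). Besides PnL and positions, this variant also logs the
# mid prices, so the F21-F19 spread seen during a trial can be rebuilt as:
#   d_data, _ = read_logs_to_form_spread(1, 'log/sim_spread.log')
#   df_mid = pd.DataFrame(d_data['mid'][1]).T
#   spread_bps = (df_mid['DI1F21'] - df_mid['DI1F19']) * 10**2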
def plot_trial(d_data, i_trades):
'''
Plots the data from logged metrics during a specific trial of a simulation.
It is designed to plot trades using DI1F21, DI1F19 and DI1F23.
:param d_data: dict. data with the metrics used
:param i_trades: integer. number of trades in the simulation
'''
fig = plt.figure(figsize=(12, 10))
s_key = d_data['mid'].keys()[0]
majorFormatter = mpl.dates.DateFormatter('%H:%M')
###############
# Spread plot
###############
df_spread = pd.DataFrame(d_data['mid'][s_key]).T
df_spread = df_spread.resample('1min').last()
ax = plt.subplot2grid((6, 6), (4, 0), colspan=2, rowspan=2)
((df_spread['DI1F23'] - df_spread['DI1F21'])*10**2).plot(ax=ax)
ax.set_title('F23 - F21')
ax.set_ylabel('Spread')
# ax.xaxis.set_major_formatter(majorFormatter)
ax = plt.subplot2grid((6, 6), (4, 2), colspan=2, rowspan=2)
((df_spread['DI1F21'] - df_spread['DI1F19'])*10**2).plot(ax=ax)
ax.set_title('F21 - F19')
# ax.xaxis.set_major_formatter(majorFormatter)
###############
# PnL plot
###############
ax = plt.subplot2grid((6, 6), (0, 0), colspan=3, rowspan=4)
df_pnl = pd.Series(d_data['pnl'][s_key])
df_pnl = df_pnl.resample('1min').last()
df_pnl.plot(ax=ax)
ax.axhline(xmin=0, xmax=1, y=0, color='black', linestyle='dashed')
ax.set_title('PnL Curve')
ax.set_ylabel('Value')
# ax.xaxis.set_major_formatter(majorFormatter)
###############
# Position plot
###############
ax1 = plt.subplot2grid((6, 6), (0, 3), colspan=3, rowspan=2)
df_pos = pd.DataFrame(d_data['position'][s_key]).T
df_pos = df_pos.resample('1min').last()
df_pos.plot(ax=ax1)
ax1.set_title('Position')
ax1.set_ylabel('Qty')
# ax1.xaxis.set_major_formatter(majorFormatter)
###############
# Duration plot
###############
ax2 = plt.subplot2grid((6, 6), (2, 3), colspan=3, rowspan=2) # sharex=ax1
ax2.set_title('Duration Exposure')
ax2.set_ylabel('Duration')
df_duration = pd.Series(d_data['duration'][s_key])
import pandas as pd
import numpy as np
dataset = pd.read_csv('./book2.csv')
# -*- coding: utf-8 -*-
import argparse
import pandas as pd
from dateutil.rrule import rrulestr, rrule
from dateutil.parser import parse
from datetime import datetime
import utils
import numpy as np
import csv
def load_arguments(**kwargs):
parser = argparse.ArgumentParser()
parser.add_argument("-hp", "--histo_prtfs", help="Chemin vers le fichier de l'historique des portefeuilles", type=str, default=r'./input/HISTO_PRTFS_20191231_20201231.csv')
parser.add_argument("-ho", "--histo_ope", help="Chemin vers le fichier de l'historique des opérations", type=str, default=r'./input/HISTO_OPE_20191231_20201231.csv')
parser.add_argument("-dd", "--date_debut", help="Date de début du calcul des performances", type=str, default='31/12/2019')
parser.add_argument("-df", "--date_fin", help="Date de fin du calcul des performances", type=str, default='31/12/2020')
return parser
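# Illustrative invocation, added for clarity (the script name "calc_perf.py"
# is assumed, not taken from the original file):
#   python calc_perf.py -hp ./input/HISTO_PRTFS_20191231_20201231.csv -dd 31/12/2019 -df 31/12/2020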
def Calc_Perf(**kwargs):
parser = load_arguments(**kwargs)
args = parser.parse_args()
# If the portfolios are not passed as an argument, retrieve them from the csv file
dfPrtfs = kwargs.get('Portefeuilles',pd.DataFrame())
if dfPrtfs.empty:
dfPrtfs = pd.read_csv(args.histo_prtfs, header=[0], sep=';', parse_dates=['DINV'])
import unittest
from StringIO import StringIO
from collections import Counter, OrderedDict
import os
import subprocess
import sys
from math import log
import numpy as np
from numpy.testing import assert_allclose
import pandas as pd
from pandas.util.testing import assert_frame_equal, assert_series_equal
from rosetta.text import text_processors, vw_helpers, nlp, converters
from rosetta.common import DocIDError, TokenError
class TestWordTokenizers(unittest.TestCase):
"""
"""
def setUp(self):
self.text = 'Is this really going to work? not-sure, but-maybe. O.K.'
self.word_tokenize = nlp.word_tokenize
self.bigram_tokenize = nlp.bigram_tokenize
def test_word_tokenize(self):
benchmark = [
'Is', 'this', 'really', 'going', 'to', 'work', 'not', 'sure',
'but', 'maybe', 'O.K']
result = self.word_tokenize(self.text)
self.assertEqual(result, benchmark)
def test_bigram_tokenize(self):
benchmark = [
('Is', 'this'), ('this', 'really'), ('really', 'going'),
('going', 'to'), ('to', 'work'), ('not', 'sure'), ('but', 'maybe')]
result = self.bigram_tokenize(self.text)
self.assertEqual(result, benchmark)
class TestTokenizerBasic(unittest.TestCase):
"""
"""
def setUp(self):
self.Tokenizer = text_processors.TokenizerBasic
def test_text_to_counter(self):
text = "Hi there's:alot,of | food hi"
result = self.Tokenizer().text_to_counter(text)
benchmark = Counter(["hi", "there's", "alot", "food", "hi"])
self.assertEqual(result, benchmark)
class TestSparseFormatter(unittest.TestCase):
def setUp(self):
self.formatter = text_processors.SparseFormatter()
def test_parse_feature_str(self):
feature_str = ' hi:1 bye:2.2 what:3 is:'
feature_values = self.formatter._parse_feature_str(feature_str)
benchmark = {'hi': 1, 'bye': 2.2, 'what': 3, 'is': 1}
self.assertEqual(feature_values, benchmark)
class TestVWFormatter(unittest.TestCase):
"""
"""
def setUp(self):
self.formatter = text_processors.VWFormatter()
def test_get_sstr_01(self):
doc_id = 'myname'
feature_values = OrderedDict([('hello', 1), ('dude', 3)])
importance = 1
result = self.formatter.get_sstr(
feature_values=feature_values, doc_id=doc_id,
importance=importance)
benchmark = " 1 %s| hello:1 dude:3" % doc_id
self.assertEqual(result, benchmark)
def test_get_sstr_02(self):
doc_id = 'myname|'
for doc_id in ['id|', 'id ', 'my:id', '|id', ':id', 'i:d', 'i d',
"'id", ":'"]:
with self.assertRaises(DocIDError):
self.formatter.get_sstr(doc_id=doc_id)
def test_write_dict_01(self):
record_str = " 3.2 doc_id1| hello:1 bye:2"
result = self.formatter.sstr_to_dict(record_str)
benchmark = {
'importance': 3.2, 'doc_id': 'doc_id1',
'feature_values': {'hello': 1, 'bye': 2}}
self.assertEqual(result, benchmark)
class TestVWHelpers(unittest.TestCase):
def setUp(self):
# self.varinfo_path = 'files/varinfo'
self.varinfo_file = StringIO(
'FeatureName '
'\t HashVal MinVal MaxVal Weight RelScore\n^bcc '
'\t 77964 0.00 1.00 +0.2789 100.00%\n^illiquids '
'\t 83330 5.00 2.00 -0.1786 64.05%\n')
self.topics_file_1 = StringIO(
"Version 7.3\nlabel: 11\n"
"0 1.1 2.2\n"
"1 1.11 2.22")
self.num_topics_1 = 2
self.predictions_file_1 = StringIO(
"0.0 0.0 doc1\n"
"0.0 0.0 doc2\n"
"1.1 2.2 doc1\n"
"1.11 2.22 doc2")
self.start_line_1 = 2
def test_parse_varinfo_01(self):
result = vw_helpers.parse_varinfo(self.varinfo_file)
benchmark = pd.DataFrame(
{
'feature_name': ['bcc', 'illiquids'],
'hash_val': [77964, 83330],
'max_val': [1., 2.],
'min_val': [0., 5.],
'rel_score': [1., 0.6405],
'weight': [0.2789, -0.1786]}).set_index('hash_val')
assert_frame_equal(result, benchmark)
def test_parse_lda_topics_01(self):
result = vw_helpers.parse_lda_topics(
self.topics_file_1, self.num_topics_1, normalize=False)
benchmark = pd.DataFrame(
{
'hash_val': [0, 1], 'topic_0': [1.1, 1.11],
'topic_1': [2.2, 2.22]}).set_index('hash_val')
assert_frame_equal(result, benchmark)
def test_parse_lda_topics_02(self):
result = vw_helpers.parse_lda_topics(
self.topics_file_1, self.num_topics_1, normalize=False,
max_token_hash=0)
benchmark = pd.DataFrame(
{
'hash_val': [0], 'topic_0': [1.1],
'topic_1': [2.2]}).set_index('hash_val')
assert_frame_equal(result, benchmark)
def test_parse_lda_predictions_01(self):
result = vw_helpers.parse_lda_predictions(
self.predictions_file_1, self.num_topics_1, self.start_line_1,
normalize=False)
benchmark = pd.DataFrame(
{'doc_id': ['doc1', 'doc2'], 'topic_0': [1.1, 1.11],
'topic_1': [2.2, 2.22]}).set_index('doc_id')
assert_frame_equal(result, benchmark)
def test_find_start_line_lda_predictions(self):
result = vw_helpers.find_start_line_lda_predictions(
self.predictions_file_1, self.num_topics_1)
self.assertEqual(result, 2)
class TestLDAResults(unittest.TestCase):
def setUp(self):
self.outfile = StringIO()
formatter = text_processors.VWFormatter()
self.sff = text_processors.SFileFilter(
formatter, bit_precision=8, verbose=False)
self.sff.id2token = {0: 'w0', 1: 'w1'}
sfile = StringIO(" 1 doc1| w0:1 w1:2\n 1 doc2| w0:3 w1:4")
self.sff.load_sfile(sfile)
self.topics_file_1 = StringIO(
"Version 7.3\nlabel: 11\n"
"0 1 2\n"
"1 3 4")
self.topics_file_2 = StringIO(
"Version 7.3\nlabel: 11\n"
"0 1 0\n"
"1 0 1")
self.num_topics_1 = 2
self.predictions_file_1 = StringIO(
"0.0 0.0 doc1\n"
"0.0 0.0 doc2\n"
"1 2 doc1\n"
"39 58 doc2")
def choose_lda(self, name='lda'):
if name == 'lda':
return vw_helpers.LDAResults(
self.topics_file_1, self.predictions_file_1,
self.sff, self.num_topics_1)
elif name == 'lda_2':
return vw_helpers.LDAResults(
self.topics_file_2, self.predictions_file_1, self.sff,
self.num_topics_1, alpha=1e-5)
def test_print_topics_1(self):
self.choose_lda().print_topics(num_words=2, outfile=self.outfile)
result = self.outfile.getvalue()
benchmark = (
u'========== Printing top 2 tokens in every topic==========\n-----'
'-------------------------\nTopic name: topic_0. P[topic_0] = 0.4'
'000\n topic_0 doc_freq\ntoken \nw1 '
' 0.75 2\nw0 0.25 2\n\n-------------------'
'-----------\nTopic name: topic_1. P[topic_1] = 0.6000\n t'
'opic_1 doc_freq\ntoken \nw1 0.66666'
'7 2\nw0 0.333333 2\n')
self.assertEqual(result, benchmark)
def test_set_probabilities_marginals(self):
lda = self.choose_lda()
pr_doc = pd.Series({'doc1': 3./(3+39+58), 'doc2': (39.+58)/(3+39+58)})
assert_series_equal(lda.pr_doc, pr_doc, check_names=False)
pr_topic = pd.Series({'topic_0': 4./10, 'topic_1': 6./10})
assert_series_equal(lda.pr_topic, pr_topic, check_names=False)
# Use the topics file for the token marginals
# Should be almost equal to results obtained with the predictions file
pr_token = pd.Series({'w0': 3./10, 'w1': 7./10})
assert_series_equal(lda.pr_token, pr_token, check_names=False)
def test_prob_1(self):
result = self.choose_lda().prob_token_topic(token='w0', c_token=['w1'])
benchmark = pd.DataFrame(
{'topic_0': [np.nan], 'topic_1': [np.nan]}, index=['w0'])
benchmark.index.name = 'token'
assert_frame_equal(result, benchmark)
def test_prob_2(self):
result = self.choose_lda().prob_token_topic(c_token=['w1'])
benchmark = pd.DataFrame(
{'topic_0': [3/7.], 'topic_1': [4/7.]}, index=['w1'])
benchmark.index.name = 'token'
assert_frame_equal(result, benchmark)
def test_prob_3(self):
result = self.choose_lda().prob_token_topic(
topic=['topic_0'], token=['w0'])
benchmark = pd.DataFrame({'topic_0': [1/10.]}, index=['w0'])
benchmark.index.name = 'token'
assert_frame_equal(result, benchmark)
def test_prob_4(self):
result = self.choose_lda().prob_token_topic(c_topic=['topic_0'])
benchmark = pd.DataFrame({'topic_0': [1/4., 3/4.]}, index=['w0', 'w1'])
benchmark.index.name = 'token'
assert_frame_equal(result, benchmark)
def test_prob_5(self):
result = self.choose_lda().prob_token_topic(
token=['w0'], c_topic=['topic_0'])
benchmark = pd.DataFrame({'topic_0': [1/4.]}, index=['w0'])
benchmark.index.name = 'token'
assert_frame_equal(result, benchmark)
def test_prob_6(self):
result = self.choose_lda().prob_doc_topic(
doc=['doc1'], c_topic=['topic_0'])
benchmark = pd.DataFrame({'topic_0': [1/40.]}, index=['doc1'])
benchmark.index.name = 'doc'
assert_frame_equal(result, benchmark)
def test_prob_7(self):
result = self.choose_lda().prob_doc_topic(
doc=['doc1', 'doc2'], c_topic=['topic_0'])
benchmark = pd.DataFrame(
{'topic_0': [1/40., 39/40.]}, index=['doc1', 'doc2'])
benchmark.index.name = 'doc'
assert_frame_equal(result, benchmark)
def test_cosine_similarity_1(self):
lda = self.choose_lda()
frame = lda.pr_topic_g_doc
result = lda.cosine_similarity(frame, frame)
assert_allclose(np.diag(result.values), 1)
def test_cosine_similarity_2(self):
topics = ['topic_0', 'topic_1']
frame1 = pd.DataFrame({'doc1': [1, 0], 'doc2': [0, 1]}, index=topics)
frame2 = pd.DataFrame({'doc3': [1, 0]}, index=topics)
result = self.choose_lda().cosine_similarity(frame1, frame2)
benchmark = pd.DataFrame({'doc3': [1, 0]}, index=['doc1', 'doc2'])
assert_frame_equal(result, benchmark.astype(float))
def test_cosine_similarity_3(self):
topics = ['topic_0', 'topic_1', 'topic_3']
frame1 = pd.DataFrame(
{'doc1': [0.5, 0.5, 0], 'doc2': [0, 0.5, 0.5]}, index=topics)
frame2 = pd.DataFrame({'doc3': [0.5, 0, 0.5]}, index=topics)
result = self.choose_lda().cosine_similarity(frame1, frame2)
benchmark = pd.DataFrame({'doc3': [0.5, 0.5]}, index=['doc1', 'doc2'])
assert_frame_equal(result, benchmark.astype(float))
def test_cosine_similarity_4(self):
topics = ['topic_0', 'topic_1']
frame1 = pd.DataFrame({'doc1': [1, 0], 'doc2': [0, 1]}, index=topics)
frame2 = pd.Series({'topic_0': 1, 'topic_1': 0})
# -*- coding: utf-8 -*-
"""Device curtailment plots.
This module creates plots are related to the curtailment of generators.
@author: <NAME>
"""
import os
import logging
import pandas as pd
from collections import OrderedDict
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.dates as mdates
import matplotlib.ticker as mtick
import marmot.config.mconfig as mconfig
import marmot.plottingmodules.plotutils.plot_library as plotlib
from marmot.plottingmodules.plotutils.plot_data_helper import PlotDataHelper
from marmot.plottingmodules.plotutils.plot_exceptions import (MissingInputData, DataSavedInModule,
UnderDevelopment, MissingZoneData)
class MPlot(PlotDataHelper):
"""curtailment MPlot class.
All the plotting modules use this same class name.
This class contains plotting methods that are grouped based on the
current module name.
The curtailment.py module contains methods that are
related to the curtailment of generators .
MPlot inherits from the PlotDataHelper class to assist in creating figures.
"""
def __init__(self, argument_dict: dict):
"""
Args:
argument_dict (dict): Dictionary containing all
arguments passed from MarmotPlot.
"""
# iterate over items in argument_dict and set as properties of class
# see key_list in Marmot_plot_main for list of properties
for prop in argument_dict:
self.__setattr__(prop, argument_dict[prop])
# Instantiation of MPlotHelperFunctions
super().__init__(self.Marmot_Solutions_folder, self.AGG_BY, self.ordered_gen,
self.PLEXOS_color_dict, self.Scenarios, self.ylabels,
self.xlabels, self.gen_names_dict, Region_Mapping=self.Region_Mapping)
self.logger = logging.getLogger('marmot_plot.'+__name__)
self.x = mconfig.parser("figure_size","xdimension")
self.y = mconfig.parser("figure_size","ydimension")
self.y_axes_decimalpt = mconfig.parser("axes_options","y_axes_decimalpt")
self.curtailment_prop = mconfig.parser("plot_data","curtailment_property")
def curt_duration_curve(self, prop: str = None,
start_date_range: str = None, end_date_range: str = None, **_):
"""Curtailment duration curve (line plot)
Displays curtailment sorted from highest occurrence to lowest
over given time period.
Args:
prop (str, optional): Controls type of re to include in plot.
Controlled through the plot_select.csv.
Defaults to None.
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines a end date at which to represent data to.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table.
"""
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,f"generator_{self.curtailment_prop}",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
self.logger.info(f"{self.AGG_BY} = {zone_input}")
RE_Curtailment_DC = pd.DataFrame()
PV_Curtailment_DC = pd.DataFrame()
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
re_curt = self[f"generator_{self.curtailment_prop}"].get(scenario)
# Timeseries [MW] RE curtailment [MWh]
try: #Check for regions missing all generation.
re_curt = re_curt.xs(zone_input,level = self.AGG_BY)
except KeyError:
self.logger.info(f'No curtailment in {zone_input}')
continue
re_curt = self.df_process_gen_inputs(re_curt)
# If using Marmot's curtailment property
if self.curtailment_prop == 'Curtailment':
re_curt = self.assign_curtailment_techs(re_curt)
# Timeseries [MW] PV curtailment [MWh]
pv_curt = re_curt[re_curt.columns.intersection(self.pv_gen_cat)]
re_curt = re_curt.sum(axis=1)
pv_curt = pv_curt.sum(axis=1)
re_curt = re_curt.squeeze() #Convert to Series
pv_curt = pv_curt.squeeze() #Convert to Series
if pd.notna(start_date_range):
self.logger.info(f"Plotting specific date range: \
{str(start_date_range)} to {str(end_date_range)}")
re_curt = re_curt[start_date_range : end_date_range]
pv_curt = pv_curt[start_date_range : end_date_range]
if re_curt.empty is True and prop == "PV+Wind":
self.logger.warning('No data in selected Date Range')
continue
if pv_curt.empty is True and prop == "PV":
self.logger.warning('No data in selected Date Range')
continue
# Sort from largest to smallest
re_cdc = re_curt.sort_values(ascending=False).reset_index(drop=True)
pv_cdc = pv_curt.sort_values(ascending=False).reset_index(drop=True)
re_cdc.rename(scenario, inplace=True)
pv_cdc.rename(scenario, inplace=True)
RE_Curtailment_DC = pd.concat([RE_Curtailment_DC, re_cdc], axis=1, sort=False)
PV_Curtailment_DC = pd.concat([PV_Curtailment_DC, pv_cdc], axis=1, sort=False)
# Remove columns that have values less than 1
RE_Curtailment_DC = RE_Curtailment_DC.loc[:, (RE_Curtailment_DC >= 1).any(axis=0)]
PV_Curtailment_DC = PV_Curtailment_DC.loc[:, (PV_Curtailment_DC >= 1).any(axis=0)]
# Replace _ with white space
RE_Curtailment_DC.columns = RE_Curtailment_DC.columns.str.replace('_',' ')
PV_Curtailment_DC.columns = PV_Curtailment_DC.columns.str.replace('_',' ')
# Create Dictionary from scenario names and color list
colour_dict = dict(zip(RE_Curtailment_DC.columns, self.color_list))
fig2, ax = plt.subplots(figsize=(self.x,self.y))
if prop == "PV":
if PV_Curtailment_DC.empty:
out = MissingZoneData()
outputs[zone_input] = out
continue
# unit conversion return divisor and energy units
unitconversion = PlotDataHelper.capacity_energy_unitconversion(PV_Curtailment_DC.values.max())
PV_Curtailment_DC = PV_Curtailment_DC/unitconversion['divisor']
Data_Table_Out = PV_Curtailment_DC
Data_Table_Out = Data_Table_Out.add_suffix(f" ({unitconversion['units']})")
x_axis_lim = 1.25 * len(PV_Curtailment_DC)
for column in PV_Curtailment_DC:
ax.plot(PV_Curtailment_DC[column], linewidth=3, color=colour_dict[column],
label=column)
ax.legend(loc='lower left',bbox_to_anchor=(1,0),
facecolor='inherit', frameon=True)
ax.set_ylabel(f"PV Curtailment ({unitconversion['units']})", color='black', rotation='vertical')
if prop == "PV+Wind":
if RE_Curtailment_DC.empty:
out = MissingZoneData()
outputs[zone_input] = out
continue
# unit conversion return divisor and energy units
unitconversion = PlotDataHelper.capacity_energy_unitconversion(RE_Curtailment_DC.values.max())
RE_Curtailment_DC = RE_Curtailment_DC/unitconversion['divisor']
Data_Table_Out = RE_Curtailment_DC
Data_Table_Out = Data_Table_Out.add_suffix(f" ({unitconversion['units']})")
x_axis_lim = 1.25 * len(RE_Curtailment_DC)
for column in RE_Curtailment_DC:
ax.plot(RE_Curtailment_DC[column], linewidth=3, color=colour_dict[column],
label=column)
ax.legend(loc='lower left',bbox_to_anchor=(1,0),
facecolor='inherit', frameon=True)
ax.set_ylabel(f"PV + Wind Curtailment ({unitconversion['units']})", color='black', rotation='vertical')
ax.set_xlabel('Hours', color='black', rotation='horizontal')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.tick_params(axis='y', which='major', length=5, width=1)
ax.tick_params(axis='x', which='major', length=5, width=1)
ax.yaxis.set_major_formatter(mpl.ticker.FuncFormatter(lambda x, p: format(x, f',.{self.y_axes_decimalpt}f')))
ax.margins(x=0.01)
#ax.set_xlim(0, 9490)
ax.set_xlim(0,x_axis_lim)
ax.set_ylim(bottom=0)
if mconfig.parser("plot_title_as_region"):
ax.set_title(zone_input)
outputs[zone_input] = {'fig': fig2, 'data_table': Data_Table_Out}
return outputs
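# Illustrative call sketch, added for clarity (not part of the original
# module; argument_dict stands for the dictionary normally assembled by
# Marmot_plot_main):
#   plotter = MPlot(argument_dict)
#   figs = plotter.curt_duration_curve(prop="PV")
#   # each entry maps a zone to {'fig': ..., 'data_table': ...} or an error object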
def curt_pen(self, prop: str = None,
start_date_range: str = None, end_date_range: str = None, **_):
"""Plot of curtailment vs penetration.
Each scenario is represented by a different symbol on an x, y axis
Args:
prop (str, optional): Controls type of re to include in plot.
Controlled through the plot_select.csv.
Defaults to None.
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines a end date at which to represent data to.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table.
"""
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True, "generator_Generation", self.Scenarios),
(True, "generator_Available_Capacity", self.Scenarios),
(True, f"generator_{self.curtailment_prop}", self.Scenarios),
(True, "generator_Total_Generation_Cost", self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
Penetration_Curtailment_out = pd.DataFrame()
self.logger.info(f"{self.AGG_BY } = {zone_input}")
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
gen = self["generator_Generation"].get(scenario)
try: #Check for regions missing all generation.
gen = gen.xs(zone_input,level = self.AGG_BY)
except KeyError:
self.logger.info(f'No generation in {zone_input}')
continue
avail_gen = self["generator_Available_Capacity"].get(scenario)
avail_gen = avail_gen.xs(zone_input,level=self.AGG_BY)
re_curt = self[f"generator_{self.curtailment_prop}"].get(scenario)
try:
re_curt = re_curt.xs(zone_input,level=self.AGG_BY)
except KeyError:
self.logger.info(f'No curtailment in {zone_input}')
continue
re_curt = self.df_process_gen_inputs(re_curt)
# If using Marmot's curtailment property
if self.curtailment_prop == 'Curtailment':
re_curt = self.assign_curtailment_techs(re_curt)
# Finds the number of unique hours in the year
no_hours_year = len(gen.index.unique(level="timestamp"))
# Total generation across all technologies [MWh]
total_gen = float(gen.sum())
# Timeseries [MW] and Total VRE generation [MWh]
vre_gen = (gen.loc[(slice(None), self.vre_gen_cat),:])
total_vre_gen = float(vre_gen.sum())
# Timeseries [MW] and Total RE generation [MWh]
re_gen = (gen.loc[(slice(None), self.re_gen_cat),:])
total_re_gen = float(re_gen.sum())
# Timeseries [MW] and Total PV generation [MWh]
pv_gen = (gen.loc[(slice(None), self.pv_gen_cat),:])
total_pv_gen = float(pv_gen.sum())
# % Penetration of generation classes across the year
VRE_Penetration = (total_vre_gen/total_gen)*100
RE_Penetration = (total_re_gen/total_gen)*100
PV_Penetration = (total_pv_gen/total_gen)*100
# Timeseries [MW] and Total RE available [MWh]
re_avail = (avail_gen.loc[(slice(None), self.re_gen_cat),:])
total_re_avail = float(re_avail.sum())
# Timeseries [MW] and Total PV available [MWh]
pv_avail = (avail_gen.loc[(slice(None), self.pv_gen_cat),:])
total_pv_avail = float(pv_avail.sum())
# Total RE curtailment [MWh]
total_re_curt = float(re_curt.sum().sum())
# Timeseries [MW] and Total PV curtailment [MWh]
pv_curt = re_curt[re_curt.columns.intersection(self.pv_gen_cat)]
total_pv_curt = float(pv_curt.sum().sum())
# % of hours with curtailment
Prct_hr_RE_curt = (len((re_curt.sum(axis=1)).loc[(re_curt.sum(axis=1))>0])/no_hours_year)*100
Prct_hr_PV_curt = (len((pv_curt.sum(axis=1)).loc[(pv_curt.sum(axis=1))>0])/no_hours_year)*100
# Max instantaneous curtailment
if re_curt.empty == True:
continue
else:
Max_RE_Curt = max(re_curt.sum(axis=1))
if pv_curt.empty == True:
continue
else:
Max_PV_Curt = max(pv_curt.sum(axis=1))
# % RE and PV Curtailment Capacity Factor
if total_pv_curt > 0:
RE_Curt_Cap_factor = (total_re_curt/Max_RE_Curt)/no_hours_year
PV_Curt_Cap_factor = (total_pv_curt/Max_PV_Curt)/no_hours_year
else:
RE_Curt_Cap_factor = 0
PV_Curt_Cap_factor = 0
# % Curtailment across the year
if total_re_avail == 0:
continue
else:
Prct_RE_curt = (total_re_curt/total_re_avail)*100
if total_pv_avail == 0:
continue
else:
Prct_PV_curt = (total_pv_curt/total_pv_avail)*100
# Total generation cost
Total_Gen_Cost = self["generator_Total_Generation_Cost"].get(scenario)
Total_Gen_Cost = Total_Gen_Cost.xs(zone_input,level=self.AGG_BY)
Total_Gen_Cost = float(Total_Gen_Cost.sum())
vg_out = pd.Series([PV_Penetration ,RE_Penetration, VRE_Penetration, Max_PV_Curt,
Max_RE_Curt, Prct_PV_curt, Prct_RE_curt, Prct_hr_PV_curt,
Prct_hr_RE_curt, PV_Curt_Cap_factor, RE_Curt_Cap_factor, Total_Gen_Cost],
index=["% PV Penetration", "% RE Penetration", "% VRE Penetration",
"Max PV Curtailment [MW]", "Max RE Curtailment [MW]",
"% PV Curtailment", '% RE Curtailment',"% PV hrs Curtailed",
"% RE hrs Curtailed", "PV Curtailment Capacity Factor",
"RE Curtailment Capacity Factor", "Gen Cost"])
vg_out = vg_out.rename(scenario)
Penetration_Curtailment_out = pd.concat([Penetration_Curtailment_out, vg_out], axis=1, sort=False)
Penetration_Curtailment_out = Penetration_Curtailment_out.T
# Data table of values to return to main program
Data_Table_Out = Penetration_Curtailment_out
VG_index = pd.Series(Penetration_Curtailment_out.index)
# VG_index = VG_index.str.split(n=1, pat="_", expand=True)
# VG_index.rename(columns = {0:"Scenario"}, inplace=True)
VG_index.rename("Scenario", inplace=True)
# VG_index = VG_index["Scenario"]
Penetration_Curtailment_out.loc[:, "Scenario"] = VG_index[:,].values
marker_dict = dict(zip(VG_index.unique(), self.marker_style))
colour_dict = dict(zip(VG_index.unique(), self.color_list))
Penetration_Curtailment_out["colour"] = [colour_dict.get(x, '#333333') for x in Penetration_Curtailment_out.Scenario]
Penetration_Curtailment_out["marker"] = [marker_dict.get(x, '.') for x in Penetration_Curtailment_out.Scenario]
if Penetration_Curtailment_out.empty:
self.logger.warning(f'No Generation in {zone_input}')
out = MissingZoneData()
outputs[zone_input] = out
continue
fig1, ax = plt.subplots(figsize=(self.x,self.y))
for index, row in Penetration_Curtailment_out.iterrows():
if prop == "PV":
ax.scatter(row["% PV Penetration"], row["% PV Curtailment"],
marker=row["marker"], c=row["colour"], s=100, label = row["Scenario"])
ax.set_ylabel('% PV Curtailment', color='black', rotation='vertical')
ax.set_xlabel('% PV Penetration', color='black', rotation='horizontal')
elif prop == "PV+Wind":
ax.scatter(row["% RE Penetration"], row["% RE Curtailment"],
marker=row["marker"], c=row["colour"], s=40, label = row["Scenario"])
ax.set_ylabel('% PV + Wind Curtailment', color='black', rotation='vertical')
ax.set_xlabel('% PV + Wind Penetration', color='black', rotation='horizontal')
ax.set_ylim(bottom=0)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.tick_params(axis='y', which='major', length=5, width=1)
ax.tick_params(axis='x', which='major', length=5, width=1)
ax.margins(x=0.01)
if mconfig.parser("plot_title_as_region"):
ax.set_title(zone_input)
handles, labels = plt.gca().get_legend_handles_labels()
by_label = OrderedDict(zip(labels, handles))
plt.legend(by_label.values(), by_label.keys(), loc = 'lower right')
outputs[zone_input] = {'fig': fig1, 'data_table': Data_Table_Out}
return outputs
def curt_total(self, start_date_range: str = None, end_date_range: str = None, **_):
"""Creates stacked barplots of total curtailment by technology.
A separate bar is created for each scenario.
Args:
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines a end date at which to represent data to.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table.
"""
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True, f"generator_{self.curtailment_prop}", self.Scenarios),
(True, "generator_Available_Capacity", self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
self.logger.info(f"{self.AGG_BY} = {zone_input}")
Total_Curtailment_out = pd.DataFrame()
Total_Available_gen = pd.DataFrame()
vre_curt_chunks = []
avail_gen_chunks = []
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
vre_collection = {}
avail_vre_collection = {}
vre_curt = self[f"generator_{self.curtailment_prop}"].get(scenario)
try:
vre_curt = vre_curt.xs(zone_input,level=self.AGG_BY)
except KeyError:
self.logger.info(f'No curtailment in {zone_input}')
continue
vre_curt = self.df_process_gen_inputs(vre_curt)
# If using Marmot's curtailment property
if self.curtailment_prop == 'Curtailment':
vre_curt = self.assign_curtailment_techs(vre_curt)
avail_gen = self["generator_Available_Capacity"].get(scenario)
try: #Check for regions missing all generation.
avail_gen = avail_gen.xs(zone_input,level = self.AGG_BY)
except KeyError:
self.logger.info(f'No available generation in {zone_input}')
continue
avail_gen = self.df_process_gen_inputs(avail_gen)
# If using Marmot's curtailment property
if self.curtailment_prop == 'Curtailment':
avail_gen = self.assign_curtailment_techs(avail_gen)
all_empty = True
if pd.notna(start_date_range):
self.logger.info(f"Plotting specific date range: \
{str(start_date_range)} to {str(end_date_range)}")
for vre_type in self.vre_gen_cat:
try:
vre_curt_type = vre_curt[vre_type]
# vre_curt_type = vre_curt.xs(vre_type,level='tech')
except KeyError:
self.logger.info(f'No {vre_type} in {zone_input}')
continue
avail_gen_type = avail_gen[vre_type]
# Code to index data by date range, if a date range is listed in marmot_plot_select.csv
if pd.notna(start_date_range):
avail_gen_type = avail_gen_type.groupby(['timestamp']).sum()
vre_curt_type = vre_curt_type.groupby(['timestamp']).sum()
vre_curt_type = vre_curt_type[start_date_range : end_date_range]
avail_gen_type = avail_gen_type[start_date_range : end_date_range]
if vre_curt_type.empty is False and avail_gen_type.empty is False:
all_empty = False
vre_collection[vre_type] = float(vre_curt_type.sum())
avail_vre_collection[vre_type] = float(avail_gen_type.sum())
if all_empty:
self.logger.warning('No data in selected Date Range')
continue
vre_table = pd.DataFrame(vre_collection, index=[scenario])
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 24 13:31:11 2019
@author: abibeka
"""
#0.0 Housekeeping. Clear variable space
from IPython import get_ipython #run magic commands
ipython = get_ipython()
ipython.magic("reset -f")
ipython = get_ipython()
import os
import sys
import numpy as np
import pandas as pd
import re
from bs4 import BeautifulSoup as BS
import xml.etree.ElementTree as ET
from itertools import islice
import glob
import datetime
NBTable = {
"Merge at US 92 WB-to-NB on-ramp":"US 92\\NB Merge",
"From US 92 onramp to LPGA off-ramp":"I95 from US92 to LPGA\\NB",
"Diverge at LPGA off-ramp": "NB Offramp",
"Merge at LPGA EB-to-NB on-ramp": "EB to NB",
"Merge at LPGA WB-to-NB on-ramp": "WB to NB",
"From LPGA WB-to-NB onramp to SR 40 off-ramp": "I95 from LPGA to SR40\\NB",
"Diverge at SR 40 off-ramp": "SR 40\\NB Offramp"
}
NB_dat = pd.DataFrame.from_dict(NBTable, orient='index')
from django.test import TestCase
from transform_layer.services.data_service import DataService, KEY_SERVICE, KEY_MEMBER, KEY_FAMILY
from transform_layer.calculations import CalculationDispatcher
from django.db import connections
import pandas
from pandas.testing import assert_frame_equal, assert_series_equal
import unittest
class HasDataTestCase(unittest.TestCase):
def test_has_data_empty_dataframe(self):
data = pandas.DataFrame()
self.assertFalse(CalculationDispatcher.has_data(data))
def test_has_data_nonempty_dataframe(self):
d1 = {"col1": [1,2,3,4], "col2": [5,6,7,8]}
data = pandas.DataFrame(d1)
self.assertTrue(CalculationDispatcher.has_data(data))
def test_has_data_no_services(self):
d1 = {"col1": [1,2,3,4], "col2": [5,6,7,8]}
data = {
KEY_SERVICE: pandas.DataFrame(),
KEY_MEMBER: pandas.DataFrame(d1),
KEY_FAMILY: pandas.DataFrame(d1)
}
self.assertFalse(CalculationDispatcher.has_data(data))
def test_has_data_no_members(self):
d1 = {"col1": [1,2,3,4], "col2": [5,6,7,8]}
data = {
KEY_SERVICE: pandas.DataFrame(d1),
KEY_MEMBER: pandas.DataFrame(),
KEY_FAMILY: pandas.DataFrame(d1)
}
self.assertFalse(CalculationDispatcher.has_data(data))
def test_has_data_full_dict(self):
d1 = {"col1": [1,2,3,4], "col2": [5,6,7,8]}
data = {
KEY_SERVICE: pandas.DataFrame(d1),
KEY_MEMBER: pandas.DataFrame(d1),
KEY_FAMILY: pandas.DataFrame(d1)
import logging
import numpy as np
import os
import pandas as pd
import pickle
import re
from opencell.database import constants
logger = logging.getLogger(__name__)
class PlateMicroscopyManager:
'''
This class organizes the methods that determine the plate_id, well_id, etc,
for all of the raw images found in the 'PlateMicroscopy' directory
'''
def __init__(self, root_dir=None, cache_dir=None):
self.root_dir = root_dir
self.cache_dir = cache_dir
os.makedirs(self.cache_dir, exist_ok=True)
self.cached_os_walk_filepath = os.path.join(self.cache_dir, 'os_walk.p')
self.cached_metadata_filepath = os.path.join(self.cache_dir, 'all-metadata.csv')
self.cached_raw_metadata_filepath = os.path.join(self.cache_dir, 'raw-metadata.csv')
if os.path.isfile(self.cached_os_walk_filepath):
self.load_cached_os_walk()
if os.path.isfile(self.cached_metadata_filepath):
self.load_cached_metadata()
def load_cached_os_walk(self):
with open(self.cached_os_walk_filepath, 'rb') as file:
self.os_walk = pickle.load(file)
# note that a trailing slash is required on self.root_dir
# to correctly remove the root_dir from the raw filepaths in construct_metadata
self.root_dir = self.os_walk[0][0]
if self.root_dir[-1] != os.sep:
self.root_dir += os.sep
def cache_os_walk(self):
if os.path.isfile(self.cached_os_walk_filepath):
raise ValueError('Cached os_walk already exists')
self.os_walk = list(os.walk(self.root_dir))
with open(self.cached_os_walk_filepath, 'wb') as file:
pickle.dump(self.os_walk, file)
def load_cached_metadata(self):
self.md = pd.read_csv(self.cached_metadata_filepath)
if os.path.isfile(self.cached_raw_metadata_filepath):
self.md_raw = pd.read_csv(self.cached_raw_metadata_filepath)
else:
self.construct_raw_metadata()
def cache_metadata(self, overwrite=False):
if not overwrite and os.path.isfile(self.cached_metadata_filepath):
raise ValueError('Cached metadata already exists')
self.md.to_csv(self.cached_metadata_filepath, index=False)
self.md_raw.to_csv(self.cached_raw_metadata_filepath, index=False)
def check_max_depth(self):
'''
check the maximum subdirectory depth (relative to the PlateMicroscopy dir)
Depth is three for 'plate_dir/exp_dir/sortday_dir/'
Depth is two for either 'plate_dir/exp_dir/' or 'plate_dir/PublicationQuality/'
'''
maxx = 0
for row in self.os_walk:
path, subdirs, filenames = row
filenames = [name for name in filenames if '.tif' in name]
if not filenames:
continue
maxx = max(maxx, len(path.replace(self.root_dir, '').split(os.sep)))
return maxx
@staticmethod
def parse_raw_tiff_filename(filename):
'''
Parse well_id, site_num, and target name from a raw TIFF filename
For almost all filenames, the format is '{well_id}_{site_num}_{target_name}.ome.tif'
The exception is 'Jin' lines, which appear in plate6 and plate7;
here, the format is '{well_id}_{site_num}_Jin_{well_id}_{target_name}.ome.tif',
and it is the first well_id that is the 'real', pipeline-relevant, well_id
Note that the target name sometimes includes the terminus that was tagged,
in the form of a trailing '-N', '-C', '_N', '_C', '_Int', '-Int'
Also, two target names include a trailing '-A' or '-B'
(these are 'HLA-A' and 'ARHGAP11A-B')
'''
well_id = '[A-H][1-9][0-2]?'
site_num = '[1-9][0-9]?'
target_name = r'[a-zA-Z0-9]+'
appendix = r'[\-|_][a-zA-Z]+'
raw_pattern = rf'^({well_id})_({site_num})_({target_name})({appendix})?.ome.tif$'
# in Jin filenames, the second well_id is not relevant
raw_jin_pattern = (
rf'^({well_id})_({site_num})_Jin_(?:{well_id})_({target_name})({appendix})?.ome.tif$' # noqa: E501
)
filename_was_parsed = False
for pattern in [raw_pattern, raw_jin_pattern]:
result = re.match(pattern, filename)
if result:
filename_was_parsed = True
well_id, site_num, target_name, appendix = result.groups()
break
if not filename_was_parsed:
return None
site_num = int(site_num)
return well_id, site_num, target_name
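# Illustrative examples, added for clarity (the target names are hypothetical
# placeholders, not taken from the original data):
#   parse_raw_tiff_filename('B7_12_ABCD1-N.ome.tif')     -> ('B7', 12, 'ABCD1')
#   parse_raw_tiff_filename('A1_3_Jin_B2_ABCD1.ome.tif') -> ('A1', 3, 'ABCD1')
#   parse_raw_tiff_filename('not_a_raw_tiff.tif')        -> None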
def construct_metadata(self, paths_only=False):
'''
Create metadata dataframe from the os.walk results
'''
rows = []
for row in self.os_walk:
path, subdirs, filenames = row
# all TIFF files in the directory
# (we assume these are TIFF stacks)
filenames = [name for name in filenames if '.tif' in name]
if not filenames:
continue
# ignore plate-level directories that are not of the form
# 'mNG96wp{num}' or 'mNG96wp{num}_Thawed',
# where num is an integer greater than 0
rel_path = path.replace(self.root_dir, '')
if not re.match(r'^mNG96wp[1-9]([0-9])?(_Thawed|/)', rel_path):
continue
# create a column for each subdirectory, starting with the plate-level directory
path_dirs = rel_path.split(os.sep)
path_info = {'level_%d' % ind: path_dir for ind, path_dir in enumerate(path_dirs)}
# parse the plate_num and imaging round from the plate_dir
plate_num, imaging_round_num = self.parse_src_plate_dir(path_dirs[0])
plate_info = {
'plate_num': plate_num,
'imaging_round_num': imaging_round_num,
}
# create a row only for the path
if paths_only:
rows.append({**path_info, **plate_info})
continue
# create a row for each file
for filename in filenames:
rows.append({'filename': filename, **path_info, **plate_info})
md = pd.DataFrame(data=rows)
import tensorflow as tf
import pandas as pd
import numpy as np
import tempfile
import os
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder,MinMaxScaler
from sklearn.model_selection import KFold
from imblearn.combine import SMOTETomek
from imblearn.over_sampling import RandomOverSampler
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
import shutil
import argparse
'''Columns containing 0'''
lstZerodrp=['Timestamp','BwdPSHFlags','FwdURGFlags','BwdURGFlags','CWEFlagCount','FwdBytsbAvg','FwdPktsbAvg','FwdBlkRateAvg','BwdBytsbAvg',
'BwdBlkRateAvg','BwdPktsbAvg']
'''Columns containing 1'''
lstScaledrp=['FwdPSHFlags','FINFlagCnt','SYNFlagCnt','RSTFlagCnt','PSHFlagCnt','ACKFlagCnt','URGFlagCnt','ECEFlagCnt']
DATA_FILE = '/opt/Network_Traffic.csv'
'''Dataset preprocess'''
def read_dataFile():
chunksize = 10000
chunk_list = []
missing_values = ["n/a", "na", "--", "Infinity", "infinity", "Nan", "NaN"]
for chunk in pd.read_csv(DATA_FILE, chunksize=chunksize, na_values = missing_values):
chunk_list.append(chunk)
break
dataFrme = pd.concat(chunk_list)
lstcols = []
for i in dataFrme.columns:
i = str(i).replace(' ','').replace('/','')
lstcols.append(i)
dataFrme.columns=lstcols
dfAllCpy = dataFrme.copy()
dataFrme = dataFrme.drop(lstZerodrp,axis=1)
return dataFrme
'''Remove NA'''
def preprocess_na(dataFrme):
na_lst = dataFrme.columns[dataFrme.isna().any()].tolist()
for j in na_lst:
dataFrme[j].fillna(0, inplace=True)
return dataFrme
def create_features_label(dataFrme):
#Create independent and Dependent Features
columns = dataFrme.columns.tolist()
# Filter the columns to remove data we do not want
columns = [c for c in columns if c not in ["Label"]]
# Store the variable we are predicting
target = "Label"
# Define a random state
state = np.random.RandomState(42)
X = dataFrme[columns]
Y = dataFrme[target]
return X,Y
'''Label substitution'''
def label_substitution(dataFrme):
dictLabel = {'Benign':0,'Bot':1}
dataFrme['Label']= dataFrme['Label'].map(dictLabel)
LABELS=['Benign','Bot']
count_classes = pd.value_counts(dataFrme['Label'], sort = True)
print(count_classes)
# Get the Benign and the Bot values
Benign = dataFrme[dataFrme['Label']==0]
Bot = dataFrme[dataFrme['Label']==1]
return dataFrme
'''Class imbalance handling'''
def handle_class_imbalance(X,Y):
# os_us = SMOTETomek(ratio=0.5)
# X_res, y_res = os_us.fit_sample(X, Y)
ros = RandomOverSampler(random_state=50)
X_res, y_res = ros.fit_sample(X, Y)
ibtrain_X = pd.DataFrame(X_res,columns=X.columns)
ibtrain_y = pd.DataFrame(y_res,columns=['Label'])
return ibtrain_X,ibtrain_y
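# Note: RandomOverSampler balances the classes by resampling minority-class rows with
# replacement; the commented-out SMOTETomek lines above are an alternative combined
# over-/under-sampling strategy.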
'''Feature Selection'''
def correlation_features(ibtrain_X):
    # Correlation analysis
corr = ibtrain_X.corr()
cor_columns = np.full((corr.shape[0],), True, dtype=bool)
for i in range(corr.shape[0]):
for j in range(i+1, corr.shape[0]):
if corr.iloc[i,j] >= 0.9:
if cor_columns[j]:
cor_columns[j] = False
dfcorr_features = ibtrain_X[corr.columns[cor_columns]]
return dfcorr_features
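# Note: correlation_features keeps only the first feature of any pair whose pairwise
# correlation is >= 0.9; e.g. if two (hypothetical) columns 'TotFwdPkts' and
# 'SubflowFwdPkts' were correlated at 0.95, only 'TotFwdPkts' would remain.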
''' Highly correlated features '''
def top_ten_features(dfcorr_features,ibtrain_X,ibtrain_y):
feat_X = dfcorr_features
feat_y = ibtrain_y['Label']
#apply SelectKBest class to extract top 10 best features
bestfeatures = SelectKBest(score_func=f_classif, k=10)
fit = bestfeatures.fit(feat_X,feat_y)
    dfscores = pd.DataFrame(fit.scores_)
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------------------------
import os
import unittest
import numpy as np
import pandas as pd
from nimbusml import Pipeline, FileDataStream, Role, DataSchema
from nimbusml.cluster import KMeansPlusPlus
from nimbusml.datasets import get_dataset
from nimbusml.ensemble import FastForestRegressor, LightGbmRanker
from nimbusml.feature_extraction.categorical import OneHotVectorizer, \
OneHotHashVectorizer
from nimbusml.linear_model import FastLinearClassifier, \
LogisticRegressionBinaryClassifier, LogisticRegressionClassifier
from nimbusml.model_selection import CV
from nimbusml.preprocessing import ToKey
from nimbusml.preprocessing.schema import ColumnConcatenator, ColumnDropper
from nimbusml.tests.test_utils import split_features_and_label
from sklearn.utils.testing import assert_equal, assert_true, \
assert_greater_equal
infert_file = get_dataset('infert').as_filepath()
def default_pipeline(
learner=FastForestRegressor,
transforms=[],
learner_arguments={},
init_pipeline=True):
pipeline = transforms + [learner(**learner_arguments)]
if init_pipeline:
pipeline = Pipeline(pipeline)
return pipeline
def infert_ds(label_index, label_name='Label'):
file_schema = 'sep=, col=id:TX:0 col=education:TX:1 col={}:R4:{} ' \
'col=Features:R4:{}-8 header=+'.format(
label_name, label_index, label_index + 1)
data = FileDataStream(infert_file, schema=file_schema)
if label_name != 'Label':
data._set_role(Role.Label, label_name)
return data
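# The file_schema string above uses NimbusML's FileDataStream schema syntax,
# 'col=<name>:<type>:<column index or range>', where TX denotes a text column,
# R4 a single-precision float, and 'header=+' indicates the file has a header row.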
def infert_df(label_name):
df = get_dataset('infert').as_df()
df = (OneHotVectorizer() << 'education_str').fit_transform(df)
X, y = split_features_and_label(df, label_name)
return X, y
def random_df(shape=(100, 3)):
np.random.seed(0)
df = pd.DataFrame(np.random.rand(*shape))
df.columns = df.columns.astype('str')
return df
def random_series(values=[0, 1], length=100, type='int', name=None):
np.random.seed(0)
return pd.Series(np.random.choice(values, length).astype(type), name=name)
def default_infert_transforms():
return [OneHotVectorizer(columns={'edu': 'education'})]
def default_infert_learner_arguments():
return {'feature': ['Features', 'edu']}
def check_cv_results(learner_type, results, n_folds, expected_metrics):
assert_true(isinstance(results, dict))
result_names = set(results.keys())
common_outputs = {'predictions', 'models', 'metrics', 'metrics_summary'}
classification_outputs = common_outputs.union({'confusion_matrix'})
if learner_type in ['regressor', 'ranker', 'clusterer']:
assert_equal(result_names, common_outputs)
elif learner_type in ['binary', 'multiclass']:
assert_equal(result_names, classification_outputs)
else:
assert_true(False, 'Invalid learner type ' + learner_type)
for name, df in results.items():
if name == 'metrics_summary':
# metrics_summary has no fold column
# check for metrics accuracy
for m_name, m_expected_value in expected_metrics.items():
m_value = df.loc['Average', m_name]
assert_greater_equal(
m_value,
m_expected_value,
msg='Metric {} is lower than expected'.format(m_name))
# no more checks for metrics_summary
continue
assert_true(CV.fold_column_name in df.columns)
folds_series = df[CV.fold_column_name]
if name in ['models', 'metrics']:
folds_count = len(folds_series)
else:
            folds_count = pd.Series.nunique(folds_series)
import pandas as pd
from .components import Universe, Scheduler, Constructor, Backtester
from .report import log_report, perf_report
class Portfolio(object):
def __init__(self, universe, scheduler, constructor):
self._backtester = Backtester(universe, scheduler, constructor)
@property
def log(self):
return self._backtester.log
@property
def returns(self):
return self._backtester.result.returns
@property
def weights(self):
return self._backtester.result.weights
@property
def trades(self):
return self._backtester.result.trades
@property
def stats(self):
return self._backtester.result.stats
def report(self, start=None, end=None, benchmark=None, relative=False):
'''
Report performance metrics and charts.
Parameters
----------
start : string 'YYYY-MM-DD' or datetime
end : string 'YYYY-MM-DD' or datetime
        benchmark : pd.Series
daily index value or price, not daily return
relative : boolean
If True, excess returns will be analyzed.
'''
rtns = self._backtester.result.returns['return']
wgts = self._backtester.result.weights['weight']
if benchmark is not None:
bm = benchmark.pct_change()
bm.index = pd.to_datetime(bm.index, utc=True)
bm = bm.reindex(rtns.index).fillna(0)
t0 = rtns.index[0]
if rtns.loc[t0] == 0:
bm.loc[t0] = 0 # To align with portfolio return
if relative:
rtns = rtns - bm
else:
bm = None
trds = self._backtester.result.trades['trade']
perf_report(rtns, trds, wgts, bm)
# Main Function
def backtest(prices, schedule, weight=None, risk_budget=None, start='1900-01-01', end='2099-12-31', verbose=True):
'''
Run backtest.
Parameters
----------
prices : pd.DataFrame
Daily prices/index values of the assets.
schedule : string or list
weight : dictionary
risk_budget : dictionary
start : string 'YYYY-MM-DD' or datetime
end : string 'YYYY-MM-DD' or datetime
    verbose : boolean
Returns
-------
portfolio : Portfolio object
Contains the universe data, backtest policies and the backtest result.
'''
# input check
if (weight is None and risk_budget is None) or (weight is not None and risk_budget is not None):
        raise ValueError('You must specify exactly one rebalancing rule: weight or risk_budget')
prices.index = pd.to_datetime(prices.index, utc=True)
pr = prices.stack().rename('price')
dr = prices.pct_change().stack().rename('return')
    pricing = pd.concat([pr, dr], axis=1)
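# A minimal usage sketch (asset names, the schedule label and the weights below are
# illustrative assumptions, not part of this module):
#
#   prices = pd.DataFrame(...)                      # daily prices, one column per asset
#   pf = backtest(prices, schedule='month_end',     # schedule value is an assumed example
#                 weight={'AssetA': 0.6, 'AssetB': 0.4})
#   pf.report(benchmark=prices['AssetA'])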
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from utils import sum_country_regions, get_change_rates, generate_df_change_rate, sliding_window, generate_COVID_input, generate_COVID_aux_input
import pickle
import copy
import os
import argparse
parser = argparse.ArgumentParser(description='Hi-covidnet DATALOADER')
# basic settings
parser.add_argument('--output_size', type=int, default=14, metavar='O', help='Number of days to predict (default: 14)')
parser.add_argument('--save', action='store_true', default=False, help='Save the pre-processed data')
def normalize(df, axis=1):
"""
@df : shape(N,D)
"""
    mean = df.iloc[:,4:].mean(axis=axis)  # one value per row when axis=1, i.e. shape (N,)
    std = df.iloc[:,4:].std(axis=axis)  # one value per row when axis=1, i.e. shape (N,)
df.iloc[:,4:] = (df.iloc[:,4:].subtract(mean, axis='index')).divide(std, axis='index')
return df, mean, std
def scaling(df_confirm,df_death, df_confirm_change_1st_order,
df_confirm_change_2nd_order,df_death_change_1st_order,
df_death_change_2nd_order, fname="x_mean_std_list_5_27.pkl"):
##scaling
mean_std_list = []
df_confirm, mean, std = normalize(df_confirm, axis=1)
mean_std_list.append((mean,std))
df_death, mean, std = normalize(df_death, axis=1)
mean_std_list.append((mean,std))
df_confirm_change_1st_order, mean, std = normalize(df_confirm_change_1st_order, axis=1)
mean_std_list.append((mean,std))
df_confirm_change_2nd_order, mean, std = normalize(df_confirm_change_2nd_order, axis=1)
mean_std_list.append((mean,std))
df_death_change_1st_order, mean, std = normalize(df_death_change_1st_order, axis=1)
mean_std_list.append((mean,std))
df_death_change_2nd_order, mean, std = normalize(df_death_change_2nd_order, axis=1)
mean_std_list.append((mean,std))
pickle.dump(mean_std_list, open("pickled_ds/"+fname, "wb"))
def google_trenddata_loader(fname, countries_Korea_inbound):
google_trend = pd.read_csv('./dataset/{fname}.csv'.format(fname=fname), index_col=0)
iso_to_country = countries_Korea_inbound.set_index('iso').to_dict()['Country']
google_trend.rename(columns = iso_to_country, inplace = True)
google_trend = google_trend.set_index('date').T.reset_index()
google_trend = google_trend.rename(columns = {'index': 'Country'})
google_trend.columns = google_trend.columns.astype(str)
google_trend = google_trend.rename(columns = {col: str(int(col[4:6]))+'/'+str(int(col[-2:]))+'/' + col[2:4] for col in google_trend.columns[1:].astype(str)})
google_trend.loc[:, google_trend.columns[1:]] /= 100
google_trend.drop(np.argwhere(google_trend.Country == 'Korea, South')[0], inplace=True)
mean, std = google_trend.iloc[:,1:].mean(axis=1), google_trend.iloc[:,1:].std(axis=1)
google_trend.iloc[:,1:] = google_trend.iloc[:,1:].subtract(mean, axis='index').divide(std, axis='index')
return google_trend
def dataloader(output_size, save=False):
url_confirm = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'
df_confirm = pd.read_csv(url_confirm, error_bad_lines=False)
url_death = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv'
df_death = pd.read_csv(url_death, error_bad_lines=False)
countries_with_regions = df_confirm['Country/Region'].value_counts()[df_confirm['Country/Region'].value_counts()>1].index
for country in countries_with_regions:
df_confirm = sum_country_regions(df_confirm, country)
df_death = sum_country_regions(df_death, country)
df_confirm.reset_index(inplace=True, drop=True)
df_death.reset_index(inplace=True, drop=True)
# Get the index of the 'last zero patients' from the countries
selected_index = {}
for country in range(len(df_confirm)):
try: df_confirm.loc[country] # No country due to merged
except Exception as e: continue
try:
selected_index[country] = list(filter(lambda i: df_confirm.loc[country].eq(0)[i], range(len(df_confirm.columns))))[-1]
except Exception as e:
selected_index[country] = 4
countries_Korea_inbound = pd.read_csv("./dataset/country_info.csv")
countries_Korea_inbound.loc[countries_Korea_inbound['Country'] == 'China', 'continent'] = 'China'
selected_country = countries_Korea_inbound.loc[countries_Korea_inbound.visit.eq(1), 'Country'].values
df_confirm_change_1st_order = generate_df_change_rate(df_confirm, days=1)
df_confirm_change_2nd_order = generate_df_change_rate(df_confirm_change_1st_order, days=1)
df_death_change_1st_order = generate_df_change_rate(df_death, days=1)
df_death_change_2nd_order = generate_df_change_rate(df_death_change_1st_order, days=1)
scaling(df_confirm,df_death, df_confirm_change_1st_order,
df_confirm_change_2nd_order,df_death_change_1st_order,
df_death_change_2nd_order)
fnames = ["trend_covid-19", "trend_covid_test", "trend_flu", "trend_mask"]
google_data = [google_trenddata_loader(fname, countries_Korea_inbound) for fname in fnames]
df_incoming = pd.read_csv('./dataset/confirmed_by_continent.csv')
scaler_list = pickle.load(open("pickled_ds/x_mean_std_list_5_27.pkl", "rb"))
data_model2, target_continent, target_total = generate_COVID_input(df_death,
df_death_change_1st_order,
df_death_change_2nd_order,
df_death_change_1st_order,
df_confirm_change_1st_order,
df_confirm_change_2nd_order,
df_incoming,
*google_data,
countries_Korea_inbound,
seq_length=14,
end_day='5/6/20', # '5/5/20'
is_7days= True if output_size==7 else False,
scaler_list=scaler_list)
print("data shape is ",len(data_model2), data_model2[30]['Argentina'].shape)
print("target_continent shape is ",target_continent.shape, "target_total shape is ",target_total.shape,)
print("Loading KT roaming data")
    roaming = pd.read_csv('./dataset/roaming_preprocess.csv')
from fowt_force_gen import windbins
import pandas as pd
import numpy as np
import datetime
from unittest import mock
class TestMetGeneration:
def test_met_generation_1(self):
# Interior test
file = 'tests/test_data//test_metdata_normal.txt'
met_data = windbins.get_met_data(file)
compare_data = {'Wind Speed': [2.5, 2.2, 2.0, 3.8, 7.7, 6.4, 8.0, 5.0, 9.7],
'Wind Direction': [327.0, 343.0, 328.0, 326.0, 288.0, 281.0, 278.0, 280.0, 245.0],
'Significant Wave Height': [1.57, 1.66, 1.58, 1.8, 1.81, 1.66, 1.77, 1.77, 1.76],
'Wave Direction': [113.0, 97.0, 98.0, 107.0, 122.0, 96.0, 135.0, 95.0, 116.0],
'Wave Period': [13.79, 13.79, 14.81, 12.9, 13.79, 17.39, 13.79, 17.39, 13.79]}
compare_data = pd.DataFrame(data=compare_data)
assert compare_data.equals(met_data)
def test_met_generation_2(self):
# Test with integer overflows in input file
file = 'tests/test_data//test_metdata_overflow.txt'
met_data = windbins.get_met_data(file)
compare_data = {
'Wind Speed': [0.7, np.nan, np.nan, 1.3, 0.6, 1.3, 1.3, 0.6, 0.7, 1.5, np.nan, 2.1, 2.4, 3.0, 2.6],
'Wind Direction': [100., np.nan, 126., 157., 172., np.nan, 168., 159., 154., 29., np.nan, 16., 23., 27.,
20.],
'Significant Wave Height': [np.nan, 2.9, np.nan, np.nan, np.nan, np.nan, np.nan, 2.81, np.nan, np.nan,
np.nan, 3.04, np.nan, np.nan, np.nan],
'Wave Direction': [np.nan, 83.0, np.nan, np.nan, np.nan, np.nan, np.nan, 86.0, np.nan, np.nan, np.nan, 80.0,
np.nan, np.nan, np.nan],
'Wave Period': [np.nan, 14.81, np.nan, np.nan, 12.4, np.nan, np.nan, 14.81, np.nan, np.nan, np.nan, 13.79,
np.nan, np.nan, np.nan]}
compare_data = pd.DataFrame(data=compare_data)
assert compare_data.equals(met_data)
class TestWindGeneration:
def test_wind_generation_1(self):
# Interior test
file = 'tests/test_data//test_winddata_normal.txt'
wind_data = windbins.get_wind_data(file)
compare_data = {'Wind Speed': [2.2, 2.1, 2.3, 2.1, 2.9, 2.6, 2.2, 2.0, 1.7, 2.0],
'Wind Direction': [345.0, 338.0, 335.0, 344.0, 332.0, 329.0, 324.0, 329.0, 340.0, 333.0]}
compare_data = pd.DataFrame(data=compare_data)
assert compare_data.equals(wind_data)
def test_wind_generation_2(self):
# Test with integer overflows in input file
file = 'tests/test_data//test_winddata_overflow.txt'
wind_data = windbins.get_wind_data(file)
compare_data = {'Wind Speed': [np.nan, np.nan, np.nan, 4.1, np.nan, 3.4, 3.7],
'Wind Direction': [np.nan, np.nan, np.nan, np.nan, 74.0, 77.0, 75.0]}
compare_data = pd.DataFrame(data=compare_data)
assert compare_data.equals(wind_data)
class TestCurrentGeneration:
def test_current_generation_1(self):
# Interior test
file = 'tests/test_data//test_currentdata_normal.txt'
current_data, current_depth = windbins.get_current_data(file)
compare_data = {'Current Speed': [31., 30., 33., 37., 42., 41., 32., 35., 18., 29.],
'Current Direction': [306., 178., 176., 189., 174., 159., 157., 176., 228., 228.]}
compare_data = pd.DataFrame(data=compare_data)
compare_depth = 2.5
assert compare_data.equals(current_data)
assert current_depth == compare_depth
def test_current_generation_2(self):
# Test with integer overflows in input file
file = 'tests/test_data//test_currentdata_overflow.txt'
current_data, current_depth = windbins.get_current_data(file)
compare_data = {'Current Speed': [10.2, 11.6, 8.2, np.nan, np.nan, 3.5, 15., 19.2],
'Current Direction': [105., 90., 91., np.nan, 98., np.nan, 193., 185.]}
        compare_data = pd.DataFrame(data=compare_data)
# -*- coding:utf-8 -*-
# !/usr/bin/env python
"""
Date: 2022/5/16 18:36
Desc: 新股和风险警示股
新浪-行情中心-沪深股市-次新股
http://vip.stock.finance.sina.com.cn/mkt/#new_stock
东方财富网-行情中心-沪深个股-风险警示板
http://quote.eastmoney.com/center/gridlist.html#st_board
"""
import math
import pandas as pd
import requests
def stock_zh_a_st_em() -> pd.DataFrame:
"""
东方财富网-行情中心-沪深个股-风险警示板
http://quote.eastmoney.com/center/gridlist.html#st_board
:return: 风险警示板
:rtype: pandas.DataFrame
"""
url = 'http://40.push2.eastmoney.com/api/qt/clist/get'
params = {
'pn': '1',
'pz': '2000',
'po': '1',
'np': '1',
'ut': 'bd1d9ddb04089700cf9c27f6f7426281',
'fltt': '2',
'invt': '2',
'fid': 'f3',
'fs': 'm:0 f:4,m:1 f:4',
'fields': 'f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152',
'_': '1631107510188',
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['data']['diff'])
temp_df.reset_index(inplace=True)
temp_df['index'] = range(1, len(temp_df)+1)
temp_df.columns = [
'序号',
'最新价',
'涨跌幅',
'涨跌额',
'成交量',
'成交额',
'振幅',
'换手率',
'市盈率-动态',
'量比',
'_',
'代码',
'_',
'名称',
'最高',
'最低',
'今开',
'昨收',
'_',
'_',
'_',
'市净率',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
]
temp_df = temp_df[[
'序号',
'代码',
'名称',
'最新价',
'涨跌幅',
'涨跌额',
'成交量',
'成交额',
'振幅',
'最高',
'最低',
'今开',
'昨收',
'量比',
'换手率',
'市盈率-动态',
'市净率',
]]
temp_df['最新价'] = pd.to_numeric(temp_df['最新价'], errors="coerce")
temp_df['涨跌幅'] = pd.to_numeric(temp_df['涨跌幅'], errors="coerce")
temp_df['涨跌额'] = pd.to_numeric(temp_df['涨跌额'], errors="coerce")
temp_df['成交量'] = pd.to_numeric(temp_df['成交量'], errors="coerce")
temp_df['成交额'] = pd.to_numeric(temp_df['成交额'], errors="coerce")
temp_df['振幅'] = pd.to_numeric(temp_df['振幅'], errors="coerce")
temp_df['最高'] = pd.to_numeric(temp_df['最高'], errors="coerce")
temp_df['最低'] = pd.to_numeric(temp_df['最低'], errors="coerce")
temp_df['今开'] = pd.to_numeric(temp_df['今开'], errors="coerce")
temp_df['量比'] = pd.to_numeric(temp_df['量比'], errors="coerce")
temp_df['换手率'] = pd.to_numeric(temp_df['换手率'], errors="coerce")
return temp_df
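# A minimal usage sketch (requires network access to the East Money endpoint):
#
#   if __name__ == "__main__":
#       stock_zh_a_st_em_df = stock_zh_a_st_em()
#       print(stock_zh_a_st_em_df)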
def stock_zh_a_new_em() -> pd.DataFrame:
"""
东方财富网-行情中心-沪深个股-新股
http://quote.eastmoney.com/center/gridlist.html#newshares
:return: 新股
:rtype: pandas.DataFrame
"""
url = 'http://40.push2.eastmoney.com/api/qt/clist/get'
params = {
'pn': '1',
'pz': '2000',
'po': '1',
'np': '1',
'ut': 'bd1d9ddb04089700cf9c27f6f7426281',
'fltt': '2',
'invt': '2',
'fid': 'f26',
'fs': 'm:0 f:8,m:1 f:8',
'fields': 'f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152',
'_': '1631107510188',
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['data']['diff'])
temp_df.reset_index(inplace=True)
temp_df['index'] = range(1, len(temp_df)+1)
temp_df.columns = [
'序号',
'最新价',
'涨跌幅',
'涨跌额',
'成交量',
'成交额',
'振幅',
'换手率',
'市盈率-动态',
'量比',
'_',
'代码',
'_',
'名称',
'最高',
'最低',
'今开',
'昨收',
'_',
'_',
'_',
'市净率',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
'_',
]
temp_df = temp_df[[
'序号',
'代码',
'名称',
'最新价',
'涨跌幅',
'涨跌额',
'成交量',
'成交额',
'振幅',
'最高',
'最低',
'今开',
'昨收',
'量比',
'换手率',
'市盈率-动态',
'市净率',
]]
temp_df['最新价'] = pd.to_numeric(temp_df['最新价'], errors="coerce")
temp_df['涨跌幅'] = pd.to_numeric(temp_df['涨跌幅'], errors="coerce")
temp_df['涨跌额'] = pd.to_numeric(temp_df['涨跌额'], errors="coerce")
temp_df['成交量'] = pd.to_numeric(temp_df['成交量'], errors="coerce")
temp_df['成交额'] = pd.to_numeric(temp_df['成交额'], errors="coerce")
temp_df['振幅'] = pd.to_numeric(temp_df['振幅'], errors="coerce")
temp_df['最高'] = pd.to_numeric(temp_df['最高'], errors="coerce")
    temp_df['最低'] = pd.to_numeric(temp_df['最低'], errors="coerce")
'''
Tests for bipartitepandas
DATE: March 2021
'''
import pytest
import numpy as np
import pandas as pd
import bipartitepandas as bpd
import pickle
###################################
##### Tests for BipartiteBase #####
###################################
def test_refactor_1():
# 2 movers between firms 0 and 1, and 1 stayer at firm 2.
worker_data = []
# Firm 0 -> 1
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Firm 1 -> 0
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 0, 'y': 1., 't': 2})
# Firm 2 -> 2
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 2, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
def test_refactor_2():
# 2 movers between firms 0 and 1, and 1 stayer at firm 2. Time has jumps.
worker_data = []
# Firm 0 -> 1
# Time 1 -> 3
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 3})
# Firm 1 -> 0
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 0, 'y': 1., 't': 2})
# Firm 2 -> 2
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 2, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
def test_refactor_3():
# 1 mover between firms 0 and 1, and 2 between firms 1 and 2.
worker_data = []
# Firm 0 -> 1
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Firm 2 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 1, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 2
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[2]['i'] == 2
assert movers.iloc[2]['j1'] == 2
assert movers.iloc[2]['j2'] == 1
assert movers.iloc[2]['y1'] == 1
assert movers.iloc[2]['y2'] == 2
def test_refactor_4():
# 1 mover between firms 0 and 1, and 2 between firms 1 and 2.
worker_data = []
# Firm 0 -> 1 -> 0
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
worker_data.append({'i': 0, 'j': 0, 'y': 1., 't': 3})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Firm 2 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 1, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j1'] == 1
assert movers.iloc[2]['j2'] == 2
assert movers.iloc[2]['y1'] == 1
assert movers.iloc[2]['y2'] == 1
assert movers.iloc[3]['i'] == 2
assert movers.iloc[3]['j1'] == 2
assert movers.iloc[3]['j2'] == 1
assert movers.iloc[3]['y1'] == 1
assert movers.iloc[3]['y2'] == 2
def test_refactor_5():
# 1 mover between firms 0 and 1, and 2 between firms 1 and 2. Time has jumps.
worker_data = []
# Firm 0 -> 1 -> 0
# Time 1 -> 2 -> 4
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
worker_data.append({'i': 0, 'j': 0, 'y': 1., 't': 4})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Firm 2 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 1, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j1'] == 1
assert movers.iloc[2]['j2'] == 2
assert movers.iloc[2]['y1'] == 1
assert movers.iloc[2]['y2'] == 1
assert movers.iloc[3]['i'] == 2
assert movers.iloc[3]['j1'] == 2
assert movers.iloc[3]['j2'] == 1
assert movers.iloc[3]['y1'] == 1
assert movers.iloc[3]['y2'] == 2
def test_refactor_6():
# 1 mover between firms 0 and 1, and 2 between firms 1 and 2. Time has jumps.
worker_data = []
# Firm 0 -> 0 -> 1 -> 0
# Time 1 -> 2 -> 3 -> 5
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 2})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 3})
worker_data.append({'i': 0, 'j': 0, 'y': 1., 't': 5})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Firm 2 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 1, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j1'] == 1
assert movers.iloc[2]['j2'] == 2
assert movers.iloc[2]['y1'] == 1
assert movers.iloc[2]['y2'] == 1
assert movers.iloc[3]['i'] == 2
assert movers.iloc[3]['j1'] == 2
assert movers.iloc[3]['j2'] == 1
assert movers.iloc[3]['y1'] == 1
assert movers.iloc[3]['y2'] == 2
def test_refactor_7():
# 1 mover between firms 0 and 1, and 2 between firms 1 and 2. Time has jumps.
worker_data = []
# Firm 0 -> 0 -> 1 -> 0
# Time 1 -> 3 -> 4 -> 6
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 3})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 4})
worker_data.append({'i': 0, 'j': 0, 'y': 1., 't': 6})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Firm 2 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 1, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j1'] == 1
assert movers.iloc[2]['j2'] == 2
assert movers.iloc[2]['y1'] == 1
assert movers.iloc[2]['y2'] == 1
assert movers.iloc[3]['i'] == 2
assert movers.iloc[3]['j1'] == 2
assert movers.iloc[3]['j2'] == 1
assert movers.iloc[3]['y1'] == 1
assert movers.iloc[3]['y2'] == 2
def test_refactor_8():
# 2 movers between firms 0 and 1, and 1 between firms 1 and 2. Time has jumps.
worker_data = []
# Firm 0 -> 0 -> 1 -> 0
# Time 1 -> 3 -> 4 -> 6
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 3})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 4})
worker_data.append({'i': 0, 'j': 0, 'y': 1., 't': 6})
# Firm 0 -> 1
worker_data.append({'i': 1, 'j': 0, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 2})
# Firm 2 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 1, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j1'] == 0
assert movers.iloc[2]['j2'] == 1
assert movers.iloc[2]['y1'] == 1
assert movers.iloc[2]['y2'] == 1
assert movers.iloc[3]['i'] == 2
assert movers.iloc[3]['j1'] == 2
assert movers.iloc[3]['j2'] == 1
assert movers.iloc[3]['y1'] == 1
assert movers.iloc[3]['y2'] == 2
def test_refactor_9():
# 2 movers between firms 0 and 1, and 1 between firms 1 and 2. Time has jumps.
worker_data = []
# Firm 0 -> 0 -> 1 -> 0
# Time 1 -> 3 -> 4 -> 6
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 3})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 4})
worker_data.append({'i': 0, 'j': 0, 'y': 1., 't': 6})
# Firm 1 -> 0
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 0, 'y': 1., 't': 2})
# Firm 2 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 1, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j1'] == 1
assert movers.iloc[2]['j2'] == 0
assert movers.iloc[2]['y1'] == 1
assert movers.iloc[2]['y2'] == 1
assert movers.iloc[3]['i'] == 2
assert movers.iloc[3]['j1'] == 2
assert movers.iloc[3]['j2'] == 1
assert movers.iloc[3]['y1'] == 1
assert movers.iloc[3]['y2'] == 2
def test_refactor_10():
# 1 mover between firms 0 and 1, 1 between firms 1 and 2, and 1 stayer at firm 2.
worker_data = []
# Firm 0 -> 1
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Firm 2 -> 2
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j1'] == 2
assert stayers.iloc[0]['j2'] == 2
assert stayers.iloc[0]['y1'] == 1
assert stayers.iloc[0]['y2'] == 1
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 2
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
def test_refactor_11():
# 1 mover between firms 0 and 1 and 2 and 3, 1 between firms 1 and 2, and 1 stayer at firm 2.
# Check going to event study and back to long, for data where movers have extended periods where they stay at the same firm
worker_data = []
# Firm 0 -> 1 -> 2 -> 3
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
worker_data.append({'i': 0, 'j': 2, 'y': 0.5, 't': 3})
worker_data.append({'i': 0, 'j': 2, 'y': 0.5, 't': 4})
worker_data.append({'i': 0, 'j': 2, 'y': 0.75, 't': 5})
worker_data.append({'i': 0, 'j': 3, 'y': 1.5, 't': 6})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Firm 2 -> 2
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df).clean_data().get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert stayers.iloc[0]['i'] == 0
assert stayers.iloc[0]['j1'] == 2
assert stayers.iloc[0]['j2'] == 2
assert stayers.iloc[0]['y1'] == 0.5
assert stayers.iloc[0]['y2'] == 0.5
assert stayers.iloc[0]['t1'] == 4
assert stayers.iloc[0]['t2'] == 4
assert stayers.iloc[1]['i'] == 2
assert stayers.iloc[1]['j1'] == 2
assert stayers.iloc[1]['j2'] == 2
assert stayers.iloc[1]['y1'] == 1.
assert stayers.iloc[1]['y2'] == 1.
assert stayers.iloc[1]['t1'] == 1
assert stayers.iloc[1]['t2'] == 1
assert stayers.iloc[2]['i'] == 2
assert stayers.iloc[2]['j1'] == 2
assert stayers.iloc[2]['j2'] == 2
assert stayers.iloc[2]['y1'] == 1.
assert stayers.iloc[2]['y2'] == 1.
assert stayers.iloc[2]['t1'] == 2
assert stayers.iloc[2]['t2'] == 2
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2.
assert movers.iloc[0]['y2'] == 1.
assert movers.iloc[0]['t1'] == 1
assert movers.iloc[0]['t2'] == 2
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 2
assert movers.iloc[1]['y1'] == 1.
assert movers.iloc[1]['y2'] == 0.5
assert movers.iloc[1]['t1'] == 2
assert movers.iloc[1]['t2'] == 3
assert movers.iloc[2]['i'] == 0
assert movers.iloc[2]['j1'] == 2
assert movers.iloc[2]['j2'] == 3
assert movers.iloc[2]['y1'] == 0.75
assert movers.iloc[2]['y2'] == 1.5
assert movers.iloc[2]['t1'] == 5
assert movers.iloc[2]['t2'] == 6
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['j1'] == 1
assert movers.iloc[3]['j2'] == 2
assert movers.iloc[3]['y1'] == 1.
assert movers.iloc[3]['y2'] == 1.
assert movers.iloc[3]['t1'] == 1
assert movers.iloc[3]['t2'] == 2
bdf = bdf.get_long()
for row in range(len(bdf)):
df_row = df.iloc[row]
bdf_row = bdf.iloc[row]
for col in ['i', 'j', 'y', 't']:
assert df_row[col] == bdf_row[col]
def test_refactor_12():
# Check going to event study and back to long
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data()
assert len(bdf) == len(bdf.get_es().get_long())
def test_contiguous_fids_11():
# Check contiguous_ids() with firm ids.
worker_data = []
# Firm 0 -> 1
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Firm 1 -> 3
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 3, 'y': 1., 't': 2})
# Firm 3 -> 3
worker_data.append({'i': 2, 'j': 3, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 3, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j1'] == 2
assert stayers.iloc[0]['j2'] == 2
assert stayers.iloc[0]['y1'] == 1
assert stayers.iloc[0]['y2'] == 1
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 2
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
def test_contiguous_wids_12():
# Check contiguous_ids() with worker ids.
worker_data = []
# Firm 0 -> 1
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Worker 3
# Firm 2 -> 2
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j1'] == 2
assert stayers.iloc[0]['j2'] == 2
assert stayers.iloc[0]['y1'] == 1
assert stayers.iloc[0]['y2'] == 1
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 2
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
def test_contiguous_cids_13():
# Check contiguous_ids() with cluster ids.
worker_data = []
# Firm 0 -> 1
# Cluster 1 -> 2
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1, 'g': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2, 'g': 2})
# Firm 1 -> 2
# Cluster 2 -> 1
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1, 'g': 2})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2, 'g': 1})
# Firm 2 -> 2
# Cluster 1 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1, 'g': 1})
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 2, 'g': 1})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j1'] == 2
assert stayers.iloc[0]['j2'] == 2
assert stayers.iloc[0]['y1'] == 1
assert stayers.iloc[0]['y2'] == 1
assert stayers.iloc[0]['g1'] == 0
assert stayers.iloc[0]['g2'] == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[0]['g1'] == 0
assert movers.iloc[0]['g2'] == 1
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 2
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[1]['g1'] == 1
assert movers.iloc[1]['g2'] == 0
def test_contiguous_cids_14():
# Check contiguous_ids() with cluster ids.
worker_data = []
# Firm 0 -> 1
# Cluster 2 -> 1
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1, 'g': 2})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2, 'g': 1})
# Firm 1 -> 2
# Cluster 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1, 'g': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2, 'g': 2})
# Firm 2 -> 2
# Cluster 2 -> 2
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1, 'g': 2})
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 2, 'g': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df, include_id_reference_dict=True)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es().original_ids()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[0]['original_g1'] == 2
assert movers.iloc[0]['original_g2'] == 1
assert movers.iloc[0]['g1'] == 0
assert movers.iloc[0]['g2'] == 1
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 2
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[1]['original_g1'] == 1
assert movers.iloc[1]['original_g2'] == 2
assert movers.iloc[1]['g1'] == 1
assert movers.iloc[1]['g2'] == 0
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j1'] == 2
assert stayers.iloc[0]['j2'] == 2
assert stayers.iloc[0]['y1'] == 1
assert stayers.iloc[0]['y2'] == 1
assert stayers.iloc[0]['original_g1'] == 2
assert stayers.iloc[0]['original_g2'] == 2
assert stayers.iloc[0]['g1'] == 0
assert stayers.iloc[0]['g2'] == 0
def test_col_dict_15():
# Check that col_dict works properly.
worker_data = []
# Firm 0 -> 1
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Worker 3
# Firm 2 -> 2
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)]).rename({'j': 'firm', 'i': 'worker'}, axis=1)
bdf = bpd.BipartiteLong(data=df, col_dict={'j': 'firm', 'i': 'worker'})
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j1'] == 2
assert stayers.iloc[0]['j2'] == 2
assert stayers.iloc[0]['y1'] == 1
assert stayers.iloc[0]['y2'] == 1
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 2
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
def test_worker_year_unique_16_1():
# Workers with multiple jobs in the same year, keep the highest paying, with long format. Testing 'max', 'sum', and 'mean' options, where options should not have an effect.
worker_data = []
# Firm 0 -> 1
# Time 1 -> 2
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Firm 1 -> 2 -> 3
# Time 1 -> 2 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
worker_data.append({'i': 1, 'j': 3, 'y': 0.5, 't': 2})
# Worker 3
# Firm 2 -> 1 -> 2
# Time 1 -> 1 -> 2
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 3, 'j': 1, 'y': 1.5, 't': 1})
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
for how in ['max', 'sum', 'mean']:
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data(bpd.clean_params({'i_t_how': how}))
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j'] == 0
assert movers.iloc[0]['y'] == 2
assert movers.iloc[0]['t'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j'] == 1
assert movers.iloc[1]['y'] == 1
assert movers.iloc[1]['t'] == 2
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j'] == 1
assert movers.iloc[2]['y'] == 1
assert movers.iloc[2]['t'] == 1
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['j'] == 2
assert movers.iloc[3]['y'] == 1
assert movers.iloc[3]['t'] == 2
assert movers.iloc[4]['i'] == 2
assert movers.iloc[4]['j'] == 1
assert movers.iloc[4]['y'] == 1.5
assert movers.iloc[4]['t'] == 1
assert movers.iloc[5]['i'] == 2
assert movers.iloc[5]['j'] == 2
assert movers.iloc[5]['y'] == 1
assert movers.iloc[5]['t'] == 2
def test_worker_year_unique_16_2():
# Workers with multiple jobs in the same year, keep the highest paying, with long format. Testing 'max', 'sum' and 'mean' options, where options should have an effect.
worker_data = []
# Firm 0 -> 1
# Time 1 -> 2
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Firm 1 -> 2 -> 2 -> 3
# Time 1 -> 2 -> 2 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
worker_data.append({'i': 1, 'j': 2, 'y': 1.5, 't': 2})
worker_data.append({'i': 1, 'j': 3, 'y': 0.5, 't': 2})
# Worker 3
# Firm 2 -> 1 -> 2
# Time 1 -> 1 -> 2
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 3, 'j': 1, 'y': 1.5, 't': 1})
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
for how in ['max', 'sum', 'mean']:
bdf = bpd.BipartiteLong(data=df.copy())
bdf = bdf.clean_data(bpd.clean_params({'i_t_how': how}))
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j'] == 0
assert movers.iloc[0]['y'] == 2
assert movers.iloc[0]['t'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j'] == 1
assert movers.iloc[1]['y'] == 1
assert movers.iloc[1]['t'] == 2
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j'] == 1
assert movers.iloc[2]['y'] == 1
assert movers.iloc[2]['t'] == 1
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['j'] == 2
if how == 'max':
assert movers.iloc[3]['y'] == 1.5
elif how == 'sum':
assert movers.iloc[3]['y'] == 2.5
elif how == 'mean':
assert movers.iloc[3]['y'] == 1.25
assert movers.iloc[3]['t'] == 2
assert movers.iloc[4]['i'] == 2
assert movers.iloc[4]['j'] == 1
assert movers.iloc[4]['y'] == 1.5
assert movers.iloc[4]['t'] == 1
assert movers.iloc[5]['i'] == 2
assert movers.iloc[5]['j'] == 2
assert movers.iloc[5]['y'] == 1
assert movers.iloc[5]['t'] == 2
def test_worker_year_unique_16_3():
# Workers with multiple jobs in the same year, keep the highest paying, with collapsed long format. Testing 'max', 'sum', and 'mean' options, where options should have an effect. Using collapsed long data.
worker_data = []
# Firm 0 -> 1
# Time 1 -> 2
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't1': 1, 't2': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't1': 2, 't2': 2})
# Firm 1 -> 2 -> 2 -> 3
# Time 1 -> 2 -> 2 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't1': 1, 't2': 2})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't1': 2, 't2': 2})
worker_data.append({'i': 1, 'j': 2, 'y': 1.5, 't1': 2, 't2': 2})
worker_data.append({'i': 1, 'j': 3, 'y': 0.5, 't1': 2, 't2': 2})
# Worker 3
# Firm 2 -> 1
# Time 1 -> 1
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't1': 1, 't2': 2})
worker_data.append({'i': 3, 'j': 1, 'y': 1.5, 't1': 1, 't2': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
for how in ['max', 'sum', 'mean']:
bdf = bpd.BipartiteLongCollapsed(data=df)
bdf = bdf.clean_data(bpd.clean_params({'i_t_how': how}))
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j'] == 1
assert stayers.iloc[0]['y'] == 1.5
assert stayers.iloc[0]['t1'] == 1
assert stayers.iloc[0]['t2'] == 2
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j'] == 0
assert movers.iloc[0]['y'] == 2
assert movers.iloc[0]['t1'] == 1
assert movers.iloc[0]['t2'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j'] == 1
assert movers.iloc[1]['y'] == 1
assert movers.iloc[1]['t1'] == 2
assert movers.iloc[1]['t2'] == 2
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j'] == 1
assert movers.iloc[2]['y'] == 1
assert movers.iloc[2]['t1'] == 1
assert movers.iloc[2]['t2'] == 1
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['j'] == 2
if how == 'max':
assert movers.iloc[3]['y'] == 1.5
elif how == 'sum':
assert movers.iloc[3]['y'] == 2.5
elif how == 'mean':
assert movers.iloc[3]['y'] == 1.25
assert movers.iloc[3]['t1'] == 2
assert movers.iloc[3]['t2'] == 2
def test_worker_year_unique_16_4():
# Workers with multiple jobs in the same year, keep the highest paying, with event study format. Testing 'max', 'sum', and 'mean' options, where options should have an effect. NOTE: because of how data converts from event study to long (it only shifts period 2 (e.g. j2, y2) for the last row, as it assumes observations zigzag), it will only correct duplicates for period 1
worker_data = []
# Worker 0
worker_data.append({'i': 0, 'j1': 0, 'j2': 1, 'y1': 2., 'y2': 1., 't1': 1, 't2': 2})
# Worker 1
worker_data.append({'i': 1, 'j1': 1, 'j2': 2, 'y1': 0.5, 'y2': 1.5, 't1': 1, 't2': 2})
worker_data.append({'i': 1, 'j1': 1, 'j2': 2, 'y1': 0.75, 'y2': 1., 't1': 1, 't2': 2})
worker_data.append({'i': 1, 'j1': 2, 'j2': 1, 'y1': 1., 'y2': 2., 't1': 1, 't2': 2})
# Worker 3
worker_data.append({'i': 3, 'j1': 2, 'j2': 2, 't1': 1, 't2': 1, 'y1': 1., 'y2': 1.})
worker_data.append({'i': 3, 'j1': 2, 'j2': 2, 'y1': 1., 'y2': 1., 't1': 2, 't2': 2})
worker_data.append({'i': 3, 'j1': 1, 'j2': 1, 'y1': 1.5, 'y2': 1.5, 't1': 1, 't2': 1})
worker_data.append({'i': 3, 'j1': 1, 'j2': 1, 'y1': 1.5, 'y2': 1.5, 't1': 2, 't2': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
for how in ['max', 'sum', 'mean']:
bdf = bpd.BipartiteEventStudy(data=df.copy(), include_id_reference_dict=True)
bdf = bdf.clean_data(bpd.clean_params({'i_t_how': how})).original_ids()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['original_i'] == 3
assert stayers.iloc[0]['j1'] == 1
assert stayers.iloc[0]['j2'] == 1
assert stayers.iloc[0]['y1'] == 1.5
assert stayers.iloc[0]['y2'] == 1.5
assert stayers.iloc[0]['t1'] == 1
assert stayers.iloc[0]['t2'] == 1
assert stayers.iloc[1]['i'] == 2
assert stayers.iloc[1]['original_i'] == 3
assert stayers.iloc[1]['j1'] == 1
assert stayers.iloc[1]['j2'] == 1
assert stayers.iloc[1]['y1'] == 1.5
assert stayers.iloc[1]['y2'] == 1.5
assert stayers.iloc[1]['t1'] == 2
assert stayers.iloc[1]['t2'] == 2
assert movers.iloc[0]['original_i'] == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[0]['t1'] == 1
assert movers.iloc[0]['t2'] == 2
assert movers.iloc[1]['original_i'] == 1
assert movers.iloc[1]['i'] == 1
if how == 'max':
assert movers.iloc[1]['j1'] == 2
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['j2'] == 1
assert movers.iloc[1]['y2'] == 2
elif how == 'sum':
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['y1'] == 1.25
assert movers.iloc[1]['j2'] == 2
assert movers.iloc[1]['y2'] == 2.5
elif how == 'mean':
assert movers.iloc[1]['j1'] == 2
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['j2'] == 1
assert movers.iloc[1]['y2'] == 2
assert movers.iloc[1]['t1'] == 1
assert movers.iloc[1]['t2'] == 2
def test_string_ids_17():
# String worker and firm ids.
worker_data = []
# Worker 'a'
worker_data.append({'i': 'a', 'j': 'a', 'y': 2., 't': 1})
worker_data.append({'i': 'a', 'j': 'b', 'y': 1., 't': 2})
# Worker 'b'
worker_data.append({'i': 'b', 'j': 'b', 'y': 1., 't': 1})
worker_data.append({'i': 'b', 'j': 'c', 'y': 1., 't': 2})
worker_data.append({'i': 'b', 'j': 'd', 'y': 0.5, 't': 2})
# Worker 'd'
worker_data.append({'i': 'd', 'j': 'c', 'y': 1., 't': 1})
worker_data.append({'i': 'd', 'j': 'b', 'y': 1.5, 't': 1})
worker_data.append({'i': 'd', 'j': 'c', 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j'] == 0
assert movers.iloc[0]['y'] == 2
assert movers.iloc[0]['t'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j'] == 1
assert movers.iloc[1]['y'] == 1
assert movers.iloc[1]['t'] == 2
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j'] == 1
assert movers.iloc[2]['y'] == 1
assert movers.iloc[2]['t'] == 1
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['j'] == 2
assert movers.iloc[3]['y'] == 1
assert movers.iloc[3]['t'] == 2
assert movers.iloc[4]['i'] == 2
assert movers.iloc[4]['j'] == 1
assert movers.iloc[4]['y'] == 1.5
assert movers.iloc[4]['t'] == 1
assert movers.iloc[5]['i'] == 2
assert movers.iloc[5]['j'] == 2
assert movers.iloc[5]['y'] == 1
assert movers.iloc[5]['t'] == 2
def test_general_methods_18():
# Test some general methods, like n_workers/n_firms/n_clusters, included_cols(), drop(), and rename().
worker_data = []
# Worker 0
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1, 'g': 2})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2, 'g': 1})
# Worker 1
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1, 'g': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2, 'g': 2})
# Worker 2
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1, 'g': 2})
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 2, 'g': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
assert bdf.n_workers() == 3
assert bdf.n_firms() == 3
assert bdf.n_clusters() == 2
correct_cols = True
all_cols = bdf._included_cols()
for col in ['i', 'j', 'y', 't', 'g']:
if col not in all_cols:
correct_cols = False
break
assert correct_cols
bdf.drop('g1', axis=1, inplace=True)
assert 'g1' in bdf.columns and 'g2' in bdf.columns
bdf.drop('g', axis=1, inplace=True)
assert 'g1' not in bdf.columns and 'g2' not in bdf.columns
bdf.rename({'i': 'w'})
assert 'i' in bdf.columns
bdf['g1'] = 1
bdf['g2'] = 1
bdf.col_dict['g1'] = 'g1'
bdf.col_dict['g2'] = 'g2'
assert 'g1' in bdf.columns and 'g2' in bdf.columns
bdf.rename({'g': 'r'})
assert 'g1' not in bdf.columns and 'g2' not in bdf.columns
def test_save_19():
# Make sure changing attributes in a saved version does not overwrite values in the original.
worker_data = []
# Worker 0
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Worker 1
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
worker_data.append({'i': 1, 'j': 3, 'y': 0.5, 't': 2})
# Worker 3
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 3, 'j': 1, 'y': 1.5, 't': 1})
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
# Long
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data().drop('m', axis=1, inplace=True)
bdf2 = bdf.copy()
bdf2 = bdf2.gen_m(copy=False)
assert 'm' in bdf2._included_cols() and 'm' not in bdf._included_cols()
# Event study
bdf = bdf.gen_m(copy=False).get_es()
bdf = bdf.clean_data().drop('m', axis=1, inplace=True)
bdf2 = bdf.copy()
bdf2 = bdf2.gen_m(copy=False)
assert 'm' in bdf2._included_cols() and 'm' not in bdf._included_cols()
# Collapsed long
bdf = bdf.gen_m(copy=False).get_long().get_collapsed_long()
bdf = bdf.clean_data().drop('m', axis=1, inplace=True)
bdf2 = bdf.copy()
bdf2 = bdf2.gen_m(copy=False)
assert 'm' in bdf2._included_cols() and 'm' not in bdf._included_cols()
# Collapsed event study
bdf = bdf.gen_m(copy=False).get_es()
bdf = bdf.clean_data().drop('m', axis=1, inplace=True)
bdf2 = bdf.copy()
bdf2 = bdf2.gen_m(copy=False)
assert 'm' in bdf2._included_cols() and 'm' not in bdf._included_cols()
def test_id_reference_dict_20():
# String worker and firm ids, link with id_reference_dict.
worker_data = []
# Worker 'a'
worker_data.append({'i': 'a', 'j': 'a', 'y': 2., 't': 1})
worker_data.append({'i': 'a', 'j': 'b', 'y': 1., 't': 2})
# Worker 'b'
worker_data.append({'i': 'b', 'j': 'b', 'y': 1., 't': 1})
worker_data.append({'i': 'b', 'j': 'c', 'y': 1., 't': 2})
worker_data.append({'i': 'b', 'j': 'd', 'y': 0.5, 't': 2})
# Worker 'd'
worker_data.append({'i': 'd', 'j': 'c', 'y': 1., 't': 1})
worker_data.append({'i': 'd', 'j': 'b', 'y': 1.5, 't': 1})
worker_data.append({'i': 'd', 'j': 'c', 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df, include_id_reference_dict=True)
bdf = bdf.clean_data()
id_reference_dict = bdf.id_reference_dict
merge_df = bdf.merge(id_reference_dict['i'], how='left', left_on='i', right_on='adjusted_ids_1').rename({'original_ids': 'original_i'})
merge_df = merge_df.merge(id_reference_dict['j'], how='left', left_on='j', right_on='adjusted_ids_1').rename({'original_ids': 'original_j'})
stayers = merge_df[merge_df['m'] == 0]
movers = merge_df[merge_df['m'] == 1]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['original_i'] == 'a'
assert movers.iloc[0]['j'] == 0
assert movers.iloc[0]['original_j'] == 'a'
assert movers.iloc[0]['y'] == 2
assert movers.iloc[0]['t'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['original_i'] == 'a'
assert movers.iloc[1]['j'] == 1
assert movers.iloc[1]['original_j'] == 'b'
assert movers.iloc[1]['y'] == 1
assert movers.iloc[1]['t'] == 2
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['original_i'] == 'b'
assert movers.iloc[2]['j'] == 1
assert movers.iloc[2]['original_j'] == 'b'
assert movers.iloc[2]['y'] == 1
assert movers.iloc[2]['t'] == 1
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['original_i'] == 'b'
assert movers.iloc[3]['j'] == 2
assert movers.iloc[3]['original_j'] == 'c'
assert movers.iloc[3]['y'] == 1
assert movers.iloc[3]['t'] == 2
assert movers.iloc[4]['i'] == 2
assert movers.iloc[4]['original_i'] == 'd'
assert movers.iloc[4]['j'] == 1
assert movers.iloc[4]['original_j'] == 'b'
assert movers.iloc[4]['y'] == 1.5
assert movers.iloc[4]['t'] == 1
assert movers.iloc[5]['i'] == 2
assert movers.iloc[5]['original_i'] == 'd'
assert movers.iloc[5]['j'] == 2
assert movers.iloc[5]['original_j'] == 'c'
assert movers.iloc[5]['y'] == 1
assert movers.iloc[5]['t'] == 2
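# Illustrative sketch of the crosswalk used above: clean_data() recodes string ids to
# contiguous integers, and id_reference_dict keeps the original <-> adjusted map. The helper
# below is hypothetical (not part of bipartitepandas) and just restates the merge performed
# in the test, using the same columns it relies on.
def _sketch_original_firm_id(bdf_clean, adjusted_j):
    # Look up the original firm label for a cleaned integer firm id.
    crosswalk = bdf_clean.id_reference_dict['j']
    match = crosswalk[crosswalk['adjusted_ids_1'] == adjusted_j]
    return match['original_ids'].iloc[0]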
def test_id_reference_dict_22():
# String worker and firm ids, link with id_reference_dict. Testing original_ids() method.
worker_data = []
# Worker 'a'
worker_data.append({'i': 'a', 'j': 'a', 'y': 2., 't': 1})
worker_data.append({'i': 'a', 'j': 'b', 'y': 1., 't': 2})
# Worker 'b'
worker_data.append({'i': 'b', 'j': 'b', 'y': 1., 't': 1})
worker_data.append({'i': 'b', 'j': 'c', 'y': 1., 't': 2})
worker_data.append({'i': 'b', 'j': 'd', 'y': 0.5, 't': 2})
# Worker 'd'
worker_data.append({'i': 'd', 'j': 'c', 'y': 1., 't': 1})
worker_data.append({'i': 'd', 'j': 'b', 'y': 1.5, 't': 1})
worker_data.append({'i': 'd', 'j': 'c', 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df, include_id_reference_dict=True)
bdf = bdf.clean_data()
merge_df = bdf.original_ids()
stayers = merge_df[merge_df['m'] == 0]
movers = merge_df[merge_df['m'] == 1]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['original_i'] == 'a'
assert movers.iloc[0]['j'] == 0
assert movers.iloc[0]['original_j'] == 'a'
assert movers.iloc[0]['y'] == 2
assert movers.iloc[0]['t'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['original_i'] == 'a'
assert movers.iloc[1]['j'] == 1
assert movers.iloc[1]['original_j'] == 'b'
assert movers.iloc[1]['y'] == 1
assert movers.iloc[1]['t'] == 2
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['original_i'] == 'b'
assert movers.iloc[2]['j'] == 1
assert movers.iloc[2]['original_j'] == 'b'
assert movers.iloc[2]['y'] == 1
assert movers.iloc[2]['t'] == 1
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['original_i'] == 'b'
assert movers.iloc[3]['j'] == 2
assert movers.iloc[3]['original_j'] == 'c'
assert movers.iloc[3]['y'] == 1
assert movers.iloc[3]['t'] == 2
assert movers.iloc[4]['i'] == 2
assert movers.iloc[4]['original_i'] == 'd'
assert movers.iloc[4]['j'] == 1
assert movers.iloc[4]['original_j'] == 'b'
assert movers.iloc[4]['y'] == 1.5
assert movers.iloc[4]['t'] == 1
assert movers.iloc[5]['i'] == 2
assert movers.iloc[5]['original_i'] == 'd'
assert movers.iloc[5]['j'] == 2
assert movers.iloc[5]['original_j'] == 'c'
assert movers.iloc[5]['y'] == 1
assert movers.iloc[5]['t'] == 2
def test_id_reference_dict_23():
# String worker and firm ids, link with id_reference_dict. Testing original_ids() method where there are multiple steps of references.
worker_data = []
# Worker 'a'
# Firm a -> b -> c turns into 0 -> 1 -> 2 turns into 0 -> 1
worker_data.append({'i': 'a', 'j': 'a', 'y': 2., 't': 1})
worker_data.append({'i': 'a', 'j': 'b', 'y': 1., 't': 2})
worker_data.append({'i': 'a', 'j': 'c', 'y': 1.5, 't': 3})
# Worker 'b'
# Firm b -> d turns into 1 -> 3 turns into 0 -> 2
worker_data.append({'i': 'b', 'j': 'b', 'y': 1., 't': 1})
worker_data.append({'i': 'b', 'j': 'd', 'y': 1., 't': 2})
worker_data.append({'i': 'b', 'j': 'c', 'y': 0.5, 't': 2})
# Worker 'd'
# Firm b -> d turns into 1 -> 3 turns into 0 -> 2
worker_data.append({'i': 'd', 'j': 'd', 'y': 1., 't': 1})
worker_data.append({'i': 'd', 'j': 'b', 'y': 1.5, 't': 1})
worker_data.append({'i': 'd', 'j': 'd', 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df, include_id_reference_dict=True)
bdf = bdf.clean_data()
bdf = bdf[bdf['j'] > 0]
bdf = bdf.clean_data(bpd.clean_params({'connectedness': None}))
merge_df = bdf.original_ids()
stayers = merge_df[merge_df['m'] == 0]
movers = merge_df[merge_df['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['original_i'] == 'a'
assert movers.iloc[0]['j'] == 0
assert movers.iloc[0]['original_j'] == 'b'
assert movers.iloc[0]['y'] == 1
assert movers.iloc[0]['t'] == 2
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['original_i'] == 'a'
assert movers.iloc[1]['j'] == 1
assert movers.iloc[1]['original_j'] == 'c'
assert movers.iloc[1]['y'] == 1.5
assert movers.iloc[1]['t'] == 3
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['original_i'] == 'b'
assert movers.iloc[2]['j'] == 0
assert movers.iloc[2]['original_j'] == 'b'
assert movers.iloc[2]['y'] == 1
assert movers.iloc[2]['t'] == 1
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['original_i'] == 'b'
assert movers.iloc[3]['j'] == 2
assert movers.iloc[3]['original_j'] == 'd'
assert movers.iloc[3]['y'] == 1
assert movers.iloc[3]['t'] == 2
assert movers.iloc[4]['i'] == 2
assert movers.iloc[4]['original_i'] == 'd'
assert movers.iloc[4]['j'] == 0
assert movers.iloc[4]['original_j'] == 'b'
assert movers.iloc[4]['y'] == 1.5
assert movers.iloc[4]['t'] == 1
assert movers.iloc[5]['i'] == 2
assert movers.iloc[5]['original_i'] == 'd'
assert movers.iloc[5]['j'] == 2
assert movers.iloc[5]['original_j'] == 'd'
assert movers.iloc[5]['y'] == 1
assert movers.iloc[5]['t'] == 2
def test_fill_time_24_1():
# Test .fill_periods() method for long format, with no data to fill in.
worker_data = []
# Worker 0
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Worker 1
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Worker 3
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
new_df = bdf.fill_periods()
stayers = new_df[new_df['m'] == 0]
movers = new_df[new_df['m'] == 1]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j'] == 2
assert stayers.iloc[0]['y'] == 1
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j'] == 0
assert movers.iloc[0]['y'] == 2
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j'] == 1
assert movers.iloc[1]['y'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j'] == 1
assert movers.iloc[2]['y'] == 1
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['j'] == 2
assert movers.iloc[3]['y'] == 1
def test_fill_time_24_2():
# Test .fill_periods() method for long format, with 1 row of data to fill in.
worker_data = []
# Worker 0
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Worker 1
# Time 1 -> 3
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 3})
# Worker 3
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
new_df = bdf.fill_periods()
stayers = new_df[new_df.groupby('i')['m'].transform('max') == 0]
movers = new_df[new_df.groupby('i')['m'].transform('max') == 1]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j'] == 2
assert stayers.iloc[0]['y'] == 1
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j'] == 0
assert movers.iloc[0]['y'] == 2
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j'] == 1
assert movers.iloc[1]['y'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j'] == 1
assert movers.iloc[2]['y'] == 1
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['j'] == - 1
assert np.isnan(movers.iloc[3]['y'])
assert np.isnan(movers.iloc[3]['m'])
assert movers.iloc[4]['i'] == 1
assert movers.iloc[4]['j'] == 2
assert movers.iloc[4]['y'] == 1
def test_fill_time_24_3():
# Test .fill_periods() method for long format, with 2 rows of data to fill in.
worker_data = []
# Worker 0
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Worker 1
# Time 1 -> 4
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 4})
# Worker 3
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
new_df = bdf.fill_periods()
stayers = new_df[new_df.groupby('i')['m'].transform('max') == 0]
movers = new_df[new_df.groupby('i')['m'].transform('max') == 1]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j'] == 2
assert stayers.iloc[0]['y'] == 1
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j'] == 0
assert movers.iloc[0]['y'] == 2
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j'] == 1
assert movers.iloc[1]['y'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j'] == 1
assert movers.iloc[2]['y'] == 1
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['j'] == - 1
assert np.isnan(movers.iloc[3]['y'])
assert np.isnan(movers.iloc[3]['m'])
assert movers.iloc[4]['i'] == 1
assert movers.iloc[4]['j'] == - 1
assert np.isnan(movers.iloc[4]['y'])
assert np.isnan(movers.iloc[4]['m'])
assert movers.iloc[5]['i'] == 1
assert movers.iloc[5]['j'] == 2
assert movers.iloc[5]['y'] == 1
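# Note on the behaviour asserted above (interpretation only): fill_periods() appears to insert
# one placeholder row per missing period for a mover, with j set to -1 and both y and m left
# as NaN, so a worker observed at t = 1 and t = 4 gains rows for t = 2 and t = 3. The helper
# below is hypothetical and only isolates those placeholder rows.
def _sketch_filled_rows(filled_frame, worker_id):
    # Placeholder rows created by fill_periods() carry the j == -1 sentinel.
    sub = filled_frame[filled_frame['i'] == worker_id]
    return sub[sub['j'] == -1]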
def test_uncollapse_25():
# Convert from collapsed long to long format.
worker_data = []
# Worker 0
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't1': 1, 't2': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't1': 2, 't2': 2})
# Worker 1
# Time 1 -> 3
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't1': 1, 't2': 2})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't1': 2, 't2': 2})
worker_data.append({'i': 1, 'j': 2, 'y': 1.5, 't1': 2, 't2': 2})
worker_data.append({'i': 1, 'j': 3, 'y': 0.5, 't1': 2, 't2': 2})
# Worker 3
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't1': 1, 't2': 2})
worker_data.append({'i': 3, 'j': 1, 'y': 1.5, 't1': 1, 't2': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLongCollapsed(data=df).uncollapse()
assert bdf.iloc[0]['i'] == 0
assert bdf.iloc[0]['j'] == 0
assert bdf.iloc[0]['y'] == 2
assert bdf.iloc[0]['t'] == 1
assert bdf.iloc[1]['i'] == 0
assert bdf.iloc[1]['j'] == 1
assert bdf.iloc[1]['y'] == 1
assert bdf.iloc[1]['t'] == 2
assert bdf.iloc[2]['i'] == 1
assert bdf.iloc[2]['j'] == 1
assert bdf.iloc[2]['y'] == 1
assert bdf.iloc[2]['t'] == 1
assert bdf.iloc[3]['i'] == 1
assert bdf.iloc[3]['j'] == 1
assert bdf.iloc[3]['y'] == 1
assert bdf.iloc[3]['t'] == 2
assert bdf.iloc[4]['i'] == 1
assert bdf.iloc[4]['j'] == 2
assert bdf.iloc[4]['y'] == 1
assert bdf.iloc[4]['t'] == 2
assert bdf.iloc[5]['i'] == 1
assert bdf.iloc[5]['j'] == 2
assert bdf.iloc[5]['y'] == 1.5
assert bdf.iloc[5]['t'] == 2
assert bdf.iloc[6]['i'] == 1
assert bdf.iloc[6]['j'] == 3
assert bdf.iloc[6]['y'] == 0.5
assert bdf.iloc[6]['t'] == 2
assert bdf.iloc[7]['i'] == 3
assert bdf.iloc[7]['j'] == 2
assert bdf.iloc[7]['y'] == 1
assert bdf.iloc[7]['t'] == 1
assert bdf.iloc[8]['i'] == 3
assert bdf.iloc[8]['j'] == 2
assert bdf.iloc[8]['y'] == 1
assert bdf.iloc[8]['t'] == 2
assert bdf.iloc[9]['i'] == 3
assert bdf.iloc[9]['j'] == 1
assert bdf.iloc[9]['y'] == 1.5
assert bdf.iloc[9]['t'] == 1
assert bdf.iloc[10]['i'] == 3
assert bdf.iloc[10]['j'] == 1
assert bdf.iloc[10]['y'] == 1.5
assert bdf.iloc[10]['t'] == 2
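# Note on the behaviour asserted above: uncollapse() expands each collapsed spell [t1, t2]
# back into one long-format row per period, duplicating j and y, so every spell contributes
# (t2 - t1 + 1) rows. The helper below is a hypothetical restatement of that count.
def _sketch_expected_uncollapsed_length(collapsed_df):
    # Total long-format rows implied by the collapsed spells.
    return int((collapsed_df['t2'] - collapsed_df['t1'] + 1).sum())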
def test_keep_ids_26():
# Keep only given ids.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data()
all_fids = bdf['j'].unique()
ids_to_keep = all_fids[: len(all_fids) // 2]
bdf_keep = bdf.get_es().keep_ids('j', ids_to_keep).get_long()
assert set(bdf_keep['j']) == set(ids_to_keep)
# Make sure long and es give same results
bdf_keep2 = bdf.keep_ids('j', ids_to_keep)
assert len(bdf_keep) == len(bdf_keep2)
def test_drop_ids_27():
# Drop given ids.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data()
all_fids = bdf['j'].unique()
ids_to_drop = all_fids[: len(all_fids) // 2]
bdf_keep = bdf.get_es().drop_ids('j', ids_to_drop).get_long()
assert set(bdf_keep['j']) == set(all_fids).difference(set(ids_to_drop))
# Make sure long and es give same results
bdf_keep2 = bdf.drop_ids('j', ids_to_drop)
assert len(bdf_keep) == len(bdf_keep2)
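# Note (interpretation of the two tests above): keep_ids('j', ids) and drop_ids('j', ids) act
# as row filters on the firm id column, so keeping one half of the firms should leave the same
# rows as dropping the complementary half. The check below is a hypothetical sketch, not a test.
def _sketch_keep_vs_drop(bdf_clean, ids_half):
    all_ids = set(bdf_clean['j'].unique())
    kept = bdf_clean.keep_ids('j', list(ids_half))
    dropped = bdf_clean.drop_ids('j', list(all_ids - set(ids_half)))
    return len(kept) == len(dropped)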
def test_min_obs_firms_28_1():
# List only firms that meet a minimum threshold of observations.
# Using long/event study.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data()
threshold = 250
# First, manually estimate the valid set of firms
frame = bdf.copy()
n_moves = frame.groupby('j')['i'].size()
valid_firms = sorted(n_moves[n_moves >= threshold].index)
# Next, estimate the set of valid firms using the built-in function
valid_firms2 = sorted(bdf.min_obs_firms(threshold))
valid_firms3 = sorted(bdf.get_es().min_obs_firms(threshold))
assert (0 < len(valid_firms) < df['j'].nunique())
assert len(valid_firms) == len(valid_firms2) == len(valid_firms3)
for i in range(len(valid_firms)):
assert valid_firms[i] == valid_firms2[i] == valid_firms3[i]
def test_min_obs_firms_28_2():
# List only firms that meet a minimum threshold of observations.
# Using long collapsed/event study collapsed.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data().get_collapsed_long()
threshold = 60
# First, manually estimate the valid set of firms
frame = bdf.copy()
n_moves = frame.groupby('j')['i'].size()
valid_firms = sorted(n_moves[n_moves >= threshold].index)
# Next, estimate the set of valid firms using the built-in function
valid_firms2 = sorted(bdf.min_obs_firms(threshold))
valid_firms3 = sorted(bdf.get_es().min_obs_firms(threshold))
assert (0 < len(valid_firms) < df['j'].nunique())
assert len(valid_firms) == len(valid_firms2) == len(valid_firms3)
for i in range(len(valid_firms)):
assert valid_firms[i] == valid_firms2[i] == valid_firms3[i]
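# Note: the two tests above suggest min_obs_firms(threshold) reduces to a simple groupby count
# of rows per firm. A plain-pandas equivalent, mirroring the manual benchmark used above
# (hypothetical helper, not part of bipartitepandas):
def _sketch_min_obs_firms(frame, threshold):
    n_obs = frame.groupby('j')['i'].size()
    return sorted(n_obs[n_obs >= threshold].index)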
def test_min_obs_frame_29_1():
# Keep only firms that meet a minimum threshold of observations.
# Using long/event study.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data()
threshold = 250
# First, manually estimate the new frame
frame = bdf.copy()
n_moves = frame.groupby('j')['i'].size()
valid_firms = sorted(n_moves[n_moves >= threshold].index)
new_frame = frame.keep_ids('j', valid_firms)
new_frame.reset_index(drop=True, inplace=True)
# Next, estimate the new frame using the built-in function
new_frame2 = bdf.min_obs_frame(threshold)
new_frame3 = bdf.get_es().min_obs_frame(threshold).get_long()
assert (0 < len(new_frame) < len(bdf))
assert len(new_frame) == len(new_frame2) == len(new_frame3)
for i in range(100): # range(len(new_frame)): # It takes too long to go through all rows
for col in ['i', 'j', 'y', 't']:
# Skip 'm' since we didn't recompute it
assert new_frame.iloc[i][col] == new_frame2.iloc[i][col] == new_frame3.iloc[i][col]
for i in range(len(new_frame) - 100, len(new_frame)): # range(len(new_frame)): # It takes too long to go through all rows
for col in ['i', 'j', 'y', 't']:
# Skip 'm' since we didn't recompute it
assert new_frame.iloc[i][col] == new_frame2.iloc[i][col] == new_frame3.iloc[i][col]
def test_min_obs_frame_29_2():
# Keep only firms that meet a minimum threshold of observations.
# Using long collapsed/event study collapsed.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data().get_collapsed_long()
threshold = 60
# First, manually estimate the new frame
frame = bdf.copy()
n_moves = frame.groupby('j')['i'].size()
valid_firms = sorted(n_moves[n_moves >= threshold].index)
new_frame = frame.keep_ids('j', valid_firms)
# Next, estimate the new frame using the built-in function
new_frame2 = bdf.min_obs_frame(threshold)
new_frame3 = bdf.get_es().min_obs_frame(threshold).get_long()
assert (0 < len(new_frame) < len(bdf))
assert len(new_frame) == len(new_frame2) == len(new_frame3)
for i in range(100): # range(len(new_frame)): # It takes too long to go through all rows
for col in ['i', 'j', 'y', 't1', 't2']:
# Skip 'm' since we didn't recompute it
assert new_frame.iloc[i][col] == new_frame2.iloc[i][col] == new_frame3.iloc[i][col]
for i in range(len(new_frame) - 100, len(new_frame)): # range(len(new_frame)): # It takes too long to go through all rows
for col in ['i', 'j', 'y', 't1', 't2']:
# Skip 'm' since we didn't recompute it
assert new_frame.iloc[i][col] == new_frame2.iloc[i][col] == new_frame3.iloc[i][col]
def test_min_workers_firms_30():
# List only firms that meet a minimum threshold of workers.
# Using long/event study/long collapsed/event study collapsed.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data()
threshold = 40
# First, manually estimate the valid set of firms
frame = bdf.copy()
# Count workers
n_workers = frame.groupby('j')['i'].nunique()
valid_firms = sorted(n_workers[n_workers >= threshold].index)
# Next, estimate the set of valid firms using the built-in function
valid_firms2 = sorted(bdf.min_workers_firms(threshold))
valid_firms3 = sorted(bdf.get_es().min_workers_firms(threshold))
valid_firms4 = sorted(bdf.get_collapsed_long().min_workers_firms(threshold))
valid_firms5 = sorted(bdf.get_collapsed_long().get_es().min_workers_firms(threshold))
assert (0 < len(valid_firms) < df['j'].nunique())
assert len(valid_firms) == len(valid_firms2) == len(valid_firms3) == len(valid_firms4) == len(valid_firms5)
for i in range(len(valid_firms)):
assert valid_firms[i] == valid_firms2[i] == valid_firms3[i] == valid_firms4[i] == valid_firms5[i]
def test_min_workers_frame_31():
# Keep only firms that meet a minimum threshold of workers.
# Using long/event study/long collapsed/event study collapsed.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data()
threshold = 60
# First, manually estimate the new frame
frame = bdf.copy()
# Count workers
n_workers = frame.groupby('j')['i'].nunique()
valid_firms = n_workers[n_workers >= threshold].index
new_frame = frame.keep_ids('j', valid_firms).get_collapsed_long()
# Next, estimate the new frame using the built-in function
new_frame2 = bdf.min_workers_frame(threshold).get_collapsed_long()
new_frame3 = bdf.get_es().min_workers_frame(threshold).get_long().get_collapsed_long()
new_frame4 = bdf.get_collapsed_long().min_workers_frame(threshold)
new_frame5 = bdf.get_collapsed_long().get_es().min_workers_frame(threshold).get_long()
assert (0 < len(new_frame) < len(bdf))
assert len(new_frame) == len(new_frame2) == len(new_frame3) == len(new_frame4) == len(new_frame5)
for i in range(100): # range(len(new_frame)): # It takes too long to go through all rows
for col in ['i', 'j', 'y', 't1', 't2']:
# Skip 'm' since we didn't recompute it
assert new_frame.iloc[i][col] == new_frame2.iloc[i][col] == new_frame3.iloc[i][col] == new_frame4.iloc[i][col] == new_frame5.iloc[i][col]
for i in range(len(new_frame) - 100, len(new_frame)): # range(len(new_frame)): # It takes too long to go through all rows
for col in ['i', 'j', 'y', 't1', 't2']:
# Skip 'm' since we didn't recompute it
assert new_frame.iloc[i][col] == new_frame2.iloc[i][col] == new_frame3.iloc[i][col] == new_frame4.iloc[i][col] == new_frame5.iloc[i][col]
def test_min_moves_firms_32_1():
# List only firms that meet a minimum threshold of moves.
# Using long/event study.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data()
threshold = 20
# First, manually estimate the valid set of firms
frame = bdf.copy()
frame.loc[frame.loc[:, 'm'] == 2, 'm'] = 1
n_moves = frame.groupby('j')['m'].sum()
valid_firms = sorted(n_moves[n_moves >= threshold].index)
# Next, estimate the set of valid firms using the built-in function
valid_firms2 = sorted(bdf.min_moves_firms(threshold))
valid_firms3 = sorted(bdf.get_es().min_moves_firms(threshold))
assert (0 < len(valid_firms) < df['j'].nunique())
assert len(valid_firms) == len(valid_firms2) == len(valid_firms3)
for i in range(len(valid_firms)):
assert valid_firms[i] == valid_firms2[i] == valid_firms3[i]
def test_min_moves_firms_32_2():
# List only firms that meet a minimum threshold of moves.
# Using long collapsed/event study collapsed.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data().get_collapsed_long()
threshold = 20
# First, manually estimate the valid set of firms
frame = bdf.copy()
frame.loc[frame.loc[:, 'm'] == 2, 'm'] = 1
n_moves = frame.groupby('j')['m'].sum()
valid_firms = sorted(n_moves[n_moves >= threshold].index)
# Next, estimate the set of valid firms using the built-in function
valid_firms2 = sorted(bdf.min_moves_firms(threshold))
valid_firms3 = sorted(bdf.get_es().min_moves_firms(threshold))
assert (0 < len(valid_firms) < df['j'].nunique())
assert len(valid_firms) == len(valid_firms2) == len(valid_firms3)
for i in range(len(valid_firms)):
assert valid_firms[i] == valid_firms2[i] == valid_firms3[i]
def test_min_moves_frame_33():
# Keep only firms that meet a minimum threshold of moves.
# Using long/event study/long collapsed/event study collapsed.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data()
threshold = 12
# First, manually estimate the valid set of firms
frame = bdf.copy()
frame.loc[frame.loc[:, 'm'] == 2, 'm'] = 1
n_moves = frame.groupby('j')['m'].sum()
valid_firms = sorted(n_moves[n_moves >= threshold].index)
new_frame = frame.keep_ids('j', valid_firms)
# Iterate until set of firms stays the same between loops
loop = True
n_loops = 0
while loop:
n_loops += 1
prev_frame = new_frame
prev_frame.loc[prev_frame.loc[:, 'm'] == 2, 'm'] = 1
# Keep firms with sufficiently many moves
n_moves = prev_frame.groupby('j')['m'].sum()
valid_firms = sorted(n_moves[n_moves >= threshold].index)
new_frame = prev_frame.keep_ids('j', valid_firms)
loop = (len(new_frame) != len(prev_frame))
new_frame = new_frame.get_collapsed_long()
# Next, estimate the new frame using the built-in function
new_frame2 = bdf.min_moves_frame(threshold).get_collapsed_long()
new_frame3 = bdf.get_es().min_moves_frame(threshold).get_long().get_collapsed_long()
new_frame4 = bdf.get_collapsed_long().min_moves_frame(threshold)
new_frame5 = bdf.get_collapsed_long().get_es().min_moves_frame(threshold).get_long()
assert n_loops > 1
assert (0 < len(new_frame) < len(bdf))
assert len(new_frame) == len(new_frame2) == len(new_frame3) == len(new_frame4) == len(new_frame5)
for i in range(100): # range(len(new_frame)): # It takes too long to go through all rows
for col in ['i', 'j', 'y', 't1', 't2']:
# Skip 'm' since we didn't recompute it
assert new_frame.iloc[i][col] == new_frame2.iloc[i][col] == new_frame3.iloc[i][col] == new_frame4.iloc[i][col] == new_frame5.iloc[i][col]
for i in range(len(new_frame) - 100, len(new_frame)): # range(len(new_frame)): # It takes too long to go through all rows
for col in ['i', 'j', 'y', 't1', 't2']:
# Skip 'm' since we didn't recompute it
assert new_frame.iloc[i][col] == new_frame2.iloc[i][col] == new_frame3.iloc[i][col] == new_frame4.iloc[i][col] == new_frame5.iloc[i][col]
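# Note on the loop above: dropping firms that fall below the moves threshold can invalidate
# moves at the firms that remain (a move into a dropped firm no longer counts), so the manual
# benchmark re-applies the filter until the firm set stops shrinking. n_loops > 1 then confirms
# that at least one extra pass was actually needed, matching the fixed-point behaviour expected
# of min_moves_frame().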
def test_min_movers_firms_34():
# List only firms that meet a minimum threshold of movers.
# Using long/event study/long collapsed/event study collapsed.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data()
threshold = 20
# First, manually estimate the valid set of firms
frame = bdf.copy()
# Keep movers
frame = frame[frame['m'] > 0]
n_movers = frame.groupby('j')['i'].nunique()
valid_firms = sorted(n_movers[n_movers >= threshold].index)
# Next, estimate the set of valid firms using the built-in function
valid_firms2 = sorted(bdf.min_movers_firms(threshold))
valid_firms3 = sorted(bdf.get_es().min_movers_firms(threshold))
valid_firms4 = sorted(bdf.get_collapsed_long().min_movers_firms(threshold))
valid_firms5 = sorted(bdf.get_collapsed_long().get_es().min_movers_firms(threshold))
assert (0 < len(valid_firms) < df['j'].nunique())
assert len(valid_firms) == len(valid_firms2) == len(valid_firms3) == len(valid_firms4) == len(valid_firms5)
for i in range(len(valid_firms)):
assert valid_firms[i] == valid_firms2[i] == valid_firms3[i] == valid_firms4[i] == valid_firms5[i]
def test_min_movers_frame_35():
# Keep only firms that meet a minimum threshold of movers.
# Using long/event study/long collapsed/event study collapsed.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data()
threshold = 12
# First, manually estimate the new frame
frame = bdf.copy()
# Keep movers
frame_movers = frame[frame['m'] > 0]
n_movers = frame_movers.groupby('j')['i'].nunique()
valid_firms = n_movers[n_movers >= threshold].index
new_frame = frame.keep_ids('j', valid_firms)
# Iterate until set of firms stays the same between loops
loop = True
n_loops = 0
while loop:
n_loops += 1
prev_frame = new_frame
# Keep movers
prev_frame_movers = prev_frame[prev_frame['m'] > 0]
n_movers = prev_frame_movers.groupby('j')['i'].nunique()
valid_firms = n_movers[n_movers >= threshold].index
new_frame = prev_frame.keep_ids('j', valid_firms)
loop = (len(new_frame) != len(prev_frame))
new_frame = new_frame.get_collapsed_long()
# Next, estimate the new frame using the built-in function
new_frame2 = bdf.min_movers_frame(threshold).get_collapsed_long()
new_frame3 = bdf.get_es().min_movers_frame(threshold).get_long().get_collapsed_long()
new_frame4 = bdf.get_collapsed_long().min_movers_frame(threshold)
new_frame5 = bdf.get_collapsed_long().get_es().min_movers_frame(threshold).get_long()
assert n_loops > 1
assert (0 < len(new_frame) < len(bdf))
assert len(new_frame) == len(new_frame2) == len(new_frame3) == len(new_frame4) == len(new_frame5)
for i in range(100): # range(len(new_frame)): # It takes too long to go through all rows
for col in ['i', 'j', 'y', 't1', 't2']:
# Skip 'm' since we didn't recompute it
assert new_frame.iloc[i][col] == new_frame2.iloc[i][col] == new_frame3.iloc[i][col] == new_frame4.iloc[i][col] == new_frame5.iloc[i][col]
for i in range(len(new_frame) - 100, len(new_frame)): # range(len(new_frame)): # It takes too long to go through all rows
for col in ['i', 'j', 'y', 't1', 't2']:
# Skip 'm' since we didn't recompute it
assert new_frame.iloc[i][col] == new_frame2.iloc[i][col] == new_frame3.iloc[i][col] == new_frame4.iloc[i][col] == new_frame5.iloc[i][col]
###################################
##### Tests for BipartiteLong #####
###################################
def test_long_get_es_extended_1():
# Test get_es_extended() by making sure it is generating the event study correctly for periods_pre=2 and periods_post=1
worker_data = []
# Worker 0
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 3})
worker_data.append({'i': 0, 'j': 0, 'y': 1., 't': 4})
# Worker 1
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
worker_data.append({'i': 1, 'j': 2, 'y': 2., 't': 3})
worker_data.append({'i': 1, 'j': 5, 'y': 1., 't': 3})
# Worker 3
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 2})
worker_data.append({'i': 3, 'j': 3, 'y': 1.5, 't': 3})
# Worker 4
worker_data.append({'i': 4, 'j': 0, 'y': 1., 't': 1})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
# this however will raise as cannot be sorted (on PY3 or older
# numpies)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(
Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# Deprecating NaNs in categories (GH #10748)
# preserve int as far as possible by converting to object if NaN is in
# categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3],
categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
# self.assertTrue(com.is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.],
categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Catch old style constructor usage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
# the next ones are from the old docs, but unfortunately these don't
# trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci)))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(
ci.astype(object), categories=ci.categories)))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = Categorical(xrange(3))
self.assertTrue(cat.equals(exp))
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = pd.Categorical([0, 1, 2], categories=xrange(3))
self.assertTrue(cat.equals(exp))
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
self.assertRaises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
self.assertTrue(exp.equals(res))
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
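# Illustration of the success path above (interpretation only): each code indexes into the
# supplied categories, so
#     Categorical.from_codes([0, 1, 2, 0], ["a", "b", "c"])
# is expected to equal Categorical(["a", "b", "c", "a"]), while codes outside
# range(len(categories)) (other than the NaN sentinel -1) raise ValueError, as the f() cases check.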
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assertTrue(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"],
ordered=True)
cat_rev_base = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = pd.Categorical(["a", "b", "c"], ordered=True)
cat_base = pd.Categorical(["b", "b", "b"], categories=cat.categories,
ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
self.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
self.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
self.assert_numpy_array_equal(res, exp)
# Only categories with same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
cat_rev_base2 = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
def f():
cat_rev > cat_rev_base2
self.assertRaises(TypeError, f)
# Only categories with same ordering information can be compared
cat_unordered = cat.set_ordered(False)
self.assertFalse((cat > cat).any())
def f():
cat > cat_unordered
self.assertRaises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
# comparison with numpy.array will raise in both direction, but only on
# newer numpy versions
a = np.array(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
# The following work via '__array_priority__ = 1000'
# works only on numpy >= 1.7.1
if LooseVersion(np.__version__) > "1.7.1":
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
# Make sure that unequal comparison take the categories order in
# account
cat_rev = pd.Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
self.assert_numpy_array_equal(res, exp)
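# Summary of the rules exercised above (interpretation only): ordered Categoricals compare by
# category position (with categories ["c", "b", "a"], "a" ranks highest, so cat_rev > "b" is
# True only for "a"), both operands must share the same categories and orderedness, and
# comparing against a same-length Series or ndarray raises TypeError here rather than
# broadcasting element-wise.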
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
self.assert_numpy_array_equal(com.isnull(cat), labels == -1)
def test_categories_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assertTrue(factor.equals(self.factor))
def test_describe(self):
# string type
desc = self.factor.describe()
expected = DataFrame({'counts': [3, 2, 3],
'freqs': [3 / 8., 2 / 8., 3 / 8.]},
index=pd.CategoricalIndex(['a', 'b', 'c'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
desc = cat.describe()
expected = DataFrame({'counts': [3, 2, 3, 0],
'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
index=pd.CategoricalIndex(['a', 'b', 'c', 'd'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check an integer one
desc = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1]).describe()
expected = DataFrame({'counts': [5, 3, 3],
'freqs': [5 / 11., 3 / 11., 3 / 11.]},
index=pd.CategoricalIndex([1, 2, 3],
name='categories'))
tm.assert_frame_equal(desc, expected)
# https://github.com/pydata/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
expected = DataFrame({'counts': [1, 2, 1],
'freqs': [1 / 4., 2 / 4., 1 / 4.]},
index=pd.CategoricalIndex([1, 2, np.nan],
categories=[1, 2],
name='categories'))
tm.assert_frame_equal(desc, expected)
# NA as a category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c", np.nan],
categories=["b", "a", "c", np.nan])
result = cat.describe()
expected = DataFrame([[0, 0], [1, 0.25], [2, 0.5], [1, 0.25]],
columns=['counts', 'freqs'],
index=pd.CategoricalIndex(['b', 'a', 'c', np.nan],
name='categories'))
tm.assert_frame_equal(result, expected)
# NA as an unused category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c"],
categories=["b", "a", "c", np.nan])
result = cat.describe()
exp_idx = pd.CategoricalIndex(
['b', 'a', 'c', np.nan], name='categories')
expected = DataFrame([[0, 0], [1, 1 / 3.], [2, 2 / 3.], [0, 0]],
columns=['counts', 'freqs'], index=exp_idx)
tm.assert_frame_equal(result, expected)
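# Summary of the cases above (interpretation only): describe() returns a counts/freqs DataFrame
# indexed by a CategoricalIndex named 'categories'. Unused categories still appear with a zero
# count, and NaN values get their own row even though NaN is not itself a category (the
# deprecated NaN-as-category cases above are the exception).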
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
self.assertEqual(actual, expected)
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
self.assertEqual(actual, expected)
def test_empty_print(self):
factor = Categorical([], ["a", "b", "c"])
expected = ("[], Categories (3, object): [a, b, c]")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
self.assertEqual(actual, expected)
self.assertEqual(expected, actual)
factor = Categorical([], ["a", "b", "c"], ordered=True)
expected = ("[], Categories (3, object): [a < b < c]")
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], [])
expected = ("[], Categories (0, object): []")
self.assertEqual(expected, repr(factor))
def test_print_none_width(self):
# GH10087
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with option_context("display.width", None):
self.assertEqual(exp, repr(a))
def test_unicode_print(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
c = pd.Categorical(['aaaaa', 'bb', 'cccc'] * 20)
expected = u"""\
[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
Length: 60
Categories (3, object): [aaaaa, bb, cccc]"""
self.assertEqual(_rep(c), expected)
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""\
[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
# unicode option should not affect to Categorical, as it doesn't care
# the repr width
with option_context('display.unicode.east_asian_width', True):
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
def test_periodindex(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical.from_array(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype='int64')
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat1._codes, exp_arr)
self.assertTrue(cat1.categories.equals(exp_idx))
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical.from_array(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype='int64')
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat2._codes, exp_arr)
self.assertTrue(cat2.categories.equals(exp_idx2))
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical.from_array(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype='int64')
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
self.assert_numpy_array_equal(cat3._codes, exp_arr)
self.assertTrue(cat3.categories.equals(exp_idx))
def test_categories_assigments(self):
s = pd.Categorical(["a", "b", "c", "a"])
exp = np.array([1, 2, 3, 1])
s.categories = [1, 2, 3]
self.assert_numpy_array_equal(s.__array__(), exp)
self.assert_numpy_array_equal(s.categories, np.array([1, 2, 3]))
# lengthen
def f():
s.categories = [1, 2, 3, 4]
self.assertRaises(ValueError, f)
# shorten
def f():
s.categories = [1, 2]
self.assertRaises(ValueError, f)
def test_construction_with_ordered(self):
# GH 9347, 9190
cat = Categorical([0, 1, 2])
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=False)
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=True)
self.assertTrue(cat.ordered)
def test_ordered_api(self):
# GH 9347
cat1 = pd.Categorical(["a", "c", "b"], ordered=False)
self.assertTrue(cat1.categories.equals(Index(['a', 'b', 'c'])))
self.assertFalse(cat1.ordered)
cat2 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=False)
self.assertTrue(cat2.categories.equals(Index(['b', 'c', 'a'])))
self.assertFalse(cat2.ordered)
cat3 = pd.Categorical(["a", "c", "b"], ordered=True)
self.assertTrue(cat3.categories.equals(Index(['a', 'b', 'c'])))
self.assertTrue(cat3.ordered)
cat4 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=True)
self.assertTrue(cat4.categories.equals(Index(['b', 'c', 'a'])))
self.assertTrue(cat4.ordered)
def test_set_ordered(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
self.assertFalse(cat2.ordered)
cat2 = cat.as_ordered()
self.assertTrue(cat2.ordered)
cat2.as_unordered(inplace=True)
self.assertFalse(cat2.ordered)
cat2.as_ordered(inplace=True)
self.assertTrue(cat2.ordered)
self.assertTrue(cat2.set_ordered(True).ordered)
self.assertFalse(cat2.set_ordered(False).ordered)
cat2.set_ordered(True, inplace=True)
self.assertTrue(cat2.ordered)
cat2.set_ordered(False, inplace=True)
self.assertFalse(cat2.ordered)
# deprecated in v0.16.0
with tm.assert_produces_warning(FutureWarning):
cat.ordered = False
self.assertFalse(cat.ordered)
with tm.assert_produces_warning(FutureWarning):
cat.ordered = True
self.assertTrue(cat.ordered)
def test_set_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
res = cat.set_categories(["c", "b", "a"], inplace=True)
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
self.assertIsNone(res)
res = cat.set_categories(["a", "b", "c"])
# cat must be the same as before
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(res.categories, exp_categories_back)
self.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" included in "new" -> all not included ones are now
# np.nan
cat = Categorical(["a", "b", "c", "a"], ordered=True)
res = cat.set_categories(["a"])
self.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0]))
# still not all "old" in "new"
res = cat.set_categories(["a", "b", "d"])
self.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0]))
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "d"]))
# all "old" included in "new"
cat = cat.set_categories(["a", "b", "c", "d"])
exp_categories = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0]))
self.assert_numpy_array_equal(c.categories, np.array([1, 2, 3, 4]))
self.assert_numpy_array_equal(c.get_values(),
np.array([1, 2, 3, 4, 1]))
c = c.set_categories(
[4, 3, 2, 1
]) # all "pointers" to '4' must be changed from 3 to 0,...
self.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3])
) # positions are changed
self.assert_numpy_array_equal(c.categories, np.array([4, 3, 2, 1])
) # categories are now in new order
self.assert_numpy_array_equal(c.get_values(), np.array([1, 2, 3, 4, 1])
) # output is the same
self.assertEqual(c.min(), 4)
self.assertEqual(c.max(), 1)
# set_categories should set the ordering if specified
c2 = c.set_categories([4, 3, 2, 1], ordered=False)
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
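# Contrast with rename_categories() below (interpretation only): set_categories() keeps the
# underlying values and only remaps the codes to the new category order (the [4, 3, 2, 1]
# example above leaves get_values() unchanged), whereas rename_categories() relabels the
# values themselves, e.g. "a" -> 1.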
def test_rename_categories(self):
cat = pd.Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
self.assert_numpy_array_equal(res.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(res.categories, np.array([1, 2, 3]))
self.assert_numpy_array_equal(cat.__array__(),
np.array(["a", "b", "c", "a"]))
self.assert_numpy_array_equal(cat.categories,
np.array(["a", "b", "c"]))
res = cat.rename_categories([1, 2, 3], inplace=True)
# and now inplace
self.assertIsNone(res)
self.assert_numpy_array_equal(cat.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(cat.categories, np.array([1, 2, 3]))
# lengthen
def f():
cat.rename_categories([1, 2, 3, 4])
self.assertRaises(ValueError, f)
# shorten
def f():
cat.rename_categories([1, 2])
self.assertRaises(ValueError, f)
def test_reorder_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"], categories=["c", "b", "a"],
ordered=True)
# first inplace == False
res = cat.reorder_categories(["c", "b", "a"])
# cat must be the same as before
self.assert_categorical_equal(cat, old)
# only res is changed
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c", "b", "a"], inplace=True)
self.assertIsNone(res)
self.assert_categorical_equal(cat, new)
# not all "old" included in "new"
cat = Categorical(["a", "b", "c", "a"], ordered=True)
def f():
cat.reorder_categories(["a"])
self.assertRaises(ValueError, f)
# still not all "old" in "new"
def f():
cat.reorder_categories(["a", "b", "d"])
self.assertRaises(ValueError, f)
# all "old" included in "new", but too long
def f():
cat.reorder_categories(["a", "b", "c", "d"])
self.assertRaises(ValueError, f)
def test_add_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"],
categories=["a", "b", "c", "d"], ordered=True)
# first inplace == False
res = cat.add_categories("d")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# new is in old categories
def f():
cat.add_categories(["d"])
self.assertRaises(ValueError, f)
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(
list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
self.assert_categorical_equal(res, expected)
def test_remove_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"],
ordered=True)
# first inplace == False
res = cat.remove_categories("c")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.remove_categories("c", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# removal is not in categories
def f():
cat.remove_categories(["c"])
self.assertRaises(ValueError, f)
def test_remove_unused_categories(self):
c = Categorical(["a", "b", "c", "d", "a"],
categories=["a", "b", "c", "d", "e"])
exp_categories_all = np.array(["a", "b", "c", "d", "e"])
exp_categories_dropped = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories, exp_categories_dropped)
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories(inplace=True)
self.assert_numpy_array_equal(c.categories, exp_categories_dropped)
self.assertIsNone(res)
# with NaN values (GH11599)
c = Categorical(["a", "b", "c", np.nan],
categories=["a", "b", "c", "d", "e"])
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "c"]))
self.assert_numpy_array_equal(c.categories, exp_categories_all)
val = ['F', np.nan, 'D', 'B', 'D', 'F', np.nan]
cat = pd.Categorical(values=val, categories=list('ABCDEFG'))
out = cat.remove_unused_categories()
self.assert_numpy_array_equal(out.categories, ['B', 'D', 'F'])
self.assert_numpy_array_equal(out.codes, [2, -1, 1, 0, 1, 2, -1])
self.assertEqual(out.get_values().tolist(), val)
alpha = list('abcdefghijklmnopqrstuvwxyz')
val = np.random.choice(alpha[::2], 10000).astype('object')
val[np.random.choice(len(val), 100)] = np.nan
cat = pd.Categorical(values=val, categories=alpha)
out = cat.remove_unused_categories()
self.assertEqual(out.get_values().tolist(), val.tolist())
def test_nan_handling(self):
# Nans are represented as -1 in codes
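# by default NaN does not get its own category; it only appears in the
# codes as -1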
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0]))
# If categories have nan included, the code should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan, "a"],
categories=["a", "b", np.nan])
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, 2, 0]))
# Changing categories should also make the replaced category np.nan
c = Categorical(["a", "b", "c", "a"])
with tm.assert_produces_warning(FutureWarning):
c.categories = ["a", "b", np.nan] # noqa
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
# Adding nan to categories should make assigned nan point to the
# category!
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, -1, 0]))
# Remove null categories (GH 10156)
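# np.nan, None and NaT should all be accepted (and treated alike) when
# removing null categories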
cases = [
([1.0, 2.0, np.nan], [1.0, 2.0]),
(['a', 'b', None], ['a', 'b']),
([pd.Timestamp('2012-05-01'), pd.NaT],
[pd.Timestamp('2012-05-01')])
]
null_values = [np.nan, None, pd.NaT]
for with_null, without in cases:
with tm.assert_produces_warning(FutureWarning):
base = Categorical([], with_null)
expected = Categorical([], without)
for nullval in null_values:
result = base.remove_categories(nullval)
self.assert_categorical_equal(result, expected)
# Different null values are indistinguishable
for i, j in [(0, 1), (0, 2), (1, 2)]:
nulls = [null_values[i], null_values[j]]
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([], categories=nulls)
self.assertRaises(ValueError, f)
def test_isnull(self):
exp = np.array([False, False, True])
c = Categorical(["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan], categories=["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
# test both nan in categories and as -1
exp = np.array([True, False, True])
c = Categorical(["a", "b", np.nan])
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
c[0] = np.nan
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
def test_codes_immutable(self):
# Codes should be read only
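# the array returned by .codes is expected to be read-only, while the
# Categorical itself stays writable (and _codes remains mutable)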
c = Categorical(["a", "b", "c", "a", np.nan])
exp = np.array([0, 1, 2, 0, -1], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
# Assignments to codes should raise
def f():
c.codes = np.array([0, 1, 2, 0, 1], dtype='int8')
self.assertRaises(ValueError, f)
# changes in the codes array should raise
# np 1.6.1 raises RuntimeError rather than ValueError
codes = c.codes
def f():
codes[4] = 1
self.assertRaises(ValueError, f)
# But even after getting the codes, the original array should still be
# writeable!
c[4] = "a"
exp = np.array([0, 1, 2, 0, 0], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
c._codes[4] = 2
exp = np.array([0, 1, 2, 0, 2], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
def test_min_max(self):
# unordered cats have no min/max
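# min/max are defined by the category order, so they are only available
# for ordered categoricals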
cat = Categorical(["a", "b", "c", "d"], ordered=False)
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Categorical(["a", "b", "c", "d"],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Categorical([np.nan, "b", "c", np.nan],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
_min = cat.min(numeric_only=True)
self.assertEqual(_min, "c")
_max = cat.max(numeric_only=True)
self.assertEqual(_max, "b")
cat = Categorical([np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1],
ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
_min = cat.min(numeric_only=True)
self.assertEqual(_min, 2)
_max = cat.max(numeric_only=True)
self.assertEqual(_max, 1)
def test_unique(self):
# when ordered=False, unique() returns the categories in order of appearance
cat = Categorical(["a", "b"])
exp = np.asarray(["a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(exp))
cat = Categorical(["c", "a", "b", "a", "a"],
categories=["a", "b", "c"])
exp = np.asarray(["c", "a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
exp, categories=['c', 'a', 'b']))
# nan must be removed
cat = Categorical(["b", np.nan, "b", np.nan, "a"],
categories=["a", "b", "c"])
res = cat.unique()
exp = np.asarray(["b", np.nan, "a"], dtype=object)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
["b", np.nan, "a"], categories=["b", "a"]))
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(['b', 'a', 'b'], categories=['a', 'b'], ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['c', 'b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['c', 'b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b', 'c'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'b', np.nan, 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', np.nan, 'a'], dtype=object)
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
def test_mode(self):
s = Categorical([1, 1, 2, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 1, 1, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5, 1], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
# NaN should not become the mode!
s = Categorical([np.nan, np.nan, np.nan, 4, 5],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, np.nan, 4, 5, 4],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, 4, 5, 4], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
def test_sort(self):
# unordered cats are sortable
cat = Categorical(["a", "b", "b", "a"], ordered=False)
cat.sort_values()
cat.sort()
cat = Categorical(["a", "c", "b", "d"], ordered=True)
# sort_values
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Categorical(["a", "c", "b", "d"],
categories=["a", "b", "c", "d"], ordered=True)
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
# sort() sorts in place
cat1 = cat.copy()
cat1.sort()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(cat1.__array__(), exp)
def test_slicing_directly(self):
cat = Categorical(["a", "b", "c", "d", "a", "b", "c"])
sliced = cat[3]
tm.assert_equal(sliced, "d")
sliced = cat[3:5]
expected = Categorical(["d", "a"], categories=['a', 'b', 'c', 'd'])
self.assert_numpy_array_equal(sliced._codes, expected._codes)
tm.assert_index_equal(sliced.categories, expected.categories)
def test_set_item_nan(self):
cat = pd.Categorical([1, 2, 3])
exp = pd.Categorical([1, np.nan, 3], categories=[1, 2, 3])
cat[1] = np.nan
self.assertTrue(cat.equals(exp))
# if nan in categories, the proper code should be set!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1] = np.nan
exp = np.array([0, 3, 2, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = np.nan
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, 1]
exp = np.array([0, 3, 0, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, np.nan]
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, np.nan, 3], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[pd.isnull(cat)] = np.nan
exp = np.array([0, 1, 3, 2])
self.assert_numpy_array_equal(cat.codes, exp)
def test_shift(self):
# GH 9416
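# shift() should move the codes and fill the vacated positions with NaN
# (code -1), leaving the categories unchanged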
cat = pd.Categorical(['a', 'b', 'c', 'd', 'a'])
# shift forward
sp1 = cat.shift(1)
xp1 = pd.Categorical([np.nan, 'a', 'b', 'c', 'd'])
self.assert_categorical_equal(sp1, xp1)
self.assert_categorical_equal(cat[:-1], sp1[1:])
# shift back
sn2 = cat.shift(-2)
xp2 = pd.Categorical(['c', 'd', 'a', np.nan, np.nan],
categories=['a', 'b', 'c', 'd'])
self.assert_categorical_equal(sn2, xp2)
self.assert_categorical_equal(cat[2:], sn2[:-2])
# shift by zero
self.assert_categorical_equal(cat, cat.shift(0))
def test_nbytes(self):
cat = pd.Categorical([1, 2, 3])
exp = cat._codes.nbytes + cat._categories.values.nbytes
self.assertEqual(cat.nbytes, exp)
def test_memory_usage(self):
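# memory_usage() should match nbytes for numeric categories; only
# deep=True on object categories accounts for the extra string storage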
cat = pd.Categorical([1, 2, 3])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertEqual(cat.nbytes, cat.memory_usage(deep=True))
cat = pd.Categorical(['foo', 'foo', 'bar'])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertTrue(cat.memory_usage(deep=True) > cat.nbytes)
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = cat.memory_usage(deep=True) - sys.getsizeof(cat)
self.assertTrue(abs(diff) < 100)
def test_searchsorted(self):
# https://github.com/pydata/pandas/issues/8420
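# searchsorted on an ordered Categorical should behave like
# Series.searchsorted and return an array of insertion points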
s1 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk'])
s2 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk', 'donuts'])
c1 = pd.Categorical(s1, ordered=True)
c2 = pd.Categorical(s2, ordered=True)
# Single item array
res = c1.searchsorted(['bread'])
chk = s1.searchsorted(['bread'])
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Scalar version of the single item array: Categorical returns an
# np.array (like pd.Series), unlike np.ndarray.searchsorted(), which
# returns a scalar for a scalar key
res = c1.searchsorted('bread')
chk = s1.searchsorted('bread')
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present in the Categorical
res = c1.searchsorted(['bread', 'eggs'])
chk = s1.searchsorted(['bread', 'eggs'])
exp = np.array([1, 4])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present, to the right
res = c1.searchsorted(['bread', 'eggs'], side='right')
chk = s1.searchsorted(['bread', 'eggs'], side='right')
exp = np.array([3, 4]) # eggs before milk
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# As above, but with a sorter array to reorder an unsorted array
res = c2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
chk = s2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
# eggs after donuts, after switching milk and donuts
exp = np.array([3, 5])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
def test_deprecated_labels(self):
# TODO: labels is deprecated and should be removed in 0.18 or 2017,
# whichever comes first
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.codes
with tm.assert_produces_warning(FutureWarning):
res = cat.labels
self.assert_numpy_array_equal(res, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_deprecated_levels(self):
# TODO: levels is deprecated and should be removed in 0.18 or 2017,
# whichever comes first
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.categories
with tm.assert_produces_warning(FutureWarning):
res = cat.levels
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
res = pd.Categorical([1, 2, 3, np.nan], levels=[1, 2, 3])
self.assert_numpy_array_equal(res.categories, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_removed_names_produces_warning(self):
# 10482
with tm.assert_produces_warning(UserWarning):
Categorical([0, 1], name="a")
with tm.assert_produces_warning(UserWarning):
Categorical.from_codes([1, 2], ["a", "b", "c"], name="a")
def test_datetime_categorical_comparison(self):
dt_cat = pd.Categorical(
pd.date_range('2014-01-01', periods=3), ordered=True)
self.assert_numpy_array_equal(dt_cat > dt_cat[0], [False, True, True])
self.assert_numpy_array_equal(dt_cat[0] < dt_cat, [False, True, True])
def test_reflected_comparison_with_scalars(self):
# GH8658
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assert_numpy_array_equal(cat > cat[0], [False, True, True])
self.assert_numpy_array_equal(cat[0] < cat, [False, True, True])
def test_comparison_with_unknown_scalars(self):
# https://github.com/pydata/pandas/issues/9836#issuecomment-92123057
# and following: comparisons with scalars not in the categories should
# raise for ordering comparisons (<, >), but not for equality/inequality
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assertRaises(TypeError, lambda: cat < 4)
self.assertRaises(TypeError, lambda: cat > 4)
self.assertRaises(TypeError, lambda: 4 < cat)
self.assertRaises(TypeError, lambda: 4 > cat)
self.assert_numpy_array_equal(cat == 4, [False, False, False])
self.assert_numpy_array_equal(cat != 4, [True, True, True])
class TestCategoricalAsBlock(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a', 'a', 'c',
'c', 'c'])
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500), right=False,
labels=labels)
self.cat = df
def test_dtypes(self):
# GH8143
index = ['cat', 'obj', 'num']
cat = pd.Categorical(['a', 'b', 'c'])
obj = pd.Series(['a', 'b', 'c'])
num = pd.Series([1, 2, 3])
df = pd.concat([pd.Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == 'object'
expected = Series([False, True, False], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'int64'
expected = Series([False, False, True], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'category'
expected = Series([True, False, False], index=index)
tm.assert_series_equal(result, expected)
def test_codes_dtypes(self):
# GH 8453
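# the codes dtype is expected to be the smallest integer type that can
# hold the number of categories, and to be re-sized as categories are
# added or removed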
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = Categorical(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
result = Categorical(['foo%05d' % i for i in range(40000)])
self.assertTrue(result.codes.dtype == 'int32')
# adding cats
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = result.add_categories(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
# removing cats
result = result.remove_categories(['foo%05d' % i for i in range(300)])
self.assertTrue(result.codes.dtype == 'int8')
def test_basic(self):
# test basic creation / coercion of categoricals
s = Series(self.factor, name='A')
self.assertEqual(s.dtype, 'category')
self.assertEqual(len(s), len(self.factor))
str(s.values)
str(s)
# in a frame
df = DataFrame({'A': self.factor})
result = df['A']
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
df = DataFrame({'A': s})
result = df['A']
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# multiples
df = DataFrame({'A': s, 'B': s, 'C': 1})
result1 = df['A']
result2 = df['B']
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
self.assertEqual(result2.name, 'B')
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# GH8623
x = pd.DataFrame([[1, '<NAME>'], [2, '<NAME>'],
[1, '<NAME>']],
columns=['person_id', 'person_name'])
# doing this breaks transform
x['person_name'] = pd.Categorical(x.person_name)
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
self.assertEqual(result, expected)
result = x.person_name[0]
self.assertEqual(result, expected)
result = x.person_name.loc[0]
self.assertEqual(result, expected)
def test_creation_astype(self):
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
l = [1, 2, 3, 1]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
df = pd.DataFrame({"cats": [1, 2, 3, 4, 5, 6],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical([1, 2, 3, 4, 5, 6])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
df = pd.DataFrame({"cats": ['a', 'b', 'b', 'a', 'a', 'd'],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical(['a', 'b', 'b', 'a', 'a', 'd'])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
# with keywords
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l, ordered=True))
res = s.astype('category', ordered=True)
tm.assert_series_equal(res, exp)
exp = pd.Series(Categorical(
l, categories=list('abcdef'), ordered=True))
res = s.astype('category', categories=list('abcdef'), ordered=True)
tm.assert_series_equal(res, exp)
def test_construction_series(self):
l = [1, 2, 3, 1]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
l = ["a", "b", "c", "a"]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
# insert into frame with different index
# GH 8076
index = pd.date_range('20000101', periods=3)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
expected = DataFrame({'x': expected})
df = DataFrame(
{'x': Series(['a', 'b', 'c'], dtype='category')}, index=index)
tm.assert_frame_equal(df, expected)
def test_construction_frame(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# to_frame
s = Series(list('abc'), dtype='category')
result = s.to_frame()
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(result[0], expected)
result = s.to_frame(name='foo')
expected = Series(list('abc'), dtype='category', name='foo')
tm.assert_series_equal(result['foo'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# ndim != 1
df = DataFrame([pd.Categorical(list('abc'))])
expected = DataFrame({0: Series(list('abc'), dtype='category')})
tm.assert_frame_equal(df, expected)
df = DataFrame([pd.Categorical(list('abc')), pd.Categorical(list(
'abd'))])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: Series(list('abd'), dtype='category')},
columns=[0, 1])
tm.assert_frame_equal(df, expected)
# mixed
df = DataFrame([pd.Categorical(list('abc')), list('def')])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: list('def')}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
# invalid (shape)
self.assertRaises(
ValueError,
lambda: DataFrame([pd.Categorical(list('abc')),
pd.Categorical(list('abdefg'))]))
# ndim > 1
self.assertRaises(NotImplementedError,
lambda: pd.Categorical(np.array([list('abcd')])))
def test_reshaping(self):
p = tm.makePanel()
p['str'] = 'foo'
df = p.to_frame()
df['category'] = df['str'].astype('category')
result = df['category'].unstack()
c = Categorical(['foo'] * len(p.major_axis))
expected = DataFrame({'A': c.copy(),
'B': c.copy(),
'C': c.copy(),
'D': c.copy()},
columns=Index(list('ABCD'), name='minor'),
index=p.major_axis.set_names('major'))
tm.assert_frame_equal(result, expected)
def test_reindex(self):
index = pd.date_range('20000101', periods=3)
# reindexing to an invalid Categorical
s = Series(['a', 'b', 'c'], dtype='category')
result = s.reindex(index)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
tm.assert_series_equal(result, expected)
# partial reindexing
expected = Series(Categorical(values=['b', 'c'], categories=['a', 'b',
'c']))
expected.index = [1, 2]
result = s.reindex([1, 2])
tm.assert_series_equal(result, expected)
expected = Series(Categorical(
values=['c', np.nan], categories=['a', 'b', 'c']))
expected.index = [2, 3]
result = s.reindex([2, 3])
tm.assert_series_equal(result, expected)
def test_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy=True!
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat, copy=True)
self.assertFalse(s.cat is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
exp_cat = np.array(["a", "b", "c", "a"])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat)
self.assertTrue(s.values is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_nan_handling(self):
# Nans are represented as -1 in the codes
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(s.values.codes, np.array([0, 1, -1, 0]))
# If the categories include nan, the code should point to that category
# instead
with tm.assert_produces_warning(FutureWarning):
s2 = Series(Categorical(
["a", "b", np.nan, "a"], categories=["a", "b", np.nan]))
self.assert_numpy_array_equal(s2.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s2.values.codes, np.array([0, 1, 2, 0]))
# Changing categories should also make the replaced category np.nan
s3 = Series(Categorical(["a", "b", "c", "a"]))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
s3.cat.categories = ["a", "b", np.nan]
self.assert_numpy_array_equal(s3.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s3.values.codes, np.array([0, 1, 2, 0]))
def test_cat_accessor(self):
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assertEqual(s.cat.ordered, False)
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s.cat.set_categories(["b", "a"], inplace=True)
self.assertTrue(s.values.equals(exp))
res = s.cat.set_categories(["b", "a"])
self.assertTrue(res.values.equals(exp))
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s[:] = "a"
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, np.array(["a"]))
def test_sequence_like(self):
# GH 7839
# make sure can iterate
df = DataFrame({"id": [1, 2, 3, 4, 5, 6],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df['grade'] = Categorical(df['raw_grade'])
# basic sequencing testing
result = list(df.grade.values)
expected = np.array(df.grade.values).tolist()
tm.assert_almost_equal(result, expected)
# iteration
for t in df.itertuples(index=False):
str(t)
for row, s in df.iterrows():
str(s)
for c, col in df.iteritems():
str(col)
def test_series_delegations(self):
# invalid accessor
self.assertRaises(AttributeError, lambda: Series([1, 2, 3]).cat)
tm.assertRaisesRegexp(
AttributeError,
r"Can only use .cat accessor with a 'category' dtype",
lambda: Series([1, 2, 3]).cat)
self.assertRaises(AttributeError, lambda: Series(['a', 'b', 'c']).cat)
self.assertRaises(AttributeError, lambda: Series(np.arange(5.)).cat)
self.assertRaises(AttributeError,
lambda: Series([Timestamp('20130101')]).cat)
# Series should delegate calls to '.categories', '.codes', '.ordered'
# and the methods '.set_categories()' 'drop_unused_categories()' to the
# categorical
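# e.g. s.cat.categories, s.cat.codes, s.cat.ordered and
# s.cat.set_categories(...) should all be available on a
# category-dtype Series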
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
s.cat.categories = [1, 2, 3]
exp_categories = np.array([1, 2, 3])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
exp_codes = Series([0, 1, 2, 0], dtype='int8')
tm.assert_series_equal(s.cat.codes, exp_codes)
self.assertEqual(s.cat.ordered, True)
s = s.cat.as_unordered()
self.assertEqual(s.cat.ordered, False)
s.cat.as_ordered(inplace=True)
self.assertEqual(s.cat.ordered, True)
# reorder
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
s = s.cat.set_categories(["c", "b", "a"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# remove unused categories
s = Series(Categorical(["a", "b", "b", "a"], categories=["a", "b", "c"
]))
exp_categories = np.array(["a", "b"])
exp_values = np.array(["a", "b", "b", "a"])
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# Calling set_categories directly on the Series (instead of via .cat) is
# an easy mistake, so make sure it raises an error:
def f():
s.set_categories([4, 3, 2, 1])
self.assertRaises(Exception, f)
# right: s.cat.set_categories([4,3,2,1])
def test_series_functions_no_warnings(self):
df = pd.DataFrame({'value': np.random.randint(0, 100, 20)})
labels = ["{0} - {1}".format(i, i + 9) for i in range(0, 100, 10)]
with tm.assert_produces_warning(False):
df['group'] = pd.cut(df.value, range(0, 105, 10), right=False,
labels=labels)
def test_assignment_to_dataframe(self):
# assignment
df = DataFrame({'value': np.array(
np.random.randint(0, 10000, 100), dtype='int32')})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
s = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels)
d = s.values
df['D'] = d
str(df)
result = df.dtypes
expected = Series(
[np.dtype('int32'), com.CategoricalDtype()], index=['value', 'D'])
tm.assert_series_equal(result, expected)
df['E'] = s
str(df)
result = df.dtypes
expected = Series([np.dtype('int32'), com.CategoricalDtype(),
com.CategoricalDtype()],
index=['value', 'D', 'E'])
tm.assert_series_equal(result, expected)
result1 = df['D']
result2 = df['E']
self.assertTrue(result1._data._block.values.equals(d))
# sorting
s.name = 'E'
self.assertTrue(result2.sort_index().equals(s.sort_index()))
cat = pd.Categorical([1, 2, 3, 10], categories=[1, 2, 3, 4, 10])
df = pd.DataFrame(pd.Series(cat))
def test_describe(self):
# Categoricals should not show up together with numerical columns
result = self.cat.describe()
self.assertEqual(len(result.columns), 1)
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = pd.Series(pd.Categorical(["a", "b", "c", "c"]))
df3 = pd.DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
res = df3.describe()
self.assert_numpy_array_equal(res["cat"].values, res["s"].values)
def test_repr(self):
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
self.assertEqual(exp, a.__unicode__())
a = pd.Series(pd.Categorical(["a", "b"] * 25))
exp = u("0 a\n1 b\n" + " ..\n" + "48 a\n49 b\n" +
"dtype: category\nCategories (2, object): [a, b]")
with option_context("display.max_rows", 5):
self.assertEqual(exp, repr(a))
levs = list("abcdefghijklmnopqrstuvwxyz")
a = pd.Series(pd.Categorical(
["a", "b"], categories=levs, ordered=True))
exp = u("0 a\n1 b\n" + "dtype: category\n"
"Categories (26, object): [a < b < c < d ... w < x < y < z]")
self.assertEqual(exp, a.__unicode__())
def test_categorical_repr(self):
c = pd.Categorical([1, 2, 3])
exp = """[1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3])
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1, 2, 3, 4, 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20))
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0, 1, 2, 3, ..., 16, 17, 18, 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_ordered(self):
c = pd.Categorical([1, 2, 3], ordered=True)
exp = """[1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3],
ordered=True)
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10, ordered=True)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1 < 2 < 3 < 4 < 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20), ordered=True)
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0 < 1 < 2 < 3 ... 16 < 17 < 18 < 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
# TODO(wesm): exceeding 80 characters in the console is not good
# behavior
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]""")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]")
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, "
"2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, "
"2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(c), exp)
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(c), exp)
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_series_repr(self):
s = pd.Series(pd.Categorical([1, 2, 3]))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(s), exp)
s = pd.Series(pd.Categorical(np.arange(10)))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0, 1, 2, 3, ..., 6, 7, 8, 9]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_ordered(self):
s = pd.Series(pd.Categorical([1, 2, 3], ordered=True))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(s), exp)
s = pd.Series(pd.Categorical(np.arange(10), ordered=True))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0 < 1 < 2 < 3 ... 6 < 7 < 8 < 9]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00,
2011-01-01 12:00:00, 2011-01-01 13:00:00]"""
self.assertEqual(repr(s), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,
2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""
self.assertEqual(repr(s), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(s), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(s), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(s), exp)
idx = pd.timedelta_range('1 hours', periods=10)
s = pd.Series(pd.Categorical(idx))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00,
8 days 01:00:00, 9 days 01:00:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(s), exp)
idx = pd.timedelta_range('1 hours', periods=10)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 6 days 01:00:00 < 7 days 01:00:00 <
8 days 01:00:00 < 9 days 01:00:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_index_repr(self):
idx = pd.CategoricalIndex(pd.Categorical([1, 2, 3]))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')"""
self.assertEqual(repr(idx), exp)
i = pd.CategoricalIndex(pd.Categorical(np.arange(10)))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_ordered(self):
i = pd.CategoricalIndex(pd.Categorical([1, 2, 3], ordered=True))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(np.arange(10), ordered=True))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(idx.append(idx), ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00', '2011-01-01 09:00:00-05:00',
'2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00',
'2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_period(self):
# test all length
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=1)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00'], categories=[2011-01-01 09:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=2)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=3)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(idx.append(idx)))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00', '2011-01-01 09:00',
'2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00',
'2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.timedelta_range('1 hours', periods=10)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.timedelta_range('1 hours', periods=10)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_frame(self):
# normal DataFrame
dt = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
p = pd.period_range('2011-01', freq='M', periods=5)
df = pd.DataFrame({'dt': dt, 'p': p})
exp = """ dt p
0 2011-01-01 09:00:00-05:00 2011-01
1 2011-01-01 10:00:00-05:00 2011-02
2 2011-01-01 11:00:00-05:00 2011-03
3 2011-01-01 12:00:00-05:00 2011-04
4 2011-01-01 13:00:00-05:00 2011-05"""
df = pd.DataFrame({'dt': pd.Categorical(dt), 'p': pd.Categorical(p)})
self.assertEqual(repr(df), exp)
def test_info(self):
# make sure it works
n = 2500
df = DataFrame({'int64': np.random.randint(100, size=n)})
df['category'] = Series(np.array(list('abcdefghij')).take(
np.random.randint(0, 10, size=n))).astype('category')
df.isnull()
df.info()
df2 = df[df['category'] == 'd']
df2.info()
def test_groupby_sort(self):
# http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby
# This should result in a properly sorted Series so that the plot
# has a sorted x axis
# self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
res = self.cat.groupby(['value_group'])['value_group'].count()
exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]
exp.index = pd.CategoricalIndex(exp.index, name=exp.index.name)
tm.assert_series_equal(res, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Series(Categorical(["a", "b", "c", "d"], ordered=False))
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Series(Categorical(["a", "b", "c", "d"], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Series(Categorical(["a", "b", "c", "d"], categories=[
'd', 'c', 'b', 'a'], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Series(Categorical(
[np.nan, "b", "c", np.nan], categories=['d', 'c', 'b', 'a'
], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
cat = Series(Categorical(
[np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
def test_mode(self):
s = Series(Categorical([1, 1, 2, 4, 5, 5, 5],
categories=[5, 4, 3, 2, 1], ordered=True))
res = s.mode()
exp = Series(Categorical([5], categories=[
5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
s = Series(Categorical([1, 1, 1, 4, 5, 5, 5],
categories=[5, 4, 3, 2, 1], ordered=True))
res = s.mode()
exp = Series(Categorical([5, 1], categories=[
5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
s = Series(Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True))
res = s.mode()
exp = Series(Categorical([], categories=[5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
def test_value_counts(self):
s = pd.Series(pd.Categorical(
["a", "b", "c", "c", "c", "b"], categories=["c", "a", "b", "d"]))
res = s.value_counts(sort=False)
exp = Series([3, 1, 2, 0],
index=pd.CategoricalIndex(["c", "a", "b", "d"]))
tm.assert_series_equal(res, exp)
res = s.value_counts(sort=True)
exp = Series([3, 2, 1, 0],
index=pd.CategoricalIndex(["c", "b", "a", "d"]))
tm.assert_series_equal(res, exp)
def test_value_counts_with_nan(self):
# https://github.com/pydata/pandas/issues/9443
s = pd.Series(["a", "b", "a"], dtype="category")
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
s = pd.Series(["a", "b", None, "a", None, None], dtype="category")
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([3, 2, 1], index=pd.CategoricalIndex([np.nan, "a", "b"])))
# When we aren't sorting by counts, and np.nan isn't a
# category, it should be last.
tm.assert_series_equal(
s.value_counts(dropna=False, sort=False),
pd.Series([2, 1, 3],
index=pd.CategoricalIndex(["a", "b", np.nan])))
with tm.assert_produces_warning(FutureWarning):
s = pd.Series(pd.Categorical(
["a", "b", "a"], categories=["a", "b", np.nan]))
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([2, 1, 0],
index=pd.CategoricalIndex(["a", "b", np.nan])))
with tm.assert_produces_warning(FutureWarning):
s = pd.Series(pd.Categorical(
["a", "b", None, "a", None, None], categories=["a", "b", np.nan
]))
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([3, 2, 1],
index=pd.CategoricalIndex([np.nan, "a", "b"])))
def test_groupby(self):
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"
], categories=["a", "b", "c", "d"], ordered=True)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
expected = DataFrame({'a': Series(
[1, 2, 4, np.nan], index=pd.CategoricalIndex(
['a', 'b', 'c', 'd'], name='b'))})
result = data.groupby("b").mean()
tm.assert_frame_equal(result, expected)
raw_cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
raw_cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]})
# single grouper
gb = df.groupby("A")
exp_idx = pd.CategoricalIndex(['a', 'b', 'z'], name='A')
expected = DataFrame({'values': Series([3, 7, np.nan], index=exp_idx)})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# multiple groupers
gb = df.groupby(['A', 'B'])
expected = DataFrame({'values': Series(
[1, 2, np.nan, 3, 4, np.nan, np.nan, np.nan, np.nan
], index=pd.MultiIndex.from_product(
[['a', 'b', 'z'], ['c', 'd', 'y']], names=['A', 'B']))})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# multiple groupers with a non-cat
df = df.copy()
df['C'] = ['foo', 'bar'] * 2
gb = df.groupby(['A', 'B', 'C'])
expected = DataFrame({'values': Series(
np.nan, index=pd.MultiIndex.from_product(
[['a', 'b', 'z'], ['c', 'd', 'y'], ['foo', 'bar']
], names=['A', 'B', 'C']))}).sortlevel()
expected.iloc[[1, 2, 7, 8], 0] = [1, 2, 3, 4]
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
x = pd.DataFrame([[1, '<NAME>'], [2, '<NAME>'],
[1, '<NAME>']],
columns=['person_id', 'person_name'])
x['person_name'] = pd.Categorical(x.person_name)
g = x.groupby(['person_id'])
result = g.transform(lambda x: x)
tm.assert_frame_equal(result, x[['person_name']])
result = x.drop_duplicates('person_name')
expected = x.iloc[[0, 1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates('person_name').iloc[0]
result = g.apply(f)
expected = x.iloc[[0, 1]].copy()
expected.index = Index([1, 2], name='person_id')
expected['person_name'] = expected['person_name'].astype('object')
tm.assert_frame_equal(result, expected)
# GH 9921
# Monotonic
df = DataFrame({"a": [5, 15, 25]})
c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
result = df.a.groupby(c).transform(sum)
tm.assert_series_equal(result, df['a'], check_names=False)
self.assertTrue(result.name is None)
tm.assert_series_equal(
df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
tm.assert_frame_equal(
df.groupby(c).transform(lambda xs: np.max(xs)), df[['a']])
# Filter
tm.assert_series_equal(df.a.groupby(c).filter(np.all), df['a'])
tm.assert_frame_equal(df.groupby(c).filter(np.all), df)
# Non-monotonic
df = DataFrame({"a": [5, 15, 25, -5]})
c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
result = df.a.groupby(c).transform(sum)
tm.assert_series_equal(result, df['a'], check_names=False)
self.assertTrue(result.name is None)
tm.assert_series_equal(
df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
tm.assert_frame_equal(
df.groupby(c).transform(lambda xs: np.sum(xs)), df[['a']])
# GH 9603
df = pd.DataFrame({'a': [1, 0, 0, 0]})
c = pd.cut(df.a, [0, 1, 2, 3, 4])
result = df.groupby(c).apply(len)
expected = pd.Series([1, 0, 0, 0],
index=pd.CategoricalIndex(c.values.categories))
expected.index.name = 'a'
tm.assert_series_equal(result, expected)
def test_pivot_table(self):
raw_cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
raw_cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]})
result = pd.pivot_table(df, values='values', index=['A', 'B'])
expected = Series([1, 2, np.nan, 3, 4, np.nan, np.nan, np.nan, np.nan],
index=pd.MultiIndex.from_product(
[['a', 'b', 'z'], ['c', 'd', 'y']],
names=['A', 'B']),
name='values')
tm.assert_series_equal(result, expected)
def test_count(self):
s = Series(Categorical([np.nan, 1, 2, np.nan],
categories=[5, 4, 3, 2, 1], ordered=True))
result = s.count()
self.assertEqual(result, 2)
def test_sort(self):
c = Categorical(["a", "b", "b", "a"], ordered=False)
cat = Series(c)
# 9816 deprecated
with tm.assert_produces_warning(FutureWarning):
c.order()
# sort in the categories order
expected = Series(
Categorical(["a", "a", "b", "b"],
ordered=False), index=[0, 3, 1, 2])
result = cat.sort_values()
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "c", "b", "d"], ordered=True))
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Series(Categorical(["a", "c", "b", "d"], categories=[
"a", "b", "c", "d"], ordered=True))
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"])
self.assert_numpy_array_equal(res.__array__(), exp)
raw_cat1 = Categorical(["a", "b", "c", "d"],
categories=["a", "b", "c", "d"], ordered=False)
raw_cat2 = Categorical(["a", "b", "c", "d"],
categories=["d", "c", "b", "a"], ordered=True)
s = ["a", "b", "c", "d"]
df = DataFrame({"unsort": raw_cat1,
"sort": raw_cat2,
"string": s,
"values": [1, 2, 3, 4]})
# Cats must be sorted in a dataframe
res = df.sort_values(by=["string"], ascending=False)
exp = np.array(["d", "c", "b", "a"])
self.assert_numpy_array_equal(res["sort"].values.__array__(), exp)
self.assertEqual(res["sort"].dtype, "category")
res = df.sort_values(by=["sort"], ascending=False)
exp = df.sort_values(by=["string"], ascending=True)
self.assert_numpy_array_equal(res["values"], exp["values"])
self.assertEqual(res["sort"].dtype, "category")
self.assertEqual(res["unsort"].dtype, "category")
# unordered cat, but we allow this
df.sort_values(by=["unsort"], ascending=False)
# multi-columns sort
# GH 7848
df = DataFrame({"id": [6, 5, 4, 3, 2, 1],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df["grade"] = pd.Categorical(df["raw_grade"], ordered=True)
df['grade'] = df['grade'].cat.set_categories(['b', 'e', 'a'])
# sorts 'grade' according to the order of the categories
result = df.sort_values(by=['grade'])
expected = df.iloc[[1, 2, 5, 0, 3, 4]]
tm.assert_frame_equal(result, expected)
# multi
result = df.sort_values(by=['grade', 'id'])
expected = df.iloc[[2, 1, 5, 4, 3, 0]]
tm.assert_frame_equal(result, expected)
# reverse
cat = Categorical(["a", "c", "c", "b", "d"], ordered=True)
res = cat.sort_values(ascending=False)
exp_val = np.array(["d", "c", "c", "b", "a"], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
# some NaN positions
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='last')
exp_val = np.array(["d", "c", "b", "a", np.nan], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='first')
exp_val = np.array([np.nan, "d", "c", "b", "a"], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='first')
exp_val = np.array([np.nan, "d", "c", "b", "a"], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='last')
exp_val = np.array(["d", "c", "b", "a", np.nan], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
def test_slicing(self):
cat = Series(Categorical([1, 2, 3, 4]))
reversed = cat[::-1]
exp = np.array([4, 3, 2, 1])
self.assert_numpy_array_equal(reversed.__array__(), exp)
df = DataFrame({'value': (np.arange(100) + 1).astype('int64')})
df['D'] = pd.cut(df.value, bins=[0, 25, 50, 75, 100])
expected = Series([11, '(0, 25]'], index=['value', 'D'], name=10)
result = df.iloc[10]
tm.assert_series_equal(result, expected)
expected = DataFrame({'value': np.arange(11, 21).astype('int64')},
index=np.arange(10, 20).astype('int64'))
expected['D'] = pd.cut(expected.value, bins=[0, 25, 50, 75, 100])
result = df.iloc[10:20]
tm.assert_frame_equal(result, expected)
expected = Series([9, '(0, 25]'], index=['value', 'D'], name=8)
result = df.loc[8]
tm.assert_series_equal(result, expected)
def test_slicing_and_getting_ops(self):
# systematically test the slicing operations:
# for all slicing ops:
# - returning a dataframe
# - returning a column
# - returning a row
# - returning a single value
cats = pd.Categorical(
["a", "c", "b", "c", "c", "c", "c"], categories=["a", "b", "c"])
idx = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values = [1, 2, 3, 4, 5, 6, 7]
df = pd.DataFrame({"cats": cats, "values": values}, index=idx)
# the expected values
cats2 = pd.Categorical(["b", "c"], categories=["a", "b", "c"])
idx2 = pd.Index(["j", "k"])
values2 = [3, 4]
# 2:4,: | "j":"k",:
exp_df = pd.DataFrame({"cats": cats2, "values": values2}, index=idx2)
# :,"cats" | :,0
exp_col = pd.Series(cats, index=idx, name='cats')
# "j",: | 2,:
exp_row = pd.Series(["b", 3], index=["cats", "values"], dtype="object",
name="j")
# "j","cats | 2,0
exp_val = "b"
# iloc
# frame
res_df = df.iloc[2:4, :]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
# row
res_row = df.iloc[2, :]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.iloc[:, 0]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
# single value
res_val = df.iloc[2, 0]
self.assertEqual(res_val, exp_val)
# loc
# frame
res_df = df.loc["j":"k", :]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
# row
res_row = df.loc["j", :]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.loc[:, "cats"]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
# single value
res_val = df.loc["j", "cats"]
self.assertEqual(res_val, exp_val)
# ix
# frame
# res_df = df.ix["j":"k",[0,1]] # doesn't work?
res_df = df.ix["j":"k", :]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
# row
res_row = df.ix["j", :]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.ix[:, "cats"]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
# single value
res_val = df.ix["j", 0]
self.assertEqual(res_val, exp_val)
# iat
res_val = df.iat[2, 0]
self.assertEqual(res_val, exp_val)
# at
res_val = df.at["j", "cats"]
self.assertEqual(res_val, exp_val)
# fancy indexing
exp_fancy = df.iloc[[2]]
res_fancy = df[df["cats"] == "b"]
tm.assert_frame_equal(res_fancy, exp_fancy)
res_fancy = df[df["values"] == 3]
tm.assert_frame_equal(res_fancy, exp_fancy)
# get_value
res_val = df.get_value("j", "cats")
self.assertEqual(res_val, exp_val)
# i : int, slice, or sequence of integers
res_row = df.iloc[2]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
res_df = df.iloc[slice(2, 4)]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
res_df = df.iloc[[2, 3]]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
res_col = df.iloc[:, 0]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
res_df = df.iloc[:, slice(0, 2)]
tm.assert_frame_equal(res_df, df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
res_df = df.iloc[:, [0, 1]]
tm.assert_frame_equal(res_df, df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
def test_slicing_doc_examples(self):
# GH 7918
cats = Categorical(
["a", "b", "b", "b", "c", "c", "c"], categories=["a", "b", "c"])
idx = Index(["h", "i", "j", "k", "l", "m", "n", ])
values = [1, 2, 2, 2, 3, 4, 5]
df = DataFrame({"cats": cats, "values": values}, index=idx)
result = df.iloc[2:4, :]
expected = DataFrame(
{"cats": Categorical(
['b', 'b'], categories=['a', 'b', 'c']),
"values": [2, 2]}, index=['j', 'k'])
tm.assert_frame_equal(result, expected)
result = df.iloc[2:4, :].dtypes
expected = Series(['category', 'int64'], ['cats', 'values'])
tm.assert_series_equal(result, expected)
result = df.loc["h":"j", "cats"]
expected = Series(Categorical(['a', 'b', 'b'],
categories=['a', 'b', 'c']),
index=['h', 'i', 'j'], name='cats')
tm.assert_series_equal(result, expected)
result = df.ix["h":"j", 0:1]
expected = DataFrame({'cats': Series(
Categorical(
['a', 'b', 'b'], categories=['a', 'b', 'c']), index=['h', 'i',
'j'])})
tm.assert_frame_equal(result, expected)
def test_assigning_ops(self):
# systematically test the assigning operations:
# for all slicing ops:
# for value in categories and value not in categories:
# - assign a single value -> exp_single_cats_value
# - assign a complete row (mixed values) -> exp_single_row
# assign multiple rows (mixed values) (-> array) -> exp_multi_row
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
cats = pd.Categorical(
["a", "a", "a", "a", "a", "a", "a"], categories=["a", "b"])
idx = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values = [1, 1, 1, 1, 1, 1, 1]
orig = pd.DataFrame({"cats": cats, "values": values}, index=idx)
# the expected values
# changed single row
cats1 = pd.Categorical(
["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])
idx1 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values1 = [1, 1, 2, 1, 1, 1, 1]
exp_single_row = pd.DataFrame(
{"cats": cats1,
"values": values1}, index=idx1)
# changed multiple rows
cats2 = pd.Categorical(
["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
idx2 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values2 = [1, 1, 2, 2, 1, 1, 1]
exp_multi_row = pd.DataFrame(
{"cats": cats2,
"values": values2}, index=idx2)
# changed part of the cats column
cats3 = pd.Categorical(
["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
idx3 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values3 = [1, 1, 1, 1, 1, 1, 1]
exp_parts_cats_col = pd.DataFrame(
{"cats": cats3,
"values": values3}, index=idx3)
# changed single value in cats col
cats4 = pd.Categorical(
["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])
idx4 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values4 = [1, 1, 1, 1, 1, 1, 1]
exp_single_cats_value = pd.DataFrame(
{"cats": cats4,
"values": values4}, index=idx4)
# iloc
# ###############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.iloc[2, 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.iloc[df.index == "j", 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.iloc[2, 0] = "c"
self.assertRaises(ValueError, f)
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.iloc[2, :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
def f():
df = orig.copy()
df.iloc[2, :] = ["c", 2]
self.assertRaises(ValueError, f)
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.iloc[2:4, :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
def f():
df = orig.copy()
df.iloc[2:4, :] = [["c", 2], ["c", 2]]
self.assertRaises(ValueError, f)
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.iloc[2:4, 0] = pd.Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.iloc[2:4, 0] = pd.Categorical(
["b", "b"], categories=["a", "b", "c"])
with tm.assertRaises(ValueError):
# different values
df = orig.copy()
df.iloc[2:4, 0] = pd.Categorical(
["c", "c"], categories=["a", "b", "c"])
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.iloc[2:4, 0] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
df.iloc[2:4, 0] = ["c", "c"]
# loc
# ##############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.loc["j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.loc[df.index == "j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.loc["j", "cats"] = "c"
self.assertRaises(ValueError, f)
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.loc["j", :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
def f():
df = orig.copy()
df.loc["j", :] = ["c", 2]
self.assertRaises(ValueError, f)
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.loc["j":"k", :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
def f():
df = orig.copy()
df.loc["j":"k", :] = [["c", 2], ["c", 2]]
self.assertRaises(ValueError, f)
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.loc["j":"k", "cats"] = pd.Categorical(
["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.loc["j":"k", "cats"] = pd.Categorical(
["b", "b"], categories=["a", "b", "c"])
with tm.assertRaises(ValueError):
# different values
df = orig.copy()
df.loc["j":"k", "cats"] = pd.Categorical(
["c", "c"], categories=["a", "b", "c"])
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.loc["j":"k", "cats"] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
df.loc["j":"k", "cats"] = ["c", "c"]
# ix
# ##############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.ix["j", 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.ix[df.index == "j", 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.ix["j", 0] = "c"
self.assertRaises(ValueError, f)
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.ix["j", :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
def f():
df = orig.copy()
df.ix["j", :] = ["c", 2]
self.assertRaises(ValueError, f)
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.ix["j":"k", :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
def f():
df = orig.copy()
df.ix["j":"k", :] = [["c", 2], ["c", 2]]
self.assertRaises(ValueError, f)
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.ix["j":"k", 0] = pd.Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.ix["j":"k", 0] = pd.Categorical(
["b", "b"], categories=["a", "b", "c"])
with tm.assertRaises(ValueError):
# different values
df = orig.copy()
df.ix["j":"k", 0] = pd.Categorical(
["c", "c"], categories=["a", "b", "c"])
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.ix["j":"k", 0] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
df.ix["j":"k", 0] = ["c", "c"]
# iat
df = orig.copy()
df.iat[2, 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.iat[2, 0] = "c"
self.assertRaises(ValueError, f)
# at
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.at["j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.at["j", "cats"] = "c"
self.assertRaises(ValueError, f)
# fancy indexing
catsf = pd.Categorical(
["a", "a", "c", "c", "a", "a", "a"], categories=["a", "b", "c"])
idxf = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
valuesf = [1, 1, 3, 3, 1, 1, 1]
df = pd.DataFrame({"cats": catsf, "values": valuesf}, index=idxf)
exp_fancy = exp_multi_row.copy()
exp_fancy["cats"].cat.set_categories(["a", "b", "c"], inplace=True)
df[df["cats"] == "c"] = ["b", 2]
tm.assert_frame_equal(df, exp_multi_row)
# set_value
df = orig.copy()
df.set_value("j", "cats", "b")
tm.assert_frame_equal(df, exp_single_cats_value)
def f():
df = orig.copy()
df.set_value("j", "cats", "c")
self.assertRaises(ValueError, f)
# Assigning a Categorical to parts of an int/... column uses the values of
# the Categorical
df = pd.DataFrame({"a": [1, 1, 1, 1, 1],
"b": ["a", "a", "a", "a", "a"]})
exp = pd.DataFrame({"a": [1, "b", "b", 1, 1],
"b": ["a", "a", "b", "b", "a"]})
df.loc[1:2, "a"] = pd.Categorical(["b", "b"], categories=["a", "b"])
df.loc[2:3, "b"] = pd.Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp)
# Series
orig = Series(pd.Categorical(["b", "b"], categories=["a", "b"]))
s = orig.copy()
s[:] = "a"
exp = Series(pd.Categorical(["a", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[1] = "a"
exp = Series(pd.Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[s.index > 0] = "a"
exp = Series(pd.Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[[False, True]] = "a"
exp = Series(pd.Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s.index = ["x", "y"]
s["y"] = "a"
exp = Series(
pd.Categorical(["b", "a"],
categories=["a", "b"]), index=["x", "y"])
tm.assert_series_equal(s, exp)
# ensure that one can set something to np.nan
s = Series(Categorical([1, 2, 3]))
exp = Series(Categorical([1, np.nan, 3]))
s[1] = np.nan
tm.assert_series_equal(s, exp)
def test_comparisons(self):
tests_data = [(list("abc"), list("cba"), list("bbb")),
([1, 2, 3], [3, 2, 1], [2, 2, 2])]
for data, reverse, base in tests_data:
cat_rev = pd.Series(pd.Categorical(data, categories=reverse,
ordered=True))
cat_rev_base = pd.Series(pd.Categorical(base, categories=reverse,
ordered=True))
cat = pd.Series(pd.Categorical(data, ordered=True))
cat_base = pd.Series(pd.Categorical(
base, categories=cat.cat.categories, ordered=True))
s = Series(base)
a = np.array(base)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = Series([True, False, False])
tm.assert_series_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = Series([False, False, True])
tm.assert_series_equal(res_rev, exp_rev)
res = cat > cat_base
exp = Series([False, False, True])
tm.assert_series_equal(res, exp)
scalar = base[1]
res = cat > scalar
exp = Series([False, False, True])
exp2 = cat.values > scalar
tm.assert_series_equal(res, exp)
tm.assert_numpy_array_equal(res.values, exp2)
res_rev = cat_rev > scalar
exp_rev = Series([True, False, False])
exp_rev2 = cat_rev.values > scalar
tm.assert_series_equal(res_rev, exp_rev)
tm.assert_numpy_array_equal(res_rev.values, exp_rev2)
# Only categories with same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
# categorical cannot be compared to Series or numpy array, and also
# not the other way around
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
# unequal comparison should raise for unordered cats
cat = Series(Categorical(list("abc")))
def f():
cat > "b"
self.assertRaises(TypeError, f)
cat = Series(Categorical(list("abc"), ordered=False))
def f():
cat > "b"
self.assertRaises(TypeError, f)
# https://github.com/pydata/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = Series(Categorical(list("abc"), ordered=True))
self.assertRaises(TypeError, lambda: cat < "d")
self.assertRaises(TypeError, lambda: cat > "d")
self.assertRaises(TypeError, lambda: "d" < cat)
self.assertRaises(TypeError, lambda: "d" > cat)
self.assert_series_equal(cat == "d", Series([False, False, False]))
self.assert_series_equal(cat != "d", Series([True, True, True]))
# And test NaN handling...
cat = Series(Categorical(["a", "b", "c", np.nan]))
exp = Series([True, True, True, False])
res = (cat == cat)
tm.assert_series_equal(res, exp)
def test_cat_equality(self):
# GH 8938
# allow equality comparisons
a = Series(list('abc'), dtype="category")
b = Series(list('abc'), dtype="object")
c = Series(['a', 'b', 'cc'], dtype="object")
d = Series(list('acb'), dtype="object")
e = Categorical(list('abc'))
f = Categorical(list('acb'))
# vs scalar
self.assertFalse((a == 'a').all())
self.assertTrue(((a != 'a') == ~(a == 'a')).all())
self.assertFalse(('a' == a).all())
self.assertTrue((a == 'a')[0])
self.assertTrue(('a' == a)[0])
self.assertFalse(('a' != a)[0])
# vs list-like
self.assertTrue((a == a).all())
self.assertFalse((a != a).all())
self.assertTrue((a == list(a)).all())
self.assertTrue((a == b).all())
self.assertTrue((b == a).all())
self.assertTrue(((~(a == b)) == (a != b)).all())
self.assertTrue(((~(b == a)) == (b != a)).all())
self.assertFalse((a == c).all())
self.assertFalse((c == a).all())
self.assertFalse((a == d).all())
self.assertFalse((d == a).all())
# vs a cat-like
self.assertTrue((a == e).all())
self.assertTrue((e == a).all())
self.assertFalse((a == f).all())
self.assertFalse((f == a).all())
self.assertTrue(((~(a == e) == (a != e)).all()))
self.assertTrue(((~(e == a) == (e != a)).all()))
self.assertTrue(((~(a == f) == (a != f)).all()))
self.assertTrue(((~(f == a) == (f != a)).all()))
# non-equality is not comparable
self.assertRaises(TypeError, lambda: a < b)
self.assertRaises(TypeError, lambda: b < a)
self.assertRaises(TypeError, lambda: a > b)
self.assertRaises(TypeError, lambda: b > a)
def test_concat(self):
cat = pd.Categorical(["a", "b"], categories=["a", "b"])
vals = [1, 2]
df = pd.DataFrame({"cats": cat, "vals": vals})
cat2 = pd.Categorical(["a", "b", "a", "b"], categories=["a", "b"])
vals2 = [1, 2, 1, 2]
exp = pd.DataFrame({"cats": cat2,
"vals": vals2}, index=pd.Index([0, 1, 0, 1]))
res = pd.concat([df, df])
tm.assert_frame_equal(exp, res)
# Concat should raise if the two categoricals do not have the same
# categories
cat3 = pd.Categorical(["a", "b"], categories=["a", "b", "c"])
vals3 = [1, 2]
df_wrong_categories = pd.DataFrame({"cats": cat3, "vals": vals3})
def f():
pd.concat([df, df_wrong_categories])
self.assertRaises(ValueError, f)
# GH 7864
# make sure ordering is preserved
df = pd.DataFrame({"id": [1, 2, 3, 4, 5, 6],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df["grade"] = pd.Categorical(df["raw_grade"])
df['grade'].cat.set_categories(['e', 'a', 'b'])
df1 = df[0:3]
df2 = df[3:]
self.assert_numpy_array_equal(df['grade'].cat.categories,
df1['grade'].cat.categories)
self.assert_numpy_array_equal(df['grade'].cat.categories,
df2['grade'].cat.categories)
dfx = pd.concat([df1, df2])
dfx['grade'].cat.categories
self.assert_numpy_array_equal(df['grade'].cat.categories,
dfx['grade'].cat.categories)
def test_concat_preserve(self):
# GH 8641
# series concat not preserving category dtype
s = Series(list('abc'), dtype='category')
s2 = Series(list('abd'), dtype='category')
def f():
pd.concat([s, s2])
self.assertRaises(ValueError, f)
result = pd.concat([s, s], ignore_index=True)
expected = Series(list('abcabc')).astype('category')
tm.assert_series_equal(result, expected)
result = pd.concat([s, s])
expected = Series(
list('abcabc'), index=[0, 1, 2, 0, 1, 2]).astype('category')
tm.assert_series_equal(result, expected)
a = Series(np.arange(6, dtype='int64'))
b = Series(list('aabbca'))
df2 = DataFrame({'A': a,
'B': b.astype('category', categories=list('cab'))})
result = pd.concat([df2, df2])
expected = DataFrame({'A': pd.concat([a, a]),
'B': pd.concat([b, b]).astype(
'category', categories=list('cab'))})
tm.assert_frame_equal(result, expected)
def test_categorical_index_preserver(self):
a = Series(np.arange(6, dtype='int64'))
b = Series(list('aabbca'))
df2 = DataFrame({'A': a,
'B': b.astype('category', categories=list(
'cab'))}).set_index('B')
result = pd.concat([df2, df2])
expected = DataFrame({'A': pd.concat([a, a]),
'B': pd.concat([b, b]).astype(
'category', categories=list(
'cab'))}).set_index('B')
tm.assert_frame_equal(result, expected)
# wrong categories
df3 = DataFrame({'A': a,
'B': b.astype('category', categories=list(
'abc'))}).set_index('B')
self.assertRaises(TypeError, lambda: pd.concat([df2, df3]))
def test_append(self):
cat = pd.Categorical(["a", "b"], categories=["a", "b"])
vals = [1, 2]
df = pd.DataFrame({"cats": cat, "vals": vals})
cat2 = pd.Categorical(["a", "b", "a", "b"], categories=["a", "b"])
vals2 = [1, 2, 1, 2]
exp = pd.DataFrame({"cats": cat2,
"vals": vals2}, index=pd.Index([0, 1, 0, 1]))
res = df.append(df)
tm.assert_frame_equal(exp, res)
# Concat should raise if the two categoricals do not have the same
# categories
cat3 = pd.Categorical(["a", "b"], categories=["a", "b", "c"])
vals3 = [1, 2]
df_wrong_categories = pd.DataFrame({"cats": cat3, "vals": vals3})
def f():
df.append(df_wrong_categories)
self.assertRaises(ValueError, f)
def test_merge(self):
# GH 9426
right = DataFrame({'c': {0: 'a',
1: 'b',
2: 'c',
3: 'd',
4: 'e'},
'd': {0: 'null',
1: 'null',
2: 'null',
3: 'null',
4: 'null'}})
left = DataFrame({'a': {0: 'f',
1: 'f',
2: 'f',
3: 'f',
4: 'f'},
'b': {0: 'g',
1: 'g',
2: 'g',
3: 'g',
4: 'g'}})
df = pd.merge(left, right, how='left', left_on='b', right_on='c')
# object-object
expected = df.copy()
# object-cat
cright = right.copy()
cright['d'] = cright['d'].astype('category')
result = pd.merge(left, cright, how='left', left_on='b', right_on='c')
tm.assert_frame_equal(result, expected)
# cat-object
cleft = left.copy()
cleft['b'] = cleft['b'].astype('category')
result = pd.merge(cleft, cright, how='left', left_on='b', right_on='c')
tm.assert_frame_equal(result, expected)
# cat-cat
cright = right.copy()
cright['d'] = cright['d'].astype('category')
cleft = left.copy()
cleft['b'] = cleft['b'].astype('category')
result = pd.merge(cleft, cright, how='left', left_on='b', right_on='c')
tm.assert_frame_equal(result, expected)
def test_repeat(self):
# GH10183
cat = pd.Categorical(["a", "b"], categories=["a", "b"])
exp = pd.Categorical(["a", "a", "b", "b"], categories=["a", "b"])
res = cat.repeat(2)
self.assert_categorical_equal(res, exp)
def test_na_actions(self):
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
vals = ["a", "b", np.nan, "d"]
df = pd.DataFrame({"cats": cat, "vals": vals})
cat2 = pd.Categorical([1, 2, 3, 3], categories=[1, 2, 3])
vals2 = ["a", "b", "b", "d"]
df_exp_fill = pd.DataFrame({"cats": cat2, "vals": vals2})
cat3 = pd.Categorical([1, 2, 3], categories=[1, 2, 3])
vals3 = ["a", "b", np.nan]
df_exp_drop_cats = pd.DataFrame({"cats": cat3, "vals": vals3})
"""
In this example we see how to create a multiclass neural net with PyTorch
"""
import os
import pickle
from typing import List, Optional, Any, Dict
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
import torch
from preprocessing import dense_preprocessing_pipeline
from model_utils import build_classifier, train_classifier, save_torch_model
preprocessor = None
def fit(
X: pd.DataFrame,
y: pd.Series,
output_dir: str,
class_order: Optional[List[str]] = None,
row_weights: Optional[np.ndarray] = None,
**kwargs,
):
""" This hook MUST ALWAYS be implemented for custom tasks.
This hook defines how DataRobot will train this task.
DataRobot runs this hook when the task is being trained inside a blueprint.
DataRobot will pass the training data, project target, and additional parameters based on the project
and blueprint configuration as parameters to this function.
As an output, this hook is expected to create an artifact containing a trained object,
that is then used to score new data.
Parameters
----------
X: pd.DataFrame
Training data that DataRobot passes when this task is being trained. Note that both the training data AND
column (feature) names are passed
y: pd.Series
Project's target column.
output_dir: str
A path to the output folder (also provided in the --output parameter of the 'drum fit' command).
The artifact [in this example - the trained PyTorch model plus the fitted preprocessor]
must be saved into this folder.
class_order: Optional[List[str]]
This indicates which class DataRobot considers positive or negative. E.g. 'yes' is positive, 'no' is negative.
Class order will always be passed to fit by DataRobot for classification tasks,
and never otherwise. When models predict, they output a likelihood of one class, with a
value from 0 to 1. The likelihood of the other class is 1 - this likelihood.
The first element in the class_order list is the name of the class considered negative inside DR's project,
and the second is the name of the class that is considered positive
row_weights: Optional[np.ndarray]
An array of non-negative numeric values which can be used to dictate how important
a row is. Row weights is only optionally used, and there will be no filtering for which
custom models support this. There are two situations when values will be passed into
row_weights, during smart downsampling and when weights are explicitly specified in the project settings.
kwargs
Added for forwards compatibility.
Returns
-------
None
fit() doesn't return anything, but must output an artifact
(typically containing a trained object) into output_dir
so that the trained object can be used during scoring.
"""
print("Fitting Preprocessing pipeline")
preprocessor = dense_preprocessing_pipeline.fit(X)
lb = LabelEncoder().fit(y)
# write out the class labels file
print("Serializing preprocessor and class labels")
with open(os.path.join(output_dir, "class_labels.txt"), mode="w") as f:
f.write("\n".join(str(label) for label in lb.classes_))
# Dump the trained object [in this example - a trained PyTorch model]
# into an artifact [in this example - artifact.pth]
# and save it into output_dir so that it can be used later when scoring data
# Note: DRUM will automatically load the model when it is in the default format (see docs)
# and there is only one artifact file
with open(os.path.join(output_dir, "preprocessor.pkl"), mode="wb") as f:
pickle.dump(preprocessor, f)
print("Transforming input data")
X = preprocessor.transform(X)
y = lb.transform(y)
estimator, optimizer, criterion = build_classifier(X, len(lb.classes_))
print("Training classifier")
train_classifier(X, y, estimator, optimizer, criterion)
artifact_name = "artifact.pth"
save_torch_model(estimator, output_dir, artifact_name)
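# Illustrative local check of fit() - a sketch only; the toy frame, target values and
# output directory below are assumptions, not part of the DataRobot hook contract:
#
#   X_demo = pd.DataFrame({"feat_1": [0.1, 0.4, 0.9], "feat_2": [1.0, 0.5, 0.2]})
#   y_demo = pd.Series(["cat", "dog", "cat"])
#   fit(X_demo, y_demo, output_dir="/tmp/artifacts")
#
# After the call, /tmp/artifacts should hold class_labels.txt, preprocessor.pkl and
# artifact.pth, which is exactly what load_model() below expects to find side by side.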
def load_model(code_dir: str) -> Any:
"""
Can be used to load supported models if your model has multiple artifacts, or for loading
models that DRUM does not natively support
Parameters
----------
code_dir : is the directory where model artifact and additional code are provided, passed in
Returns
-------
If used, this hook must return a non-None value
"""
global preprocessor
with open(os.path.join(code_dir, "preprocessor.pkl"), mode="rb") as f:
preprocessor = pickle.load(f)
model = torch.load(os.path.join(code_dir, "artifact.pth"))
model.eval()
return model
def score(data: pd.DataFrame, model: Any, **kwargs: Dict[str, Any]) -> pd.DataFrame:
"""
DataRobot will run this hook when the task is used for scoring inside a blueprint
This hook defines the output of a custom estimator and returns predictions on input data.
It should be skipped if a task is a transform.
Note: While best practice is to include the score hook, if the score hook is not present DataRobot will
add a score hook and call the default predict method for the library
See https://github.com/datarobot/datarobot-user-models#built-in-model-support for details
Parameters
----------
data: pd.DataFrame
Is the dataframe to make predictions against. If the `transform` hook is utilized,
`data` will be the transformed data
model: Any
Trained object, extracted by DataRobot from the artifact created in fit().
In this example, the trained PyTorch model loaded from artifact.pth by load_model() above.
kwargs:
Additional keyword arguments to the method
Returns
-------
This method should return predictions as a dataframe with the following format:
Classification: must have columns for each class label with floating- point class
probabilities as values. Each row should sum to 1.0. The original class names defined in the project
must be used as column names. This applies to binary and multi-class classification.
"""
# Note how we use the preprocessor that's loaded in load_model
data = preprocessor.transform(data)
data_tensor = torch.from_numpy(data).type(torch.FloatTensor)
predictions = model(data_tensor).cpu().data.numpy()
return pd.DataFrame(data=predictions, columns=kwargs["class_labels"])
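# Shape of the expected score() output for a multiclass task (class names and numbers
# are invented for illustration): one column per class label, each row summing to 1.0.
#
#      setosa  versicolor  virginica
#   0    0.80        0.15       0.05
#   1    0.10        0.70       0.20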
import pandas as pd
import datetime
import numpy as np
import glob
def ingest_historical_data(csv_file):
"""
Read data from csv file
"""
print('reading data...')
df = pd.read_csv(csv_file)
df.columns = ['created_at','type','actor_login_h','repo_name_h','payload_action','payload_pull_request_merged']
print('to datetime..')
df['created_at'] = pd.to_datetime(df['created_at'])
print('sorting...')
df = df.sort_values('created_at')
return df
def subset_data(df,start,end):
"""
Return temporal data subset based on start and end dates
"""
print('subsetting...')
df = df[ (df['created_at'] >= start) & (df['created_at'] <= end) ]
return(df)
def shift_data(df,shift, end):
"""
Shift data based on fixed offset (shift) and subset based on upper limit (end)
"""
print('shifting...')
df['created_at'] += shift
df = df[df['created_at'] <= end]
return df
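# Sketch of how these helpers chain together (file name, dates and offset are assumptions):
#
#   events = ingest_historical_data("github_events.csv")
#   train = subset_data(events, "2017-01-01", "2017-06-30")
#   shifted = shift_data(train.copy(), pd.Timedelta(days=181), "2017-12-31")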
def sample_data(df,start,end,proportional=True):
"""
Sample data either uniformly (proportional=False) or proportionally (proportional=True) to fill the test period from start to end
"""
print('inter-event times...')
df['inter_event_times'] = df['created_at'] - df['created_at'].shift()
inter_event_times = df['inter_event_times'].dropna()
max_time = df['created_at'].min()
multiplier=( (pd.to_datetime(end) - pd.to_datetime(start)) / df['inter_event_times'].mean() ) / float(len(df.index))
#repeat until enough data is sampled to fill the test period
while max_time < pd.to_datetime(end):
if proportional:
sample = pd.DataFrame(df['inter_event_times'].dropna().sample(int(multiplier*len(df.index)),replace=True))
sampled_inter_event_times = sample.cumsum()
else:
sample = pd.DataFrame(np.random.uniform(np.min(inter_event_times.dt.total_seconds()),1.0,int(multiplier*len(df.index))))[0].round(0)
sample = pd.to_timedelta(sample,unit='s')
sampled_inter_event_times = pd.DataFrame(sample).cumsum()
event_times = (pd.to_datetime(start) + sampled_inter_event_times)
max_time = pd.to_datetime(event_times.max().values[0])
multiplier*=1.5
event_times = event_times[(event_times < pd.to_datetime(end)).values]
if proportional:
users = df['actor_login_h']
repos = df['repo_name_h']
events = df['type']
else:
users = pd.Series(df['actor_login_h'].unique())
repos = pd.Series(df['repo_name_h'].unique())
events = pd.Series(df['type'].unique())
users = users.sample(len(event_times),replace=True).values
repos = repos.sample(len(event_times),replace=True).values
events = events.sample(len(event_times),replace=True).values
df_out = pd.DataFrame({'time':event_times.values.flatten(),
'event':events,
'user':users,
'repo':repos})
if proportional:
pr_action = df[df['type'] == 'PullRequestEvent']['payload_action']
pr_merged = df[df['type'] == 'PullRequestEvent']['payload_pull_request_merged']
iss_action = df[df['type'] == 'IssuesEvent']['payload_action']
else:
pr_action = df[df['type'] == 'PullRequestEvent']['payload_action'].unique()
pr_merged = df[df['type'] == 'PullRequestEvent']['payload_pull_request_merged'].unique()
iss_action = df[df['type'] == 'IssuesEvent']['payload_action'].unique()
pull_requests = df_out[df_out['event'] == 'PullRequestEvent']
pull_requests['payload_action'] = pd.Series(pr_action).sample(len(pull_requests.index),
replace=True).values
pull_requests['payload_pull_request_merged'] = pd.Series(pr_merged).sample(len(pull_requests.index),
replace=True).values
issues = df_out[df_out['event'] == 'IssuesEvent']
issues['payload_action'] = pd.Series(iss_action)
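# Illustrative call of the sampler above (dates are assumptions): proportional=True
# resamples the observed inter-event times, proportional=False draws them uniformly.
#
#   simulated = sample_data(train, "2017-07-01", "2017-12-31", proportional=True)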
import os
import time
import pandas as pd
import numpy as np
import functools
from functools import reduce
def time_pass(func):
@functools.wraps(func)
def wrapper(*args, **kw):
time_begin = time.time()
result = func(*args, **kw)
time_stop = time.time()
time_passed = time_stop - time_begin
minutes, seconds = divmod(time_passed, 60)
hours, minutes = divmod(minutes, 60)
print('%s: %s:%s:%s' % (func.__name__, int(hours), int(minutes), int(seconds)))
return result
return wrapper
@time_pass
def complete_data(the_dat_edge, the_dat_app, the_input_path):
"""
Read the remaining data files and append them to the data that was read in earlier
"""
def read_big_table(path):
reader = pd.read_table(path, header=None, chunksize=10000)
data = pd.concat(reader, axis=0, ignore_index=True)
return data
def read_edge(filename):  # helper that batch-reads the split-up data files
tmp = read_big_table(os.path.join(the_input_path, "open_data/dat_edge/%s" % filename))
tmp.columns = ['from_id', 'to_id', 'info']
return tmp
dat_edge_names = ['dat_edge_%s' % str(x) for x in list(range(2, 12))]
dat_edge_left = reduce(lambda x, y: x.append(y),
(read_edge(filename) for filename in dat_edge_names))
def read_app(filename):  # helper that batch-reads the split-up data files
tmp = read_big_table(os.path.join(the_input_path, "open_data/dat_app/%s" % filename))
tmp.columns = ['id', 'apps']
return tmp
dat_app_names = ['dat_app_%s' % str(x) for x in list(range(2, 8))]
dat_app_left = reduce(lambda x, y: x.append(y),
(read_app(filename) for filename in dat_app_names))
dat_edge_1 = the_dat_edge.append(dat_edge_left)  # merge the first chunk with the remaining data
dat_app_1 = the_dat_app.append(dat_app_left)  # merge the first chunk with the remaining data
return dat_edge_1, dat_app_1
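# The readers above assume a directory layout like the following under the_input_path
# (inferred from the code; the exact tree is an assumption):
#   open_data/dat_edge/dat_edge_2 ... dat_edge_11   (tab-separated, no header)
#   open_data/dat_app/dat_app_2  ... dat_app_7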
@time_pass
def dummy_symbol(the_dat_symbol):
"""
1. Collect all_first, the set of all possible first-level categories in dat_symbol,
2. then, for each id, check which elements of all_first appear in its 'symbol' column, giving a 0-1 vector,
3. handle the first-level/second-level combinations the same way; handling the second level on its own does not seem necessary
"""
def get_first(string):
f_s = string.split(',')
first = set(list(map(lambda x: x.split('_')[0], f_s)))
return first
def get_second(string):
f_s = string.split(',')
second = set(list(map(lambda x: x.split('_')[1], f_s)))
return second
def get_both(string):
f_s = string.split(',')
return set(f_s)
def is_in_first(string):
f_s = string.split(',')
first = set(list(map(lambda x: x.split('_')[0], f_s)))
is_in = list(map(lambda x: x in first, all_first))
return is_in
def is_in_second(string):
f_s = string.split(',')
second = set(list(map(lambda x: x.split('_')[1], f_s)))
is_in = list(map(lambda x: x in second, all_second))
return is_in
def is_in_both(string):
f_s = set(string.split(','))
is_in = list(map(lambda x: x in f_s, all_both))
return is_in
tmp = the_dat_symbol['symbol'].unique()
# collect all first-level categories and the first+second-level combinations
all_first = reduce(lambda x, y: x.union(y),
map(get_first, tmp))
all_second = reduce(lambda x, y: x.union(y),
map(get_second, tmp))
all_both = reduce(lambda x, y: x.union(y),
map(get_both, tmp))
# build the 0-1 indicator vector for each id and store it as a DataFrame
in_first_0 = pd.DataFrame(list(map(is_in_first, the_dat_symbol['symbol'])),
columns=all_first)
in_second_0 = pd.DataFrame(list(map(is_in_second, the_dat_symbol['symbol'])),
columns=all_second)
in_both_0 = pd.DataFrame(list(map(is_in_both, the_dat_symbol['symbol'])),
columns=all_both)
in_first_1 = pd.concat([the_dat_symbol[['id']], in_first_0], axis=1) + 0
in_second_1 = pd.concat([the_dat_symbol[['id']], in_second_0], axis=1) + 0
in_both_1 = pd.concat([the_dat_symbol[['id']], in_both_0], axis=1) + 0
return in_first_1, in_second_1, in_both_1
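# Worked example of the indicator encoding (the symbol string is hypothetical):
# for symbol == "a_x,b_y", get_first gives {"a", "b"}, get_second gives {"x", "y"} and
# get_both gives {"a_x", "b_y"}; the matching row of in_first_1 is 1 under columns
# "a" and "b" and 0 elsewhere, and in_second_1 / in_both_1 behave analogously.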
@time_pass
def deal_dat_edge(data_all):
"""
1. Clean up dat_edge; running dat_edge.head(15) shows that rows like row 10 (multi-valued info) must be separated from the rest,
2. split into dat_edge_single and dat_edge_multi,
3. then reshape dat_edge_multi into the same format as dat_edge_single, called dat_edge_multi_new,
4. then merge the two into dat_edge_new,
5. finally dat_edge_split_2 splits info into three parts: ['date', 'times', 'weight']
"""
length = list(map(len, map(lambda x: x.split(','), data_all['info'])))
dat_edge_single = data_all[np.array(length) == 1]
dat_edge_multi = data_all[np.array(length) > 1]
def dat_edge_split(i):
i_info = dat_edge_multi.iloc[i]
string = i_info['info']
s = string.split(',')
result = pd.DataFrame({'info': s,
'from_id': [i_info['from_id']] * len(s),
'to_id': [i_info['to_id']] * len(s),
'id': [i_info['id']] * len(s)})
return result[['id', 'from_id', 'to_id', 'info']]
all_df = map(dat_edge_split, range(len(dat_edge_multi)))
dat_edge_multi_new = pd.concat(all_df, axis=0, ignore_index=True)  # fairly slow
dat_edge_new = pd.concat([dat_edge_single, dat_edge_multi_new], axis=0, ignore_index=True)
# dat_edge_new = dat_edge_single.append(dat_edge_multi_new, ignore_index=True)
@time_pass
def dat_edge_split_2(data):
def split(string):
date, left = string.split(':')
times, weight = left.split('_')
return date, times, weight
info_df = pd.DataFrame(list(map(split, data['info'])),
columns=['date', 'times', 'weight'])
data_new_2 = pd.concat([data[['id', 'from_id', 'to_id']], info_df], axis=1)
return data_new_2
dat_edge_new_2 = dat_edge_split_2(dat_edge_new)
return dat_edge_new_2
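# Worked example of the 'info' parsing (values are hypothetical): a single entry such as
# "2017-11:5_371.2" becomes date="2017-11", times="5", weight="371.2"; a multi-valued cell
# such as "2017-11:5_371.2,2017-12:2_87.0" is first exploded into one row per entry
# by dat_edge_split before being split into the three columns.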
@time_pass
def deal_edge(the_sample_train, the_dat_edge):
"""
Extract each user's "outflow" features: vector length, sum of times, median, min and max of times,
sum of weight, median, min and max of weight; these nine base features capture the "outflow" side
"""
col_names = (['length', 'unique_count', 'times_sum', 'weight_sum']
+ ['dup_ratio_left', 'dup_ratio_1', 'dup_ratio_2', 'dup_ratio_3', 'dup_ratio_4', 'dup_ratio_5']
+ ['times_left', 'times_1', 'times_2', 'times_3', 'times_4', 'times_5',
'times_6', 'times_7', 'times_8', 'times_9', 'times_10']
+ ['times_min', 'times_25', 'times_median', 'times_75', 'times_max']
+ ['weight_min', 'weight_25', 'weight_median', 'weight_75', 'weight_max']
+ ['times_up_out_ratio', 'times_low_out_ratio']
+ ['weight_up_out_ratio', 'weight_low_out_ratio']
+ ['time_sign_trend', 'time_abs', 'weight_sign_trend', 'weight_abs']
+ ['times_2017_11', 'times_2017_12', 'times_2017_13']
+ ['weight_2017_11', 'weight_2017_12', 'weight_2017_13']
+ ['date_unique_count', 'date_min', 'date_max', 'days_gap']
+ ['latest_times', 'latest_peoples', 'latest_weights', 'multi_ratio'])
sample_dat_edge_from = pd.merge(the_sample_train, the_dat_edge,
left_on='id', right_on='from_id',
how='inner')
dat_edge_from = deal_dat_edge(sample_dat_edge_from)
dat_edge_from['times'] = list(map(int, dat_edge_from['times']))
dat_edge_from['weight'] = list(map(float, dat_edge_from['weight']))
unique_id_from = np.unique(dat_edge_from['id'])
feature_9_1 = list(map(lambda x: cal_9_feature(x, dat_edge_from, 'to_id'), unique_id_from))
df_feature_9_1 = pd.DataFrame(feature_9_1, columns=['out_%s' % x for x in col_names])
df_feature_9_1['id'] = unique_id_from
    # Extract the "inflow" features for each user; analogous to the above, nine "inflow" features can be extracted
sample_dat_edge_to = pd.merge(the_sample_train, the_dat_edge,
left_on='id', right_on='to_id',
how='inner')
dat_edge_to = deal_dat_edge(sample_dat_edge_to)
dat_edge_to['times'] = list(map(int, dat_edge_to['times']))
dat_edge_to['weight'] = list(map(float, dat_edge_to['weight']))
unique_id_to = np.unique(dat_edge_to['id'])
feature_9_2 = list(map(lambda x: cal_9_feature(x, dat_edge_to, 'from_id'), unique_id_to))
df_feature_9_2 = | pd.DataFrame(feature_9_2, columns=['in_%s' % x for x in col_names]) | pandas.DataFrame |
import argparse
import datetime
import os
import shutil
import unittest
from unittest import mock
import pandas
from matrix.common import date
from matrix.common.request.request_tracker import Subtask
from matrix.common.query.cell_query_results_reader import CellQueryResultsReader
from matrix.common.query.feature_query_results_reader import FeatureQueryResultsReader
from matrix.docker.matrix_converter import main, MatrixConverter, SUPPORTED_FORMATS
from matrix.docker.query_runner import QueryType
class TestMatrixConverter(unittest.TestCase):
def setUp(self):
self.test_manifest = {
"columns": ["a", "b", "c"],
"part_urls": ["A", "B", "C"],
"record_count": 5
}
args = ["test_id", "test_exp_manifest", "test_cell_manifest",
"test_gene_manifest", "test_target", "loom", "."]
parser = argparse.ArgumentParser()
parser.add_argument("request_id")
parser.add_argument("expression_manifest_key")
parser.add_argument("cell_metadata_manifest_key")
parser.add_argument("gene_metadata_manifest_key")
parser.add_argument("target_path")
parser.add_argument("format", choices=SUPPORTED_FORMATS)
parser.add_argument("working_dir")
self.args = parser.parse_args(args)
self.matrix_converter = MatrixConverter(self.args)
@mock.patch("os.remove")
@mock.patch("matrix.common.request.request_tracker.RequestTracker.creation_date", new_callable=mock.PropertyMock)
@mock.patch("matrix.common.request.request_tracker.RequestTracker.complete_request")
@mock.patch("matrix.common.request.request_tracker.RequestTracker.complete_subtask_execution")
@mock.patch("matrix.docker.matrix_converter.MatrixConverter._upload_converted_matrix")
@mock.patch("matrix.docker.matrix_converter.MatrixConverter._to_loom")
@mock.patch("matrix.common.query.query_results_reader.QueryResultsReader._parse_manifest")
def test_run(self,
mock_parse_manifest,
mock_to_loom,
mock_upload_converted_matrix,
mock_subtask_exec,
mock_complete_request,
mock_creation_date,
mock_os_remove):
mock_parse_manifest.return_value = self.test_manifest
mock_creation_date.return_value = date.to_string(datetime.datetime.utcnow())
mock_to_loom.return_value = "local_matrix_path"
self.matrix_converter.run()
mock_manifest_calls = [
mock.call("test_cell_manifest"),
mock.call("test_exp_manifest"),
mock.call("test_gene_manifest")
]
mock_parse_manifest.assert_has_calls(mock_manifest_calls)
mock_to_loom.assert_called_once()
mock_subtask_exec.assert_called_once_with(Subtask.CONVERTER)
mock_complete_request.assert_called_once()
mock_upload_converted_matrix.assert_called_once_with("local_matrix_path", "test_target")
@mock.patch("s3fs.S3FileSystem.open")
def test__n_slices(self, mock_open):
manifest_file_path = "tests/functional/res/cell_metadata_manifest"
with open(manifest_file_path) as f:
mock_open.return_value = f
self.matrix_converter.query_results = {
QueryType.CELL: CellQueryResultsReader("test_manifest_key")
}
self.assertEqual(self.matrix_converter._n_slices(), 8)
def test__make_directory(self):
self.assertEqual(os.path.isdir('test_target'), False)
results_dir = self.matrix_converter._make_directory()
self.assertEqual(os.path.isdir('test_target'), True)
shutil.rmtree(results_dir)
def test__zip_up_matrix_output(self):
results_dir = self.matrix_converter._make_directory()
shutil.copyfile('LICENSE', './test_target/LICENSE')
path = self.matrix_converter._zip_up_matrix_output(results_dir, ['LICENSE'])
self.assertEqual(path, './test_target.zip')
os.remove('./test_target.zip')
@mock.patch("pandas.DataFrame.to_csv")
@mock.patch("matrix.common.query.feature_query_results_reader.FeatureQueryResultsReader.load_results")
@mock.patch("matrix.common.query.query_results_reader.QueryResultsReader._parse_manifest")
def test__write_out_gene_dataframe__with_compression(self, mock_parse_manifest, mock_load_results, mock_to_csv):
self.matrix_converter.query_results = {
QueryType.FEATURE: FeatureQueryResultsReader("test_manifest_key")
}
results_dir = self.matrix_converter._make_directory()
mock_load_results.return_value = | pandas.DataFrame() | pandas.DataFrame |
import pandas as pd
import pytest
from isic_challenge_scoring import metrics
def test_to_labels(categories):
probabilities = pd.DataFrame(
[
# NV
[0.0, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
# undecided
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
# AKIEC
[0.2, 0.2, 0.2, 0.8, 0.2, 0.2, 0.2],
# undecided
[0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4],
# MEL
[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
],
columns=categories,
)
labels = metrics._to_labels(probabilities)
assert labels.equals(pd.Series(['NV', 'undecided', 'AKIEC', 'undecided', 'MEL']))
def test_get_frequencies(categories):
labels = pd.Series(['MEL', 'MEL', 'VASC', 'AKIEC', 'MEL'])
weights = pd.Series([1.0, 1.0, 1.0, 1.0, 0.0])
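    # The last 'MEL' label carries zero weight, so it must not contribute to the
    # frequencies asserted below (MEL counts 2.0 rather than 3.0).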
label_frequencies = metrics._get_frequencies(labels, weights, categories)
assert label_frequencies.equals(
pd.Series(
{'MEL': 2.0, 'NV': 0.0, 'BCC': 0.0, 'AKIEC': 1.0, 'BKL': 0.0, 'DF': 0.0, 'VASC': 1.0}
)
)
# Ensure the ordering is correct (although Python3.6 dicts are ordered)
assert label_frequencies.index.equals(categories)
@pytest.mark.parametrize(
'truth_labels, prediction_labels, correct_value',
[
(['MEL'], ['MEL'], 1.0),
(['NV'], ['NV'], 1.0),
(['NV'], ['MEL'], 0.0),
(['MEL', 'MEL'], ['MEL', 'MEL'], 1.0),
(['MEL', 'NV'], ['MEL', 'NV'], 1.0),
(['MEL', 'NV'], ['MEL', 'MEL'], 0.5),
(['MEL', 'NV', 'MEL'], ['MEL', 'MEL', 'MEL'], 0.5),
(['MEL', 'NV', 'MEL', 'MEL'], ['MEL', 'MEL', 'MEL', 'MEL'], 0.5),
(['MEL', 'NV', 'MEL', 'MEL'], ['MEL', 'MEL', 'MEL', 'NV'], 1 / 3),
(['MEL', 'NV', 'MEL', 'MEL'], ['NV', 'MEL', 'NV', 'NV'], 0.0),
],
)
@pytest.mark.parametrize('test_weight_zero', [False, True])
def test_label_balanced_multiclass_accuracy(
truth_labels, prediction_labels, correct_value, test_weight_zero, categories
):
weights = [1.0] * len(truth_labels)
if test_weight_zero:
# Insert a final incorrect, but unweighted prediction
truth_labels = truth_labels + ['MEL']
prediction_labels = prediction_labels + ['NV']
weights = weights + [0.0]
value = metrics._label_balanced_multiclass_accuracy(
pd.Series(truth_labels), pd.Series(prediction_labels), pd.Series(weights), categories
)
assert value == correct_value
@pytest.mark.parametrize(
'truth_probabilities, prediction_probabilities, sensitivity_threshold, correct_value',
[
# This only checks some edge cases for sanity
# Perfect predictor, tolerant threshold
([0.0, 0.0, 1.0, 1.0], [0.2, 0.4, 0.6, 0.8], 0.1, 1.0),
# Perfect predictor, stringent threshold
([0.0, 0.0, 1.0, 1.0], [0.2, 0.4, 0.6, 0.8], 1.0, 1.0),
# 50/50 predictor, tolerant threshold
([0.0, 0.0, 1.0, 1.0], [0.3, 0.7, 0.3, 0.7], 0.1, 0.495),
# 50/50 predictor, stringent threshold
([0.0, 0.0, 1.0, 1.0], [0.3, 0.7, 0.3, 0.7], 1.0, 0.0),
# Wrong predictor, tolerant threshold
([0.0, 0.0, 1.0, 1.0], [0.8, 0.6, 0.4, 0.2], 0.1, 0.0),
# Wrong predictor, stringent threshold
([0.0, 0.0, 1.0, 1.0], [0.8, 0.6, 0.4, 0.2], 1.0, 0.0),
],
)
@pytest.mark.parametrize('test_weight_zero', [False, True])
def test_auc_above_sensitivity(
truth_probabilities,
prediction_probabilities,
sensitivity_threshold,
correct_value,
test_weight_zero,
):
weights = [1.0] * len(truth_probabilities)
if test_weight_zero:
# Insert a final incorrect, but unweighted prediction
truth_probabilities = truth_probabilities + [1.0]
prediction_probabilities = prediction_probabilities + [0.2]
weights = weights + [0.0]
value = metrics.auc_above_sensitivity(
| pd.Series(truth_probabilities) | pandas.Series |
#!/usr/bin/env python
#
# Parses and compiles metrics previously computed by calc_metrics.sh.
#
# Usage: ./compile_metrics.py FOLDER
#
# FOLDER should contain subfolders like lddt, trr_score, etc. This will output
# a file, combined_metrics.csv, in FOLDER.
#
import pandas as pd
import numpy as np
import os, glob, argparse, sys
from collections import OrderedDict
p = argparse.ArgumentParser()
p.add_argument('folder', help='Folder of outputs to process')
p.add_argument('--out', help='Output file name.')
args = p.parse_args()
if args.out is None:
args.out = os.path.join(args.folder,'combined_metrics.csv')
if not os.path.isdir(args.folder):
sys.exit(f'ERROR: Input path {args.folder} not a folder.')
def parse_fastdesign_filters(folder):
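    # Collects the name/value score terms that appear after the
    # '#END_POSE_ENERGIES_TABLE' marker in each PDB file, plus the total score
    # from the line beginning with 'pose', into one record per design.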
files = glob.glob(os.path.join(folder,'*.pdb'))
records = []
for f in files:
row = OrderedDict()
row['name'] = os.path.basename(f)[:-4]
recording = False
with open(f) as inf:
for line in inf:
if recording and len(line)>1:
tokens = line.split()
if len(tokens) == 2:
row[tokens[0]] = float(tokens[1])
if '#END_POSE_ENERGIES_TABLE' in line:
recording=True
if line.startswith('pose'):
row['rosetta_energy'] = float(line.split()[-1])
records.append(row)
if len(records)>0: return pd.DataFrame.from_records(records)
return pd.DataFrame({'name':[]})
def parse_lddt(folder):
data = {'name':[], 'lddt':[]}
files = glob.glob(os.path.join(folder,'*.npz'))
if len(files)==0:
return | pd.DataFrame({'name':[]}) | pandas.DataFrame |
import pandas as pd
import numpy as np
from pandas import DataFrame, get_dummies
import keras
from keras.layers import Dense, Dropout
from keras.models import Sequential
from keras.utils import to_categorical
from keras.callbacks import EarlyStopping
from keras.constraints import max_norm
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
f = pd.read_csv('presidents-data-words-january-3-2018.csv')
df = | DataFrame(f) | pandas.DataFrame |
"""Tests suite for Period handling.
Parts derived from scikits.timeseries code, original authors:
- <NAME> & <NAME>
- pierregm_at_uga_dot_edu - mattknow_ca_at_hotmail_dot_com
"""
from unittest import TestCase
from datetime import datetime, timedelta
from numpy.ma.testutils import assert_equal
from pandas.tseries.period import Period, PeriodIndex
from pandas.tseries.index import DatetimeIndex, date_range
from pandas.tseries.tools import to_datetime
import pandas.core.datetools as datetools
import numpy as np
from pandas import Series, TimeSeries
from pandas.util.testing import assert_series_equal
class TestPeriodProperties(TestCase):
"Test properties such as year, month, weekday, etc...."
#
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
def test_interval_constructor(self):
i1 = Period('1/1/2005', freq='M')
i2 = Period('Jan 2005')
self.assertEquals(i1, i2)
i1 = Period('2005', freq='A')
i2 = Period('2005')
i3 = Period('2005', freq='a')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
i4 = Period('2005', freq='M')
i5 = Period('2005', freq='m')
self.assert_(i1 != i4)
self.assertEquals(i4, i5)
i1 = Period.now('Q')
i2 = Period(datetime.now(), freq='Q')
i3 = Period.now('q')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
# Biz day construction, roll forward if non-weekday
i1 = Period('3/10/12', freq='B')
i2 = Period('3/12/12', freq='D')
self.assertEquals(i1, i2.asfreq('B'))
i3 = Period('3/10/12', freq='b')
self.assertEquals(i1, i3)
i1 = Period(year=2005, quarter=1, freq='Q')
i2 = Period('1/1/2005', freq='Q')
self.assertEquals(i1, i2)
i1 = Period(year=2005, quarter=3, freq='Q')
i2 = Period('9/1/2005', freq='Q')
self.assertEquals(i1, i2)
i1 = Period(year=2005, month=3, day=1, freq='D')
i2 = Period('3/1/2005', freq='D')
self.assertEquals(i1, i2)
i3 = Period(year=2005, month=3, day=1, freq='d')
self.assertEquals(i1, i3)
i1 = Period(year=2012, month=3, day=10, freq='B')
i2 = Period('3/12/12', freq='B')
self.assertEquals(i1, i2)
i1 = Period('2005Q1')
i2 = Period(year=2005, quarter=1, freq='Q')
i3 = Period('2005q1')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
i1 = Period('05Q1')
self.assertEquals(i1, i2)
lower = | Period('05q1') | pandas.tseries.period.Period |
import okex.account_api as account
import okex.futures_api as future
import okex.spot_api as spot
import pandas as pd
import datetime
import time
from tools.DBTool import DBTool
from tools.ReadConfig import ReadConfig
from sqlalchemy import create_engine
from sqlalchemy.types import DECIMAL, TEXT, Date, DateTime
class Account():
def __init__(self,spotAPI,accountAPI,engine):
self.spotAPI =spotAPI
self.accountAPI = accountAPI
self.engine= engine
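    # Minimal usage sketch (the engine URL, credentials and timestamp below are
    # placeholder assumptions, not real configuration):
    #
    #   engine = create_engine("mysql+pymysql://user:pwd@host/orange")
    #   acct = Account(spotAPI, accountAPI, engine)
    #   now = datetime.datetime.now()
    #   spot_df = acct.get_okex_spot_accounts(now)
    #   acct.save_okex_spot_accounts(spot_df)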
def get_timestamp(self,time):
# now = datetime.datetime.now()
t = time.isoformat("T", "milliseconds")
return t + "Z"
def get_okex_spot_accounts(self,time):
        # OKEX spot account information
result = self.spotAPI.get_account_info()
spotAccount = pd.DataFrame(result, columns=['frozen', 'hold', 'id', 'currency', 'balance', 'available', 'holds'])
spotAccount['time'] = time
spotAccount['ts'] = time.timestamp()
return spotAccount
def save_okex_spot_accounts(self,spotAccounts):
dtypedict = {'frozen': DECIMAL(18, 8), 'hold': DECIMAL(18, 8), 'id': TEXT, 'currency': TEXT,
'balance': DECIMAL(18, 8), 'available': DECIMAL(18, 8), 'holds': DECIMAL(18, 8), 'data': DateTime,
'ts': TEXT}
spotAccounts.to_sql(name='account_okex_spot', con=self.engine, chunksize=1000, if_exists='append', index=None,
dtype=dtypedict)
def get_okex_spot_fills(self, instrument_id, after, before):
        # OKEX spot trade fill records
result = self.spotAPI.get_fills(instrument_id=instrument_id, order_id='', after=after, before=before, limit='')
spot_fills = pd.DataFrame(result[0], columns=['ledger_id', 'trade_id', 'instrument_id', 'price', 'size', 'order_id',
'timestamp', 'exec_type', 'fee', 'side', 'currency'])
spot_fills['timestamp'] = spot_fills['timestamp'].apply(lambda x: x[:-1])
return spot_fills
def save_okex_spot_fills(self,spot_fills):
dtypedict = {'ledger_id': TEXT, 'trade_id': TEXT, 'instrument_id': TEXT, 'price': DECIMAL(18, 8),
'size': TEXT, 'order_id': TEXT, 'timestamp': DateTime, 'exec_type': TEXT,
'fee': DECIMAL(18, 8), 'side': TEXT, 'currency': TEXT}
spot_fills.to_sql(name='account_okex_spot_fill', con=self.engine, chunksize=1000, if_exists='append', index=None,
dtype=dtypedict)
def get_okex_asset_valuation(self, account_type, currency):
result0 = self.accountAPI.get_asset_valuation(account_type=account_type, valuation_currency=currency)
asset0 = pd.DataFrame(result0, columns=['account_type', 'balance', 'valuation_currency', 'timestamp'], index=[0])
def ff(x):
timeArray = time.strptime(x["timestamp"], "%Y-%m-%d %H:%M:%S")
timeStamp = int(time.mktime(timeArray))
return timeStamp
        # strip the trailing 'Z' from the returned timestamp fields
asset0['ts'] = asset0['timestamp'].apply(lambda x: x[:-1])
asset0['timestamp'] = asset0['timestamp'].apply(lambda x: x[:-1])
return asset0
def save_okex_asset_valuation(self,asset_valuation):
dtypedict = {'account_type': TEXT, 'balance': DECIMAL(18, 8), 'valuation_currency': TEXT, 'timestamp': DateTime,
'ts': TEXT}
asset_valuation.to_sql(name='account_okex_asset_valuation', con=self.engine, chunksize=1000, if_exists='append',
index=None, dtype=dtypedict)
def insert_func(self,table, conn, keys, data_iter):
"""
        Callback for the pandas to_sql method.
        :param table: the pandas table object
        :param conn: the database driver connection object
        :param keys: the column names to be written
        :param data_iter: iterator over the DataFrame rows
:return:
"""
dbapi_conn = conn.connection
        # Create a database cursor
with dbapi_conn.cursor() as cursor:
            # Iterate over the rows and build the SQL statements
for data_tuple in data_iter:
sql = """INSERT INTO {TABLE_NAME}(bill_name, room_number, bind_status, community_name, area_m) VALUES('{BILL_NAME}', '{ROOM_NUMBER}', {BIND_STATUS}, '{COMMUNITY_NAME}', {AREA_M}) ON conflict({UNIQUE_LIST}) DO UPDATE SET bill_name='{BILL_NAME}', bind_status={BIND_STATUS}, area_m='{AREA_M}'""".format(
TABLE_NAME=table.name, UNIQUE_LIST="community_name, room_number",
BILL_NAME=data_tuple[0], ROOM_NUMBER=data_tuple[2], COMMUNITY_NAME=data_tuple[1],
BIND_STATUS=data_tuple[3], AREA_M=data_tuple[4])
cursor.execute(sql)
def main():
config = ReadConfig()
    # Initialize the database connection
engine = create_engine(config.get_dbURL())
okex_api_key = config.get_okex("OKEX_API_KEY")
okex_secret_key = config.get_okex("OKEX_SECRET_KEY")
okex_passphrase = config.get_okex("OKEX_PASSPHRASE")
spotAPI = spot.SpotAPI(okex_api_key, okex_secret_key, okex_passphrase, False)
accountAPI = account.AccountAPI(okex_api_key, okex_secret_key, okex_passphrase, False)
accountClient=Account(spotAPI,accountAPI,engine)
now = datetime.datetime.now()
flag = True
after = ''
while (flag):
query_sql = '''SELECT ledger_id
FROM orange.account_okex_spot_fill where instrument_id ='OKB-USDT' order by `timestamp` limit 1'''
# print(query_sql)
res = | pd.read_sql(sql=query_sql, con=engine) | pandas.read_sql |
from .transform_function import TransformFunction
from .online_transform_function import OnlineTransformFunction
from .embody import _latent_operation_body_, get_truncnorm_moments_vec
from scipy.stats import norm, truncnorm
import numpy as np
import pandas as pd
from concurrent.futures import ProcessPoolExecutor
from scipy.linalg import svdvals
from collections import defaultdict
import warnings
var_type_names = ['continuous', 'ordinal', 'lower_truncated', 'upper_truncated', 'twosided_truncated']
class GaussianCopula():
'''
Gaussian copula model.
This class allows to estimate the parameters of a Gaussian copula model from incomplete data,
and impute the missing entries using the learned model.
Parameters
----------
training_mode: {'standard', 'minibatch-offline', 'minibatch-online'}, default='standard'
String describing the type of training to use. Must be one of:
'standard'
all data are used to estimate the marginals and update the model in each iteration
'minibatch-offline'
all data are used to estimate the marginals, but only a mini-batch's data are used to update the model in each iteration
'minibatch-online'
only recent data are used to estimate the marginals, and only a mini-batch's data are used to update the model in each iteration
tol: float, default=0.01
The convergence threshold. EM iterations will stop when the parameter update ratio is below this threshold.
max_iter: int, default=50
The number of EM iterations to perform.
random_state: int, default=101
Controls the randomness in generating latent ordinal values. Not used if there is no ordinal variable.
n_jobs: int, default=1
The number of jobs to run in parallel.
verbose: int, default=0
Controls the verbosity when fitting and predicting.
0 : silence
1 : information
2 : rich information
3 : debugging
num_ord_updates: int, default=1
Number of steps to take when approximating the mean and variance of the latent variables corresponding to ordinal dimensions.
        We do not recommend using a value larger than 1 (the default) at this moment; it slows
        fitting without a clear performance improvement.
min_ord_ratio: float, default=0.1
Used for automatic variable type decision. The largest mode frequency for continuous variables.
    stepsize_func: a function that takes a 1-dim input and returns a 1-dim output
        Only used when (1) training_mode = 'minibatch-offline'; (2) training_mode = 'minibatch-online' and 'const_stepsize=None'.
        The supplied function should output monotonically decreasing values in the range (0,1) on positive integers
const_stepsize: float in the range (0,1) or None, default is 0.5.
Only used when training_mode = 'minibatch-online'.
num_pass: int or None, default = 2
Only used when training_mode='minibatch-offline'. Used to set max_iter.
batch_size: int, default=100
The number of data points in each mini-batch.
window_size: int, default=200
The lookback window length for online marginal estimate. Only used when training_mode = 'minibatch-online'.
decay: int or None, default=None
The decay rate to be allocated to observations for online imputation. Only used when training_mode = 'minibatch-online'.
realtime_marginal: bool, default=True
Only used when training_mode = 'minibatch-online'.
If set to True, the marginal updates after every row; otherwise, the marginal updates after every batch_size rows.
In comparison, correlation update is conducted after every batch_size rows.
The model runs longer but gives more accurate estimation when set to True.
corr_diff_type: A list with elements from {'F', 'S', 'N'}, default = ['F']
The matrix norm used to compute copula correlation update ratio. Used for detecting change points when training mode = 'minibatch-online'.
Must be one of:
'F'
Frobenius norm
'S'
Spectral norm
'N'
Nuclear norm
Attributes
----------
n_iter_: int
The number of EM iterations conducted.
likelihood: ndarray of shape (n_iter_,)
The model likelihood value at each iteration.
    feature_names: ndarray of shape (n_features,)
        Names of the features seen during `fit`.
Methods
-------
fit(X)
Fit a Gaussian copula model on X.
transform(X)
Return the imputed X using the stored model.
fit_transform(X)
Fit a Gaussian copula model on X and return the transformed X.
fit_transform_evaluate(X, eval_func)
Conduct eval_func on the imputed datasets returned after each iteration.
sample_imputation(X)
Return multiple imputed datasets X using the stored model.
get_params()
Get parameters for this estimator.
get_vartypes()
Get the specified variable types used in model fitting.
get_imputed_confidence_interval()
Get the confidence intervals for the imputed missing entries.
get_reliability()
Get the reliability, a relative quantity across all imputed entries, when either all variables are continuous or all variables are ordinal
fit_change_point_test()
Conduct change point test after receiving each data batch.
'''
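    # Minimal usage sketch (the toy matrix below is an illustrative assumption):
    #
    #   import numpy as np
    #   X = np.array([[1.0, 2.0, np.nan],
    #                 [0.5, np.nan, 1.0],
    #                 [np.nan, 1.5, 0.0],
    #                 [2.0, 0.5, 1.5]])
    #   model = GaussianCopula(training_mode='standard', max_iter=20)
    #   X_imp = model.fit_transform(X)
    #   ci = model.get_imputed_confidence_interval(alpha=0.95)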
def __init__(self, training_mode='standard', tol=0.01, max_iter=50, random_state=101, n_jobs=1, verbose=0, num_ord_updates=1, min_ord_ratio=0.1, stepsize_func=lambda k, c=5:c/(k+c), const_stepsize=0.5, num_pass=2, batch_size=100, window_size=200, decay=None, realtime_marginal=True, corr_diff_type=['F']):
def check_stepsize():
L = np.array([stepsize_func(x) for x in range(1, max_iter+1, 1)])
if L.min() <=0 or L.max()>=1:
print(f'Step size should be in the range of (0,1). The input stepsize function yields step size from {L.min()} to {L.max()}')
raise
if not all(x>y for x, y in zip(L, L[1:])):
print(f'Input step size is not monotonically decreasing.')
raise
if training_mode == 'minibatch-online':
if const_stepsize is None:
check_stepsize()
self.stepsize = stepsize_func
else:
assert 0<const_stepsize<1, 'const_stepsize must be in the range (0, 1)'
self.stepsize = lambda x, c=const_stepsize: c
elif training_mode == 'minibatch-offline':
check_stepsize()
self.stepsize = stepsize_func
elif training_mode == 'standard':
pass
else:
print("Invalida training_mode, must be one of 'standard', 'minibatch-offline', 'minibatch-online'")
raise
self._training_mode = training_mode
self._batch_size = batch_size
self._window_size = window_size
self._realtime_marginal = realtime_marginal
self._decay = decay
self._corr_diff_type = corr_diff_type
# self._cont_indices and self._ord_indices store boolean indexing
self._cont_indices = None
self._ord_indices = None
# self.cont_indices and self.ord_indices store integer indexing
self.cont_indices = None
self.ord_indices = None
self._min_ord_ratio = min_ord_ratio
self.var_type_dict = {}
self._seed = random_state
self._rng = np.random.default_rng(self._seed)
self._sample_seed = self._seed
self._threshold = tol
self._max_iter = max_iter
self._max_workers = n_jobs
self._verbose = verbose
self._num_ord_updates = num_ord_updates
self._num_pass = num_pass
self._iter = 0
# model parameter
self._corr = None
# attributes
self.n_iter_ = 0
self.likelihood = []
self.features_names = None
self.corrupdate = []
self.corr_diff = defaultdict(list)
################################################
#### public functions
################################################
def fit(self, X,
continuous = None,
ordinal = None,
lower_truncated= None,
upper_truncated = None,
twosided_truncated = None,
**kwargs):
'''
Fits the Gaussian copula imputer on the input data X.
Parameters
----------
X: array-like of shape (n_samples, n_features)
Input data
        continuous, ordinal, lower_truncated, upper_truncated, twosided_truncated, poisson: list of integers
list of the corresponding variable type indices
kwargs:
additional keyword arguments for fit_offline:
first_fit: bool, default=True
If true, initialize the copula correlation matrix
max_iter: int or None.
The used maximum number of iterations is self._max_iter if max_iter is None else max_iter
convergence_verbose: bool, default = True
Output convergence information if True
'''
self.store_var_type(continuous = continuous,
ordinal = ordinal,
lower_truncated = lower_truncated,
upper_truncated = upper_truncated,
twosided_truncated = twosided_truncated
)
if self._training_mode == 'minibatch-online':
print('fit method is not implemented for minibatch-online mode, since the fitting and imputation are done in the unit of mini-batch. To impute the missing entries, call fit_transform.')
raise
else:
self.fit_offline(X, **kwargs)
def transform(self, X=None, num_ord_updates=2):
'''
Impute the missing entries in X using currently fitted model (accessed through self._corr).
Parameters
----------
X: array-like of shape (n_samples, n_features) or None
Data to be imputed. If None, set X as the data used to fit the model.
num_ord_updates: int, default=2
Number of steps to take when approximating the mean and variance of the latent variables corresponding to ordinal dimensions.
Returns
-------
X_imp: array-like of shape (n_samples, n_features)
            The imputed complete dataset
'''
# get Z
if X is None:
Z = self._latent_Zimp
else:
Z, Z_ord_lower, Z_ord_upper = self._observed_to_latent(X_to_transform=X)
Z, _ = self._fillup_latent(Z=Z, Z_ord_lower=Z_ord_lower, Z_ord_upper=Z_ord_upper, num_ord_updates=num_ord_updates)
# from Z to X
X_imp = self._latent_to_imp(Z=Z, X_to_impute=X)
return X_imp
def fit_transform(self, X,
continuous = None,
ordinal = None,
lower_truncated= None,
upper_truncated = None,
twosided_truncated = None,
**kwargs
):
'''
Fit to data, then transform it.
For 'minibatch-online' mode, the variable types are set in this function call since the fit and transformation are done in an alternative fashion.
For the other two modes, the variable types are set in the function fit_offline.
Parameters
----------
X: array-like of shape (n_samples, n_features)
Input data
        continuous, ordinal, lower_truncated, upper_truncated, twosided_truncated, poisson: list of integers
list of the corresponding variable type indices
kwargs:
additional keyword arguments for fit_transform_online and fit_offline
Keyword arguments of fit_transform_online:
X_true: array-like of shape (n_samples, n_features) or None
If not None, it indicates that some (could be all) of the missing entries of X_batch are revealed,
and stored in X_true, after the imputation of X_batch. Those observation entries will be used to
update the model.
            n_train: int, default=0
The number of rows to be used to initialize the model estimation.
Use self._batch_size if n_train is 0
For keyword arguments of fit_offline, see Parameters of fit()
Returns
-------
X_imp: array-like of shape (n_samples, n_features)
            The imputed complete dataset
'''
self.store_var_type(continuous = continuous,
ordinal = ordinal,
lower_truncated = lower_truncated,
upper_truncated = upper_truncated,
twosided_truncated = twosided_truncated
)
if self._training_mode == 'minibatch-online':
X = self._preprocess_data(X, set_indices=False)
if 'X_true' in kwargs:
self.set_indices(np.asarray(kwargs['X_true']))
else:
self.set_indices(X)
kwargs_online = {name:kwargs[name] for name in ['n_train', 'X_true'] if name in kwargs}
X_imp = self.fit_transform_online(X, **kwargs_online)
else:
X = self._preprocess_data(X)
kwargs_offline = {name:kwargs[name] for name in ['first_fit', 'max_iter', 'convergence_verbose'] if name in kwargs}
X_imp = self.fit_transform_offline(X, **kwargs_offline)
return X_imp
def fit_transform_evaluate(self, X, eval_func=None, num_iter=30, return_Ximp=False, **kwargs):
'''
Run the algorithm for num_iter iterations and evaluate the returned imputed sample at each iteration.
Parameters
----------
X: array-like of shape (n_samples, n_features)
Data to be imputed.
eval_func: function that takes array-like of shape (n_samples, n_features) input
If not None, apply eval_func to the imputed dataset after each iteration and return the results.
num_iter: int, default = 30
The number of iterations to run.
return_Ximp: bool, default = False
If True, return the imputed datasets after each iteration.
kwargs:
additional keyword arguments for fit_transform_online and fit_offline
See Parameters of fit_transform()
Returns
-------
out: dict
'X_imp': the imputed datasets
            'evaluation': the desired evaluation on imputed datasets
'''
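        # Illustrative eval_func, assuming a complete reference matrix X_full is
        # available for scoring (it is not part of this module):
        #   eval_func = lambda ximp: np.sqrt(np.nanmean((ximp - X_full) ** 2))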
out = defaultdict(list)
# first fit
Ximp = self.fit_transform(X = X, max_iter = 1, convergence_verbose = False, **kwargs)
if eval_func is not None:
out['evaluation'].append(eval_func(Ximp))
if return_Ximp:
out['X_imp'].append(Ximp)
# subsequent fits
for i in range(1, num_iter, 1):
Ximp = self.fit_transform(X = X, max_iter = 1, first_fit = False, convergence_verbose = False)
if eval_func is not None:
out['evaluation'].append(eval_func(Ximp))
if return_Ximp:
out['X_imp'].append(Ximp)
return out
def get_params(self):
'''
Get parameters for this estimator.
Returns:
params: dict
'''
params = {'copula_corr': self._corr.copy()}
return params
def get_vartypes(self, feature_names=None):
'''
Return the variable types used during the model fitting. Each variable is one of the following:
'continuous', 'ordinal', 'lower_truncated', 'upper_truncated', 'twosided_truncated'
Parameters
----------
feature_names: list of str or None
If not None, feature_names will be used to name variables
Returns
-------
_var_types: dict
Keys: 'continuous', 'ordinal', 'lower_truncated', 'upper_truncated', 'twosided_truncated'
'''
_var_types = self.var_type_dict.copy()
if feature_names is not None:
names = list(feature_names)
for key,value in _var_types.items():
_var_types[key] = [names[i] for i in value]
for name in var_type_names:
if name not in _var_types:
_var_types[name] = []
return _var_types
def get_imputed_confidence_interval(self, X=None, alpha = 0.95, num_ord_updates=2, type='analytical', **kwargs):
'''
Compute the confidence interval for each imputed entry.
Parameters
----------
X: array-like of shape (n_samples, n_features) or None
Data to be imputed. If None, set X as the data used to fit the model.
alpha: float in (0,1), default = 0.95
The desired significance level.
num_ord_updates: int, default = 2
Number of steps to take when approximating the mean and variance of the latent variables corresponding to ordinal dimensions.
        type: {'analytical', 'quantile'}, default ='analytical'.
            'analytical': derive the analytical confidence interval
            'quantile': first do multiple imputation and then derive empirical quantile confidence intervals
kwargs:
additional keyword arguments for get_imputed_confidence_interval_quantiles
Returns
-------
out: dict with keys
'upper': array-like of shape (n_samples, n_features)
The upper bound of the confidence interval
'lower': array-like of shape (n_samples, n_features)
The lower bound of the confidence interval
'''
if self._training_mode == 'minibatch-online':
raise NotImplementedError('Confidence interval has not yet been supported for minibatch-online mode')
if type == 'quantile':
return self.get_imputed_confidence_interval_quantile(X=X, alpha=alpha, num_ord_updates=num_ord_updates, **kwargs)
if X is None:
Zimp = self._latent_Zimp
Cord = self._latent_Cord
X = self.transform_function.X
else:
Z, Z_ord_lower, Z_ord_upper = self._observed_to_latent(X_to_transform=X)
Zimp, Cord = self._fillup_latent(Z=Z, Z_ord_lower=Z_ord_lower, Z_ord_upper=Z_ord_upper, num_ord_updates=num_ord_updates)
n, p = Zimp.shape
margin = norm.ppf(1-(1-alpha)/2)
        # upper and lower have np.nan at observed locations because std_cond has np.nan at those locations
std_cond = self._get_cond_std_missing(X=X, Cord=Cord)
upper = Zimp + margin * std_cond
lower = Zimp - margin * std_cond
# monotonic transformation
upper = self._latent_to_imp(Z=upper, X_to_impute=X)
lower = self._latent_to_imp(Z=lower, X_to_impute=X)
obs_loc = ~np.isnan(X)
upper[obs_loc] = np.nan
lower[obs_loc] = np.nan
out = {'upper':upper, 'lower':lower}
return out
def sample_imputation(self, X=None, num=5, num_ord_updates=1):
'''
Sample multiple imputed datasets using the currently fitted method.
Parameters
----------
X: array of shape (n_samples, n_features) or None.
The dataset to be imputed. Use the seen data for model fitting if None.
num: int, default=5
The number of imputation samples to draw.
num_ord_updates: int, default=1
The number of iterations to perform for estimating latent mean at ordinals.
Return
------
X_imp_num: array of shape (n_samples, n_features, num)
Imputed dataset.
'''
if X is None:
X = self.transform_function.X
if all(self._cont_indices):
Z, Z_ord_lower, Z_ord_upper = self._observed_to_latent(X_to_transform=X)
Z_imp_num = self._sample_latent(Z=Z, Z_ord_lower=Z_ord_lower, Z_ord_upper=Z_ord_upper, num=num, num_ord_updates=num_ord_updates)
X_imp_num = np.zeros_like(Z_imp_num)
for i in range(num):
X_imp_num[...,i] = self._latent_to_imp(Z=Z_imp_num[...,i], X_to_impute=X)
else:
# slower
n, p = X.shape
X_imp_num = np.empty((n, p, num))
Z_cont = self.transform_function.get_cont_latent(X_to_transform=X)
for i in range(num):
# Z_ord_lower and Z_ord_upper will be different across i
Z, Z_ord_lower, Z_ord_upper = self._observed_to_latent(X_to_transform=X, Z_cont=Z_cont, method='sampling')
# TODO: complete Z
Z_imp = self._sample_latent(Z=Z, Z_ord_lower=Z_ord_lower, Z_ord_upper=Z_ord_upper, num=1, num_ord_updates=num_ord_updates)
X_imp_num[...,i] = self._latent_to_imp(Z=Z_imp[...,0], X_to_impute=X)
return X_imp_num
def get_reliability(self, Ximp=None, alpha=0.95):
'''
Get the reliability of imputed entries. The notion of reliability is a relative quantity across all imputed entries.
Entries with higher reliability are more likely to have small imputation error.
Parameters
----------
Ximp: array-like of shape (n_samples, n_features) or None
Only used for all continuous variables.
The returned Gaussian copula imputed matrix.
alpha: float in (0,1), default = 0.95
The desired significance level.
Returns
-------
r: array-like of shape (n_samples, n_features)
Elementwise reliability
'''
if all(self._cont_indices):
r = self.get_reliability_cont(Ximp, alpha)
elif all(self._ord_indices):
r = self.get_reliability_ord()
else:
raise ValueError('Reliability computation is only available for either all continuous variables or all ordinal variables')
return r
def fit_change_point_test(self, X, X_true=None, n_train=0, nsamples=100, verbose=False):
'''
Conduct change point detection after receiving each data batch.
Parameters
----------
X: array-like of shape (n_samples, n_features)
Input data
X_true: array-like of shape (n_samples, n_features) or None
If not None, it indicates that some (could be all) of the missing entries of X_batch are revealed,
and stored in X_true, after the imputation of X_batch. Those observation entries will be used to
update the model.
        n_train: int, default=0
            The number of rows to be used to initialize the model estimation.
            Use self._batch_size if n_train is 0
        nsamples: int, default=100
            The number of samples to draw for the resampling test
        verbose: bool, default=False
If True, print progress information
Returns
-------
out: dict with keys
pval: dict with list
with keys as self._corr_diff_type,
and values as the corresponding empirical pvalues
statistics: dict with list
with keys as self._corr_diff_type
and values as the corresponding test statistics
'''
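        # Illustrative call (argument values are assumptions):
        #   gc = GaussianCopula(training_mode='minibatch-online')
        #   out = gc.fit_change_point_test(X_stream, n_train=200, nsamples=100)
        #   # a small out['pval']['F'][k] suggests a correlation change at batch k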
assert self._training_mode == 'minibatch-online'
if X_true is None:
X = self._preprocess_data(X)
else:
X = self._preprocess_data(X, set_indices=False)
self.set_indices(np.asarray(X_true))
cdf_types, inverse_cdf_types = self.get_cdf_estimation_type(p = X.shape[1])
self.transform_function = OnlineTransformFunction(self._cont_indices,
self._ord_indices,
window_size=self._window_size,
decay = self._decay,
cdf_types=cdf_types,
inverse_cdf_types=inverse_cdf_types
)
n,p = X.shape
self._corr = np.identity(p)
# initialize the model
n_train = self._batch_size if n_train == 0 else n_train
assert n_train > 0
ind_train = np.arange(n_train)
X_train = X[ind_train] if X_true is None else X_true[ind_train]
self.transform_function.update_window(X_train)
_ = self.partial_fit(X_batch = X_train, step_size=1)
pvals = defaultdict(list)
test_stats = defaultdict(list)
i=0
while True:
batch_lower = n_train + i*self._batch_size
batch_upper = min(n_train + (i+1)*self._batch_size, n)
if batch_lower>=n:
break
if verbose:
print(f'start batch {i+1}')
indices = np.arange(batch_lower, batch_upper, 1)
_X_true = None if X_true is None else X_true[indices]
_pval, _diff = self.change_point_test(X[indices,:], X_true=_X_true, step_size=self.stepsize(i+1), nsamples=nsamples)
for t in self._corr_diff_type:
pvals[t].append(_pval[t])
test_stats[t].append(_diff[t])
i+=1
out = {'pval':pvals, 'statistics':test_stats}
return out
####################################
#### General nonpublic functions
###################################
def get_imputed_confidence_interval_quantile(self, X=None, alpha = 0.95, num_ord_updates=1, num=200):
'''
Compute the confidence interval for each imputed entry.
Parameters
----------
X, alpha:
see Parameters in get_imputed_confidence_interval
num_ord_updates: int, default = 2
Number of steps to take when approximating the mean and variance of the latent variables corresponding to ordinal dimensions.
num: int, default=200
Number of multiple samples to draw
Returns
-------
out:
see Returns in get_imputed_confidence_interval
'''
if X is None:
X = self.transform_function.X
X_imp_num = self.sample_imputation(X = X, num = num, num_ord_updates = num_ord_updates)
q_lower, q_upper = (1-alpha)/2, 1-(1-alpha)/2
lower, upper = np.quantile(X_imp_num, [q_lower, q_upper], axis=2)
obs_loc = ~np.isnan(X)
upper[obs_loc] = np.nan
lower[obs_loc] = np.nan
return {'upper':upper, 'lower':lower}
def get_reliability_cont(self, Ximp, alpha=0.95):
'''
        Implements get_reliability when all variables are continuous.
Parameters
----------
Ximp: array-like of shape (n_samples, n_features) or None
Only used for all continuous variables.
The returned Gaussian copula imputed matrix.
alpha: float in (0,1), default = 0.95
The desired significance level.
Returns
-------
reliability: array-like of shape (n_samples, n_features)
Elementwise reliability
'''
ct = self.get_imputed_confidence_interval(alpha = alpha)
d = ct['upper'] - ct['lower']
d_square, x_square = np.power(d,2), np.power(Ximp, 2)
missing_loc = np.isnan(self.transform_function.X)
# reliability has np.nan at observation locations because d has np.nan at those locations
reliability = (d_square[missing_loc].sum() - d_square) / (x_square[missing_loc].sum() - x_square)
return reliability
def get_reliability_ord(self):
'''
        Implements get_reliability when all variables are ordinal.
Returns
-------
reliability: array-like of shape (n_samples, n_features)
Elementwise reliability
'''
std_cond = self._get_cond_std_missing()
try:
Zimp = self._latent_Zimp
except AttributeError:
print(f'Cannot compute reliability before model fitting and imputation')
raise
Z_ord_lower, _ = self.transform_function.get_ord_latent()
reliability = np.zeros_like(Zimp) + np.nan
p = Zimp.shape[1]
for j in range(p):
            # get the cutoffs
col = Z_ord_lower[:,j]
missing_indices = np.isnan(col)
cuts = np.unique(col[~missing_indices])
cuts = cuts[np.isfinite(cuts)]
# compute reliability/the probability lower bound
for i,x in enumerate(missing_indices):
if x:
t = np.abs(Zimp[i,j] - cuts).min()
reliability[i,j] = 1 - np.power(std_cond[i,j]/t, 2)
return reliability
################################################
#### offline functions
################################################
def fit_transform_offline(self, X, **kwargs):
'''
Implement fit_transform when the training mode is 'standard' or 'minibatch-offline'
Parameters
----------
See Parameters of fit()
Returns
-------
See Returns of transform()
'''
self.fit_offline(X, **kwargs)
X_imp = self.transform()
return X_imp
def fit_offline(self, X, first_fit=True, max_iter=None, convergence_verbose=True, fit_cov=True):
'''
Implement fit when the training mode is 'standard' or 'minibatch-offline'
Parameters
----------
See Parameters of fit()
'''
X = self._preprocess_data(X)
# do marginal estimation
# for every fit, a brand new marginal transformation is used
if first_fit:
cdf_types, inverse_cdf_types = self.get_cdf_estimation_type(p = X.shape[1])
self.transform_function = TransformFunction(X,
cont_indices=self._cont_indices,
ord_indices=self._ord_indices,
cdf_types=cdf_types,
inverse_cdf_types=inverse_cdf_types
)
Z, Z_ord_lower, Z_ord_upper = self._observed_to_latent()
else:
Z_ord_lower, Z_ord_upper = self._Z_ord_lower, self._Z_ord_upper
Z = self._latent_Zimp.copy()
Z[np.isnan(X)] = np.nan
# estimate copula correlation matrix
if fit_cov:
Z_imp, C_ord = self._fit_covariance(Z, Z_ord_lower, Z_ord_upper,
first_fit=first_fit, max_iter=max_iter, convergence_verbose=convergence_verbose)
# attributes to store after model fitting
self._latent_Zimp = Z_imp
self._latent_Cord = C_ord
# attributes to store for additional training
self._Z_ord_lower = Z_ord_lower
self._Z_ord_upper = Z_ord_upper
################################################
#### online functions
################################################
def fit_transform_online(self, X, X_true=None, n_train=0):
'''
Implement fit_transform when the training mode is 'minibatch-online'
Parameters
----------
See Parameters of fit_transform()
Returns
-------
See Returns of transform()
'''
if X_true is not None:
X_true = np.array(X_true)
cdf_types, inverse_cdf_types = self.get_cdf_estimation_type(p = X.shape[1])
self.transform_function = OnlineTransformFunction(self._cont_indices,
self._ord_indices,
window_size=self._window_size,
decay = self._decay,
cdf_types=cdf_types,
inverse_cdf_types=inverse_cdf_types
)
n,p = X.shape
X_imp = np.zeros_like(X)
self._corr = np.identity(p)
# initialize the model
n_train = self._batch_size if n_train == 0 else n_train
assert n_train > 0
ind_train = np.arange(n_train)
X_train = X[ind_train] if X_true is None else X_true[ind_train]
self.transform_function.update_window(X_train)
_ = self.partial_fit(X_batch = X_train, step_size=1)
X_imp[ind_train] = self.transform(X = X_train, num_ord_updates=self._num_ord_updates)
i=0
while True:
batch_lower = n_train + i*self._batch_size
batch_upper = min(n_train + (i+1)*self._batch_size, n)
if batch_lower>=n:
break
indices = np.arange(batch_lower, batch_upper, 1)
_X_true = None if X_true is None else X_true[indices]
X_imp[indices] = self.partial_fit_transform(X[indices], step_size=self.stepsize(i+1), X_true=_X_true)
i+=1
if self._verbose > 0:
print(f'finish batch {i}')
return X_imp
def partial_fit_transform(self, X_batch, step_size=0.5, X_true=None):
"""
Updates the fit of the copula using the data in X_batch and returns the
imputed values and the new correlation for the copula
Parameters
----------
X_batch: array-like of shape (nbatch, nfeatures)
data matrix with entries to use to update copula and be imputed
step_size: float in (0,1), default=0.5
tunes how much to weight new covariance estimates
X_true:
If not None, it indicates that some (could be all) of the missing entries of X_batch are revealed,
and stored in X_true, after the imputation of X_batch. Those observation entries will be used to
update the model.
Returns
-------
X_imp: array-like of shape (nbatch, nfeatures)
X_batch with missing values imputed
"""
# impute missing entries in new data using previously fitted model
# just a step of out-of-sample imputation
X_for_update = X_batch if X_true is None else X_true
if self._realtime_marginal:
X_imp = X_batch.copy()
for i,x in enumerate(X_batch):
X_imp[i] = self.transform(X = x.reshape((1, -1)), num_ord_updates = self._num_ord_updates)
self.transform_function.update_window(X_for_update[i].reshape((1, -1)))
else:
X_imp = self.transform(X = X_batch, num_ord_updates=self._num_ord_updates)
self.transform_function.update_window(X_for_update)
# use new model to update model parameters
prev_corr = self._corr.copy()
new_corr = self.partial_fit(X_batch=X_for_update, step_size=step_size, model_update=True)
diff = self.get_matrix_diff(prev_corr, self._corr, self._corr_diff_type)
self._update_corr_diff(diff)
return X_imp
def partial_fit(self, X_batch, step_size=0.5, model_update=True):
'''
Update the copula correlation from new samples in X_batch, with given step size
Parameters
----------
X_batch: array-like of shape (nbatch, nfeatures)
data matrix with entries to use to update copula
step_size: float in (0,1), default=0.5
tunes how much to weight new covariance estimates
model_update: bool, default=True
If True, update fitting information
Returns
-------
new_corr: array-like of shape (nfeatures, nfeatures)
updated copula correlation
'''
Z_ord_lower, Z_ord_upper = self.transform_function.get_ord_latent(X_to_transform=X_batch)
Z_ord = self._init_Z_ord(Z_ord_lower, Z_ord_upper, method='univariate_mean')
Z_cont = self.transform_function.get_cont_latent(X_to_transform=X_batch)
Z = np.empty_like(X_batch)
Z[:, self._cont_indices] = Z_cont
Z[:, self._ord_indices] = Z_ord
corr, Z_imp, Z, C_ord, loglik = self._em_step(Z, Z_ord_lower, Z_ord_upper)
new_corr = corr*step_size + (1-step_size)*self._corr
if model_update:
self._corr = new_corr
self._latent_Zimp = Z_imp
self._latent_Cord = C_ord
self.likelihood.append(loglik)
return new_corr
def change_point_test(self, X, step_size, X_true=None, nsamples=100):
'''
Conduct change point test at the newly received data batch X
Parameters
----------
X: array-like of shape (nbatch, nfeatures)
The newly received (incomplete) data batch
X_true: array-like of shape (nbatch, nfeatures)
A matrix agrees with X at observed entries but has fewer missing entries.
        step_size: float in (0,1)
The correlation update step size
nsamples: int, default = 100
The number of samples to draw for approximating the null distribution.
Returns
-------
pval: float
empirical p-value
diff: float
test statistics
'''
n,p = X.shape
missing_indices = np.isnan(X)
prev_corr = self._corr.copy()
changing_stat = defaultdict(list)
X_to_impute = np.zeros_like(X) * np.nan
for i in range(nsamples):
z = self._rng.multivariate_normal(np.zeros(p), prev_corr, n)
# mask
x = np.empty((n,p))
x[:,self.cont_indices] = self.transform_function.impute_cont_observed(z, X_to_impute)
x[:,self.ord_indices] = self.transform_function.impute_ord_observed(z, X_to_impute)
x[missing_indices] = np.nan
# TODO: compare with enabling marginal_update
new_corr = self.partial_fit(x, step_size=step_size, model_update=False)
diff = self.get_matrix_diff(prev_corr, new_corr, self._corr_diff_type)
self._update_corr_diff(diff, output=changing_stat)
self.transform_function.update_window(X)
new_corr = self.partial_fit(X, step_size=step_size, model_update=True)
diff = self.get_matrix_diff(prev_corr, new_corr, self._corr_diff_type)
self._update_corr_diff(diff)
# compute empirical p-values
changing_stat = | pd.DataFrame(changing_stat) | pandas.DataFrame |
#
# Copyright 2018 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from abc import ABC, abstractproperty
from collections import OrderedDict
import functools
import warnings
import numpy as np
import pandas as pd
import toolz
from numpy import searchsorted
from pandas import DataFrame, date_range
from pandas.tseries.holiday import AbstractHolidayCalendar
from pandas.tseries.offsets import CustomBusinessDay
from pytz import UTC
from exchange_calendars import errors
from .calendar_helpers import (
NP_NAT,
NANOSECONDS_PER_MINUTE,
compute_all_minutes,
one_minute_later,
one_minute_earlier,
next_divider_idx,
previous_divider_idx,
Session,
Date,
Minute,
TradingMinute,
parse_timestamp,
parse_trading_minute,
parse_session,
parse_date,
)
from .utils.memoize import lazyval
from .utils.pandas_utils import days_at_time
from .pandas_extensions.offsets import MultipleWeekmaskCustomBusinessDay
GLOBAL_DEFAULT_START = pd.Timestamp.now(tz=UTC).floor("D") - pd.DateOffset(years=20)
# Give an aggressive buffer for logic that needs to use the next trading
# day or minute.
GLOBAL_DEFAULT_END = pd.Timestamp.now(tz=UTC).floor("D") + pd.DateOffset(years=1)
NANOS_IN_MINUTE = 60000000000
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = range(7)
WEEKDAYS = (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY)
WEEKENDS = (SATURDAY, SUNDAY)
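# Restrict the UTC-localized session index `arr` to the half-open window
# [start, end); `start`/`end` are tz-naive bounds localized to UTC for the
# comparison, and a None bound is ignored.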
def selection(arr, start, end):
predicates = []
if start is not None:
predicates.append(start.tz_localize(UTC) <= arr)
if end is not None:
predicates.append(arr < end.tz_localize(UTC))
if not predicates:
return arr
return arr[np.all(predicates, axis=0)]
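# Build the per-session open/close/break DatetimeIndex from a list of
# (start_date, time) pairs: each pair applies from its start_date up to (but
# not including) the next pair's start_date, so calendars whose standard
# times changed historically are handled by concatenating the pieces.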
def _group_times(all_days, times, tz, offset=0):
if times is None:
return None
elements = [
days_at_time(selection(all_days, start, end), time, tz, offset)
for (start, time), (end, _) in toolz.sliding_window(
2, toolz.concatv(times, [(None, None)])
)
]
return elements[0].append(elements[1:])
class deprecate:
"""Decorator for deprecated/renamed ExchangeCalendar methods."""
def __init__(
self,
deprecated_release: str = "3.4",
removal_release: str = "4.0",
alt_method: str = "",
renamed: bool = True,
):
self.deprecated_release = "release " + deprecated_release
self.removal_release = "release " + removal_release
self.alt_method = alt_method
self.renamed = renamed
if renamed:
assert alt_method, "pass `alt_method` if renaming"
def __call__(self, f):
@functools.wraps(f)
def wrapped_f(*args, **kwargs):
warnings.warn(self._message(f), FutureWarning)
return f(*args, **kwargs)
return wrapped_f
def _message(self, f):
msg = (
f"`{f.__name__}` was deprecated in {self.deprecated_release}"
f" and will be removed in {self.removal_release}."
)
if self.alt_method:
if self.renamed:
msg += (
f" The method has been renamed `{self.alt_method}`."
f" NB parameter names may also have changed (see "
f" documentation for `{self.alt_method}`)."
)
else:
msg += f" Use `{self.alt_method}`."
return msg
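# Illustrative use of the decorator (the method names below are hypothetical,
# not part of this module):
#
#   class SomeCalendar(ExchangeCalendar):
#       @deprecate(alt_method="is_session")
#       def is_trading_day(self, dt):
#           return self.is_session(dt)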
class ExchangeCalendar(ABC):
"""Representation of timing information of a single market exchange.
The timing information comprises sessions, open/close times and, for
exchanges that observe an intraday break, break_start/break_end times.
For exchanges that do not observe an intraday break a session
represents a contiguous set of minutes. Where an exchange observes
an intraday break a session represents two contiguous sets of minutes
separated by the intraday break.
Each session has a label that is midnight UTC. It is important to note
that a session label should not be considered a specific point in time,
and that midnight UTC is just being used for convenience.
For each session, we store the open and close time together with, for
those exchanges with breaks, the break start and break end. All times
are defined as UTC.
Parameters
----------
start : default: later of 20 years ago or first supported start date.
First calendar session will be `start`, if `start` is a session, or
first session after `start`.
end : default: earliest of 1 year from 'today' or last supported end date.
Last calendar session will be `end`, if `end` is a session, or last
session before `end`.
side : default: "both" ("left" for 24 hour calendars)
Define which of session open/close and break start/end should
be treated as a trading minute:
"left" - treat session open and break_start as trading minutes,
do not treat session close or break_end as trading minutes.
"right" - treat session close and break_end as trading minutes,
            do not treat session open or break_start as trading minutes.
"both" - treat all of session open, session close, break_start
and break_end as trading minutes.
"neither" - treat none of session open, session close,
break_start or break_end as trading minutes.
Raises
------
ValueError
If `start` is earlier than the earliest supported start date.
If `end` is later than the latest supported end date.
If `start` parses to a later date than `end`.
Notes
-----
Exchange calendars were originally defined for the Zipline package from
Quantopian under the package 'trading_calendars'. Since 2021 they have
been maintained under the 'exchange_calendars' package (a fork of
'trading_calendars') by an active community of contributing users.
Some calendars have defined start and end bounds within which
contributors have endeavoured to ensure the calendar's accuracy and
outside of which the calendar would not be accurate. These bounds
are enforced such that passing `start` or `end` as dates that are
out-of-bounds will raise a ValueError. The bounds of each calendar are
exposed via the `bound_start` and `bound_end` properties.
Many calendars do not have bounds defined (in these cases `bound_start`
and/or `bound_end` return None). These calendars can be created through
any date range although it should be noted that the earlier the start
date, the greater the potential for inaccuracies.
In all cases, no guarantees are offered as to the accuracy of any
calendar.
Internal method parameters:
_parse: bool
Determines if a `minute` or `session` parameter should be
parsed (default True). Passed as False:
- internally to prevent double parsing.
- by tests for efficiency.
"""
_LEFT_SIDES = ["left", "both"]
_RIGHT_SIDES = ["right", "both"]
def __init__(
self,
start: Date | None = None,
end: Date | None = None,
side: str | None = None,
):
side = side if side is not None else self.default_side
if side not in self.valid_sides:
raise ValueError(
f"`side` must be in {self.valid_sides} although received as {side}."
)
self._side = side
if start is None:
start = self.default_start
else:
start = parse_date(start, "start")
if self.bound_start is not None and start < self.bound_start:
raise ValueError(self._bound_start_error_msg(start))
if end is None:
end = self.default_end
else:
end = parse_date(end, "end")
if self.bound_end is not None and end > self.bound_end:
raise ValueError(self._bound_end_error_msg(end))
if start >= end:
raise ValueError(
"`start` must be earlier than `end` although `start` parsed as"
f" '{start}' and `end` as '{end}'."
)
# Midnight in UTC for each trading day.
_all_days = date_range(start, end, freq=self.day, tz=UTC)
if _all_days.empty:
raise errors.NoSessionsError(calendar_name=self.name, start=start, end=end)
# `DatetimeIndex`s of standard opens/closes for each day.
self._opens = _group_times(
_all_days,
self.open_times,
self.tz,
self.open_offset,
)
self._break_starts = _group_times(
_all_days,
self.break_start_times,
self.tz,
)
self._break_ends = _group_times(
_all_days,
self.break_end_times,
self.tz,
)
self._closes = _group_times(
_all_days,
self.close_times,
self.tz,
self.close_offset,
)
# Apply special offsets first
self._calculate_and_overwrite_special_offsets(_all_days, start, end)
# Series mapping sessions with nonstandard opens/closes.
_special_opens = self._calculate_special_opens(start, end)
_special_closes = self._calculate_special_closes(start, end)
# Overwrite the special opens and closes on top of the standard ones.
_overwrite_special_dates(_all_days, self._opens, _special_opens)
_overwrite_special_dates(_all_days, self._closes, _special_closes)
_remove_breaks_for_special_dates(
_all_days,
self._break_starts,
_special_closes,
)
_remove_breaks_for_special_dates(
_all_days,
self._break_ends,
_special_closes,
)
if self._break_starts is None:
break_starts = None
else:
break_starts = self._break_starts.tz_localize(None)
if self._break_ends is None:
break_ends = None
else:
break_ends = self._break_ends.tz_localize(None)
self.schedule = DataFrame(
index=_all_days,
data=OrderedDict(
[
("market_open", self._opens.tz_localize(None)),
("break_start", break_starts),
("break_end", break_ends),
("market_close", self._closes.tz_localize(None)),
]
),
dtype="datetime64[ns]",
)
self.market_opens_nanos = self.schedule.market_open.values.astype(np.int64)
self.market_break_starts_nanos = self.schedule.break_start.values.astype(
np.int64
)
self.market_break_ends_nanos = self.schedule.break_end.values.astype(np.int64)
self.market_closes_nanos = self.schedule.market_close.values.astype(np.int64)
_check_breaks_match(
self.market_break_starts_nanos, self.market_break_ends_nanos
)
self.first_trading_session = _all_days[0]
self.last_trading_session = _all_days[-1]
self._late_opens = _special_opens.index
self._early_closes = _special_closes.index
# Methods and properties that define calendar and which should be
    # overridden or extended, if and as required, by subclass.
@abstractproperty
def name(self) -> str:
raise NotImplementedError()
@property
def bound_start(self) -> pd.Timestamp | None:
"""Earliest date from which calendar can be constructed.
Returns
-------
pd.Timestamp or None
Earliest date from which calendar can be constructed. Must have
tz as "UTC". None if no limit.
Notes
-----
To impose a constraint on the earliest date from which a calendar
can be constructed subclass should override this method and
optionally override `_bound_start_error_msg`.
"""
return None
@property
def bound_end(self) -> pd.Timestamp | None:
"""Latest date to which calendar can be constructed.
Returns
-------
pd.Timestamp or None
Latest date to which calendar can be constructed. Must have tz
as "UTC". None if no limit.
Notes
-----
To impose a constraint on the latest date to which a calendar can
be constructed subclass should override this method and optionally
override `_bound_end_error_msg`.
"""
return None
def _bound_start_error_msg(self, start: pd.Timestamp) -> str:
"""Return error message to handle `start` being out-of-bounds.
See Also
--------
bound_start
"""
return (
f"The earliest date from which calendar {self.name} can be"
f" evaluated is {self.bound_start}, although received `start` as"
f" {start}."
)
def _bound_end_error_msg(self, end: pd.Timestamp) -> str:
"""Return error message to handle `end` being out-of-bounds.
See Also
--------
bound_end
"""
return (
f"The latest date to which calendar {self.name} can be evaluated"
f" is {self.bound_end}, although received `end` as {end}."
)
@property
def default_start(self) -> pd.Timestamp:
if self.bound_start is None:
return GLOBAL_DEFAULT_START
else:
return max(GLOBAL_DEFAULT_START, self.bound_start)
@property
def default_end(self) -> pd.Timestamp:
if self.bound_end is None:
return GLOBAL_DEFAULT_END
else:
return min(GLOBAL_DEFAULT_END, self.bound_end)
@abstractproperty
def tz(self):
raise NotImplementedError()
@abstractproperty
def open_times(self):
"""
Returns a list of tuples of (start_date, open_time). If the open
time is constant throughout the calendar, use None for the start_date.
"""
raise NotImplementedError()
@property
def break_start_times(self):
"""
        Returns an optional list of tuples of (start_date, break_start_time).
If the break start time is constant throughout the calendar, use None
for the start_date. If there is no break, return `None`.
"""
return None
@property
def break_end_times(self):
"""
        Returns an optional list of tuples of (start_date, break_end_time). If
the break end time is constant throughout the calendar, use None for
the start_date. If there is no break, return `None`.
"""
return None
@abstractproperty
def close_times(self):
"""
Returns a list of tuples of (start_date, close_time). If the close
time is constant throughout the calendar, use None for the start_date.
"""
raise NotImplementedError()
@property
def weekmask(self):
"""
String indicating the days of the week on which the market is open.
Default is '1111100' (i.e., Monday-Friday).
See Also
--------
numpy.busdaycalendar
"""
return "1111100"
@property
def open_offset(self):
return 0
@property
def close_offset(self):
return 0
@property
def regular_holidays(self):
"""
Returns
-------
pd.AbstractHolidayCalendar: a calendar containing the regular holidays
for this calendar
"""
return None
@property
def adhoc_holidays(self):
"""
Returns
-------
list: A list of tz-naive timestamps representing unplanned closes.
"""
return []
@property
def special_opens(self):
"""
A list of special open times and corresponding HolidayCalendars.
Returns
-------
list: List of (time, AbstractHolidayCalendar) tuples
"""
return []
@property
def special_opens_adhoc(self):
"""
Returns
-------
list: List of (time, DatetimeIndex) tuples that represent special
closes that cannot be codified into rules.
"""
return []
@property
def special_closes(self):
"""
A list of special close times and corresponding HolidayCalendars.
Returns
-------
list: List of (time, AbstractHolidayCalendar) tuples
"""
return []
@property
def special_closes_adhoc(self):
"""
Returns
-------
list: List of (time, DatetimeIndex) tuples that represent special
closes that cannot be codified into rules.
"""
return []
@property
def special_weekmasks(self):
"""
Returns
-------
list: List of (date, date, str) tuples that represent special
weekmasks that applies between dates.
"""
return []
@property
def special_offsets(self):
"""
Returns
-------
list: List of (timedelta, timedelta, timedelta, timedelta, AbstractHolidayCalendar) tuples
that represent special open, break_start, break_end, close offsets
and corresponding HolidayCalendars.
"""
return []
@property
def special_offsets_adhoc(self):
"""
Returns
-------
list: List of (timedelta, timedelta, timedelta, timedelta, DatetimeIndex) tuples
that represent special open, break_start, break_end, close offsets
and corresponding DatetimeIndexes.
"""
return []
# ------------------------------------------------------------------
    # -- NO method below this line should be overridden on a subclass! --
# ------------------------------------------------------------------
# Methods and properties that define calendar (continued...).
@lazyval
def day(self):
if self.special_weekmasks:
return MultipleWeekmaskCustomBusinessDay(
holidays=self.adhoc_holidays,
calendar=self.regular_holidays,
weekmask=self.weekmask,
weekmasks=self.special_weekmasks,
)
else:
return CustomBusinessDay(
holidays=self.adhoc_holidays,
calendar=self.regular_holidays,
weekmask=self.weekmask,
)
@property
def valid_sides(self) -> list[str]:
"""List of valid `side` options."""
if self.close_times == self.open_times:
return ["left", "right"]
else:
return ["both", "left", "right", "neither"]
@property
def default_side(self) -> str:
"""Default `side` option."""
if self.close_times == self.open_times:
return "right"
else:
return "both"
@property
def side(self) -> str:
"""Side on which sessions are closed.
Returns
-------
str
"left" - Session open and break_start are trading minutes.
Session close and break_end are not trading minutes.
"right" - Session close and break_end are trading minutes,
                Session open and break_start are not trading minutes.
"both" - Session open, session close, break_start and
break_end are all trading minutes.
"neither" - Session open, session close, break_start and
break_end are all not trading minutes.
Notes
-----
Subclasses should NOT override this method.
"""
return self._side
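    # Illustration (assuming a session trading 09:30-16:00 with no break):
    # side "left" counts 09:30 but not 16:00 as a trading minute, "right"
    # counts 16:00 but not 09:30, "both" counts both, "neither" counts neither.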
# Properties covering all sessions.
@property
def all_sessions(self) -> pd.DatetimeIndex:
"""All calendar sessions."""
return self.schedule.index
@property
def opens(self) -> pd.Series:
"""Open time of each session.
Returns
-------
pd.Series
index : pd.DatetimeIndex
All sessions.
dtype : datetime64[ns]
Open time of corresponding session. NB Times are UTC
although dtype is timezone-naive.
"""
return self.schedule.market_open
@property
def closes(self) -> pd.Series:
"""Close time of each session.
Returns
-------
pd.Series
index : pd.DatetimeIndex
All sessions.
dtype : datetime64[ns]
Close time of corresponding session. NB Times are UTC
although dtype is timezone-naive.
"""
return self.schedule.market_close
@property
def break_starts(self) -> pd.Series:
"""Break start time of each session.
Returns
-------
pd.Series
index : pd.DatetimeIndex
All sessions.
dtype : datetime64[ns]
Break-start time of corresponding session. NB Times are UTC
although dtype is timezone-naive. Value is missing
(pd.NaT) for any session that does not have a break.
"""
return self.schedule.break_start
@property
def break_ends(self) -> pd.Series:
"""Break end time of each session.
Returns
-------
pd.Series
index : pd.DatetimeIndex
All sessions.
dtype : datetime64[ns]
Break-end time of corresponding session. NB Times are UTC
although dtype is timezone-naive. Value is missing
(pd.NaT) for any session that does not have a break.
"""
return self.schedule.break_end
@functools.lru_cache(maxsize=1) # cache last request
def _first_minute_nanos(self, side: str | None = None) -> np.ndarray:
side = side if side is not None else self.side
if side in self._LEFT_SIDES:
return self.market_opens_nanos
else:
return one_minute_later(self.market_opens_nanos)
@functools.lru_cache(maxsize=1) # cache last request
def _last_minute_nanos(self, side: str | None = None) -> np.ndarray:
side = side if side is not None else self.side
if side in self._RIGHT_SIDES:
return self.market_closes_nanos
else:
return one_minute_earlier(self.market_closes_nanos)
@functools.lru_cache(maxsize=1) # cache last request
def _last_am_minute_nanos(self, side: str | None = None) -> np.ndarray:
side = side if side is not None else self.side
if side in self._RIGHT_SIDES:
return self.market_break_starts_nanos
else:
return one_minute_earlier(self.market_break_starts_nanos)
@functools.lru_cache(maxsize=1) # cache last request
def _first_pm_minute_nanos(self, side: str | None = None) -> np.ndarray:
side = side if side is not None else self.side
if side in self._LEFT_SIDES:
return self.market_break_ends_nanos
else:
return one_minute_later(self.market_break_ends_nanos)
def _minutes_as_series(self, nanos: np.ndarray, name: str) -> pd.Series:
"""Convert trading minute nanos to pd.Series."""
ser = pd.Series(pd.DatetimeIndex(nanos, tz="UTC"), index=self.all_sessions)
ser.name = name
return ser
@property
def all_first_minutes(self) -> pd.Series:
"""First trading minute of each session."""
return self._minutes_as_series(self._first_minute_nanos(), "first_minutes")
@property
def all_last_minutes(self) -> pd.Series:
"""Last trading minute of each session."""
return self._minutes_as_series(self._last_minute_nanos(), "last_minutes")
@property
def all_last_am_minutes(self) -> pd.Series:
"""Last am trading minute of each session."""
return self._minutes_as_series(self._last_am_minute_nanos(), "last_am_minutes")
@property
def all_first_pm_minutes(self) -> pd.Series:
"""First pm trading minute of each session."""
return self._minutes_as_series(
self._first_pm_minute_nanos(), "first_pm_minutes"
)
# Properties covering all minutes.
def _all_minutes(self, side: str) -> pd.DatetimeIndex:
return pd.DatetimeIndex(
compute_all_minutes(
self.market_opens_nanos,
self.market_break_starts_nanos,
self.market_break_ends_nanos,
self.market_closes_nanos,
side,
),
tz="UTC",
)
@lazyval
def all_minutes(self) -> pd.DatetimeIndex:
"""All trading minutes."""
return self._all_minutes(self.side)
@lazyval
def all_minutes_nanos(self) -> np.ndarray:
"""All trading minutes as nanoseconds."""
return self.all_minutes.values.astype(np.int64)
# Calendar properties.
@property
def first_session(self) -> pd.Timestamp:
"""First calendar session."""
return self.all_sessions[0]
@property
def last_session(self) -> pd.Timestamp:
"""Last calendar session."""
return self.all_sessions[-1]
@property
def first_session_open(self) -> pd.Timestamp:
"""Open time of calendar's first session."""
return self.opens[0]
@property
def last_session_close(self) -> pd.Timestamp:
"""Close time of calendar's last session."""
return self.closes[-1]
@property
def first_trading_minute(self) -> pd.Timestamp:
"""Calendar's first trading minute."""
return pd.Timestamp(self.all_minutes_nanos[0], tz="UTC")
@property
def last_trading_minute(self) -> pd.Timestamp:
"""Calendar's last trading minute."""
return pd.Timestamp(self.all_minutes_nanos[-1], tz="UTC")
def has_breaks(
self, start: Date | None = None, end: Date | None = None, _parse: bool = True
) -> bool:
"""Query if at least one session of a calendar has a break.
Parameters
----------
start : optional
Limit query to sessions from `start`.
end : optional
Limit query to sessions through `end`.
Returns
-------
bool
True if any calendar session, or session of any range defined
from `start` to `end`, has a break. False otherwise.
"""
if _parse and start is not None:
start = self._parse_session_range_start(start)
if _parse and end is not None:
end = self._parse_session_range_end(end)
return self.break_starts[start:end].notna().any()
@property
def late_opens(self) -> pd.DatetimeIndex:
"""Sessions that open later than the prevailing normal open.
NB. Prevailing normal open as defined by `open_times`.
"""
return self._late_opens
@property
def early_closes(self) -> pd.DatetimeIndex:
"""Sessions that close earlier than the prevailing normal close.
NB. Prevailing normal close as defined by `close_times`.
"""
return self._early_closes
# Methods that interrogate a given session.
def session_open(self, session_label: Session, _parse: bool = True) -> pd.Timestamp:
"""Return open time for a given session."""
if _parse:
session_label = parse_session(self, session_label, "session_label")
return self.schedule.at[session_label, "market_open"].tz_localize(UTC)
def session_close(
self, session_label: Session, _parse: bool = True
) -> pd.Timestamp:
"""Return close time for a given session."""
if _parse:
session_label = parse_session(self, session_label, "session_label")
return self.schedule.at[session_label, "market_close"].tz_localize(UTC)
def session_break_start(
self, session_label: Session, _parse: bool = True
) -> pd.Timestamp | pd.NaT:
"""Return break-start time for a given session.
Returns pd.NaT if no break.
"""
if _parse:
session_label = parse_session(self, session_label, "session_label")
break_start = self.schedule.at[session_label, "break_start"]
if not pd.isnull(break_start):
break_start = break_start.tz_localize(UTC)
return break_start
def session_break_end(
self, session_label: Session, _parse: bool = True
) -> pd.Timestamp | pd.NaT:
"""Return break-end time for a given session.
Returns pd.NaT if no break.
"""
if _parse:
session_label = parse_session(self, session_label, "session_label")
break_end = self.schedule.at[session_label, "break_end"]
if not pd.isnull(break_end):
break_end = break_end.tz_localize(UTC)
return break_end
def open_and_close_for_session(
self, session_label: Session, _parse: bool = True
) -> tuple[pd.Timestamp, pd.Timestamp]:
"""Return open and close times for a given session.
Parameters
----------
session_label
Session for which require open and close.
Returns
-------
tuple[pd.Timestamp, pd.Timestamp]
[0] Open time of `session_label`.
[1] Close time of `session_label`.
"""
if _parse:
session_label = parse_session(self, session_label, "session_label")
return (
self.session_open(session_label),
self.session_close(session_label),
)
def break_start_and_end_for_session(
self, session_label: Session, _parse: bool = True
) -> tuple[pd.Timestamp | pd.NaT, pd.Timestamp | pd.NaT]:
"""Return break-start and break-end times for a given session.
Parameters
----------
session_label
Session for which require break-start and break-end.
Returns
-------
tuple[pd.Timestamp | pd.NaT, pd.Timestamp | pd.NaT]
[0] Break-start time of `session_label`, or pd.NaT if no break.
            [1] Break-end time of `session_label`, or pd.NaT if no break.
"""
if _parse:
session_label = parse_session(self, session_label, "session_label")
return (
self.session_break_start(session_label),
self.session_break_end(session_label),
)
def _get_session_minute_from_nanos(
self, session: Session, nanos: np.ndarray, _parse: bool
) -> pd.Timestamp:
if _parse:
session = parse_session(self, session, "session")
idx = self.all_sessions.get_loc(session)
return pd.Timestamp(nanos[idx], tz="UTC")
def session_first_minute(
self, session: Session, _parse: bool = True
) -> pd.Timestamp:
"""Return first trading minute of a given session."""
nanos = self._first_minute_nanos()
return self._get_session_minute_from_nanos(session, nanos, _parse)
def session_last_minute(
self, session: Session, _parse: bool = True
) -> pd.Timestamp:
"""Return last trading minute of a given session."""
nanos = self._last_minute_nanos()
return self._get_session_minute_from_nanos(session, nanos, _parse)
def session_last_am_minute(
self, session: Session, _parse: bool = True
) -> pd.Timestamp | pd.NaT: # Literal[pd.NaT] - when move to min 3.8
"""Return last trading minute of am subsession of a given session."""
nanos = self._last_am_minute_nanos()
return self._get_session_minute_from_nanos(session, nanos, _parse)
def session_first_pm_minute(
self, session: Session, _parse: bool = True
) -> pd.Timestamp | pd.NaT: # Literal[pd.NaT] - when move to min 3.8
"""Return first trading minute of pm subsession of a given session."""
nanos = self._first_pm_minute_nanos()
return self._get_session_minute_from_nanos(session, nanos, _parse)
def session_first_and_last_minute(
self,
session: Session,
_parse: bool = True,
    ) -> tuple[pd.Timestamp, pd.Timestamp]:
"""Return first and last trading minutes of a given session."""
if _parse:
session = parse_session(self, session, "session")
idx = self.all_sessions.get_loc(session)
first = pd.Timestamp(self._first_minute_nanos()[idx], tz="UTC")
last = pd.Timestamp(self._last_minute_nanos()[idx], tz="UTC")
return (first, last)
def session_has_break(self, session: Session, _parse: bool = True) -> bool:
"""Query if a given session has a break.
Parameters
----------
session
Session to query.
Returns
-------
bool
True if `session` has a break, false otherwise.
"""
if _parse:
session = parse_session(self, session, "session")
return pd.notna(self.session_break_start(session))
def next_session_label(
self, session_label: Session, _parse: bool = True
) -> pd.Timestamp:
"""Return session that immediately follows a given session.
Parameters
----------
session_label
Session whose next session is desired.
Raises
------
ValueError
If `session_label` is the last calendar session.
See Also
--------
date_to_session_label
"""
if _parse:
session_label = parse_session(self, session_label, "session_label")
idx = self.schedule.index.get_loc(session_label)
try:
return self.schedule.index[idx + 1]
except IndexError as err:
if idx == len(self.schedule.index) - 1:
raise ValueError(
"There is no next session as this is the end"
" of the exchange calendar."
) from err
else:
raise
def previous_session_label(
self, session_label: Session, _parse: bool = True
) -> pd.Timestamp:
"""Return session that immediately preceeds a given session.
Parameters
----------
session_label
Session whose previous session is desired.
Raises
------
ValueError
If `session_label` is the first calendar session.
See Also
--------
date_to_session_label
"""
if _parse:
session_label = parse_session(self, session_label, "session_label")
idx = self.schedule.index.get_loc(session_label)
if idx == 0:
raise ValueError(
"There is no previous session as this is the"
" beginning of the exchange calendar."
)
return self.schedule.index[idx - 1]
def minutes_for_session(
self, session_label: Session, _parse: bool = True
) -> pd.DatetimeIndex:
"""Return trading minutes corresponding to a given session.
Parameters
----------
session_label
Session for which require trading minutes.
Returns
-------
pd.DateTimeIndex
Trading minutes for `session`.
"""
if _parse:
session_label = parse_session(self, session_label, "session_label")
first, last = self.session_first_and_last_minute(session_label, _parse=False)
return self.minutes_in_range(start_minute=first, end_minute=last)
# Methods that interrogate a date.
def is_session(self, dt: Date, _parse: bool = True) -> bool:
"""Query if a date is a valid session.
Parameters
----------
dt
Date to be queried.
Return
------
bool
True if `dt` is a session, False otherwise.
Returns False if `dt` is earlier than the first calendar
session or later than the last calendar session.
"""
if _parse:
dt = parse_date(dt, "dt")
return dt in self.schedule.index
def date_to_session_label(
self,
date: Date,
direction: str = "none", # when min 3.8, Literal["none", "previous", "next"]
_parse: bool = True,
) -> pd.Timestamp:
"""Return a session label corresponding to a given date.
Parameters
----------
date
Date for which require session label. Can be a date that does not
represent an actual session (see `direction`).
direction : default: "none"
Defines behaviour if `date` does not represent a session:
"next" - return first session label following `date`.
"previous" - return first session label prior to `date`.
"none" - raise ValueError.
Returns
-------
pd.Timestamp (midnight UTC)
Label of the corresponding session.
See Also
--------
next_session_label
previous_session_label
"""
if _parse:
date = parse_date(date, "date")
if self.is_session(date):
return date
elif direction in ["next", "previous"]:
if direction == "previous" and date < self.first_session:
raise ValueError(
"Cannot get a session label prior to the first calendar"
f" session ('{self.first_session}'). Consider passing"
f" `direction` as 'next'."
)
if direction == "next" and date > self.last_session:
raise ValueError(
"Cannot get a session label later than the last calendar"
f" session ('{self.last_session}'). Consider passing"
f" `direction` as 'previous'."
)
idx = self.all_sessions.values.astype(np.int64).searchsorted(date.value)
if direction == "previous":
idx -= 1
return self.all_sessions[idx]
elif direction == "none":
raise ValueError(
f"`date` '{date}' does not represent a session. Consider passing"
" a `direction`."
)
else:
raise ValueError(
f"'{direction}' is not a valid `direction`. Valid `direction`"
' values are "next", "previous" and "none".'
)
# Methods that interrogate a given minute (trading or non-trading).
def is_trading_minute(self, minute: Minute, _parse: bool = True) -> bool:
"""Query if a given minute is a trading minute.
Minutes during breaks are not considered trading minutes.
Note: `self.side` determines whether exchange will be considered
open or closed on session open, session close, break start and
break end.
Parameters
----------
minute
Minute being queried.
Returns
-------
bool
            Boolean indicating if `minute` is a trading minute.
See Also
--------
is_open_on_minute
"""
if _parse:
minute = parse_timestamp(
minute, "minute", raise_oob=True, calendar=self
).value
else:
minute = minute.value
idx = self.all_minutes_nanos.searchsorted(minute)
numpy_bool = minute == self.all_minutes_nanos[idx]
return bool(numpy_bool)
def is_break_minute(self, minute: Minute, _parse: bool = True) -> bool:
"""Query if a given minute is within a break.
Note: `self.side` determines whether either, both or one of break
start and break end are treated as break minutes.
Parameters
----------
minute
Minute being queried.
Returns
-------
bool
            Boolean indicating if `minute` is a break minute.
"""
if _parse:
minute = parse_timestamp(
minute, "minute", raise_oob=True, calendar=self
).value
else:
minute = minute.value
session_idx = np.searchsorted(self._first_minute_nanos(), minute) - 1
break_start = self._last_am_minute_nanos()[session_idx]
break_end = self._first_pm_minute_nanos()[session_idx]
        # NaT comparisons evaluate as False
numpy_bool = break_start < minute < break_end
return bool(numpy_bool)
def is_open_on_minute(
self, dt: Minute, ignore_breaks: bool = False, _parse: bool = True
) -> bool:
"""Query if exchange is open on a given minute.
Note: `self.side` determines whether exchange will be considered
open or closed on session open, session close, break start and
break end.
Parameters
----------
dt
Minute being queried.
ignore_breaks
Should exchange be considered open during any break?
True - treat exchange as open during any break.
False - treat exchange as closed during any break.
Returns
-------
bool
            Boolean indicating if exchange is open on `dt`.
See Also
--------
is_trading_minute
"""
if _parse:
minute = parse_timestamp(dt, "dt", raise_oob=True, calendar=self)
else:
minute = dt
is_trading_minute = self.is_trading_minute(minute, _parse=_parse)
if is_trading_minute or not ignore_breaks:
return is_trading_minute
else:
# not a trading minute although should return True if in break
return self.is_break_minute(minute, _parse=_parse)
def next_open(self, dt: Minute, _parse: bool = True) -> pd.Timestamp:
"""Return next open that follows a given minute.
If `dt` is a session open, the next session's open will be
returned.
Parameters
----------
dt
Minute for which to get the next open.
Returns
-------
pd.Timestamp
UTC timestamp of the next open.
"""
if _parse:
dt = parse_timestamp(dt, "dt", raise_oob=True, calendar=self)
try:
idx = next_divider_idx(self.market_opens_nanos, dt.value)
except IndexError:
if dt.tz_convert(None) >= self.opens[-1]:
raise ValueError(
"Minute cannot be the last open or later (received `dt`"
f" parsed as '{dt}'.)"
) from None
else:
raise
return pd.Timestamp(self.market_opens_nanos[idx], tz=UTC)
def next_close(self, dt: Minute, _parse: bool = True) -> pd.Timestamp:
"""Return next close that follows a given minute.
If `dt` is a session close, the next session's close will be
returned.
Parameters
----------
dt
Minute for which to get the next close.
Returns
-------
pd.Timestamp
UTC timestamp of the next close.
"""
if _parse:
dt = parse_timestamp(dt, "dt", raise_oob=True, calendar=self)
try:
idx = next_divider_idx(self.market_closes_nanos, dt.value)
except IndexError:
if dt.tz_convert(None) == self.closes[-1]:
raise ValueError(
"Minute cannot be the last close (received `dt` parsed as"
f" '{dt}'.)"
) from None
else:
raise
return pd.Timestamp(self.market_closes_nanos[idx], tz=UTC)
def previous_open(self, dt: Minute, _parse: bool = True) -> pd.Timestamp:
"""Return previous open that preceeds a given minute.
If `dt` is a session open, the previous session's open will be
returned.
Parameters
----------
dt
Minute for which to get the previous open.
Returns
-------
pd.Timestamp
UTC timestamp of the previous open.
"""
if _parse:
dt = parse_timestamp(dt, "dt", raise_oob=True, calendar=self)
try:
idx = previous_divider_idx(self.market_opens_nanos, dt.value)
except ValueError:
if dt.tz_convert(None) == self.opens[0]:
raise ValueError(
"Minute cannot be the first open (received `dt` parsed as"
f" '{dt}'.)"
) from None
else:
raise
return pd.Timestamp(self.market_opens_nanos[idx], tz=UTC)
def previous_close(self, dt: Minute, _parse: bool = True) -> pd.Timestamp:
"""Return previous close that preceeds a given minute.
If `dt` is a session close, the previous session's close will be
returned.
Parameters
----------
dt
Minute for which to get the previous close.
Returns
-------
pd.Timestamp
UTC timestamp of the previous close.
"""
if _parse:
dt = parse_timestamp(dt, "dt", raise_oob=True, calendar=self)
try:
idx = previous_divider_idx(self.market_closes_nanos, dt.value)
except ValueError:
if dt.tz_convert(None) <= self.closes[0]:
raise ValueError(
"Minute cannot be the first close or earlier (received"
f" `dt` parsed as '{dt}'.)"
) from None
else:
raise
return pd.Timestamp(self.market_closes_nanos[idx], tz=UTC)
def next_minute(self, dt: Minute, _parse: bool = True) -> pd.Timestamp:
"""Return trading minute that immediately follows a given minute.
Parameters
----------
dt
Minute for which to get next trading minute. Minute can be a
trading or a non-trading minute.
Returns
-------
pd.Timestamp
UTC timestamp of the next minute.
"""
if _parse:
dt = parse_timestamp(dt, "dt", raise_oob=True, calendar=self)
try:
idx = next_divider_idx(self.all_minutes_nanos, dt.value)
except IndexError:
# dt > last_trading_minute handled via parsing
if dt == self.last_trading_minute:
raise ValueError(
"Minute cannot be the last trading minute or later"
f" (received `dt` parsed as '{dt}'.)"
) from None
return self.all_minutes[idx]
def previous_minute(self, dt: Minute, _parse: bool = True) -> pd.Timestamp:
"""Return trading minute that immediately preceeds a given minute.
Parameters
----------
dt
Minute for which to get previous trading minute. Minute can be
a trading or a non-trading minute.
Returns
-------
pd.Timestamp
UTC timestamp of the previous minute.
"""
if _parse:
dt = parse_timestamp(dt, "dt", raise_oob=True, calendar=self)
try:
idx = previous_divider_idx(self.all_minutes_nanos, dt.value)
except ValueError:
# dt < first_trading_minute handled via parsing
if dt == self.first_trading_minute:
raise ValueError(
"Minute cannot be the first trading minute or earlier"
f" (received `dt` parsed as '{dt}'.)"
) from None
return self.all_minutes[idx]
def minute_to_session_label(
self,
dt: Minute,
direction: str = "next",
_parse: bool = True,
) -> pd.Timestamp:
"""Get session corresponding with a trading or break minute.
Parameters
----------
dt
Minute for which require corresponding session.
direction
How to resolve session in event that `dt` is not a trading
or break minute:
"next" (default) - return first session subsequent to
`dt`.
"previous" - return first session prior to `dt`.
"none" - raise ValueError.
Returns
-------
pd.Timestamp (midnight UTC)
Corresponding session label.
Raises
------
ValueError
If `dt` is not a trading minute and `direction` is "none".
"""
if _parse:
minute = parse_timestamp(dt, "dt", calendar=self).value
else:
minute = dt.value
if minute < self.first_trading_minute.value:
# Resolve call here.
if direction == "next":
return self.first_session
else:
raise ValueError(
"Received `minute` as '{0}' although this is earlier than the"
" calendar's first trading minute ({1}). Consider passing"
" `direction` as 'next' to get first session.".format(
| pd.Timestamp(minute, tz="UTC") | pandas.Timestamp |
#Converts .csv files containing all e-prime task data
#separates by subject and saves as .tsv in subject's bids folder
#written for <NAME>'s lab longitudinal lexicality project
#May 2018
import pandas as pd
import numpy as np
import json
import sys
import warnings
warnings.filterwarnings('ignore')  # ignore warnings (np.warnings is not available in newer NumPy)
sub_map = json.load(open('files/subj_map.json','r')) #subj_map.json contains a dictionary of original subject numbers to consecutive bids subject numbers
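# e.g. (hypothetical contents): {"1027": "01", "1031": "02", ...}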
# ask for user input instead, only works for one run at a time
# trial = input('Ses (T1, T2): ')
trial = input('Ses (T1, T2): ')
modality = input('Modality (AA, AV, VV): ')
task = input('Task (NonWord, Word): ')
run = input('Run (01, 02): ')
readingfiles = [trial+'_'+modality+task+'_Run'+run] #name of the file containing all e-prime data for that run
bidpath = input('\nPath to bids folder \n ./ for current directory \n ../ for back a directory: ')
for file in readingfiles:
file_ext = '.csv'
print('\nCSV file: '+file+file_ext)
stim = 'files/{}{}-stims_Run{}.csv'.format(modality, task, run)#name of the csv file that contains the stimuli file name per trial, separate from e-prime data due to standardization in naming
print('Stim file: '+stim)
# read in csv file
df = pd.read_csv('files/'+file+file_ext)
# rename columns for manageability
df = df.rename(columns={"Subject_Number": "Subject", "Condition": "trial_type", "TargetCRESP": "cresp", "durations": "duration",
"PrimeOnsetTime": "onset", "TargetACC": "accuracy", "TargetRESP": "resp", "TargetRT": "response_time"})
df = df[['Subject', 'onset', 'trial_type', 'accuracy', 'response_time', 'cresp', 'resp', 'duration']]
# convert times from ms to s
df['duration'] = df['duration'] + 1800 #add stimuli and iti duration to response period duration
df[['onset', 'response_time', 'duration']] = df[['onset', 'response_time', 'duration']].applymap(lambda x: x / 1000)
# break up tables for each subject
subjects = df['Subject'].unique()
subjects.sort() # might not need this anymore since we read in sub nums from a dict
print("Total subjects: " + str(len(subjects)))
for subj in subjects:
df0 = df[(df['Subject'] == subj)].reset_index(drop=True)
# error checking
for x in range(len(df0['response_time'].values)):
if np.isnan(df0['resp'].iloc[x]) and df0['accuracy'].iloc[x] == 0: #if RT is blank and accuracy is 0 reset RT to be n/a indicating no response
df0['response_time'].iloc[x] = 'n/a'
elif np.isnan(df0['resp'].iloc[x]) and df0['accuracy'].iloc[x] == 1: #if RT is blank and accuracy is 1 there is an error in e-prime
df0['response_time'].iloc[x] = 'err'
                df0['accuracy'].iloc[x] = 'err'  # flag this trial's accuracy as an e-prime error
# calculate onset time
init = df0['onset'][0]
df0[['onset']] = df0[['onset']].applymap(lambda x: x-init)
# ignore warning, meant for modifying the original df
# since we have a new df object, don't need to worry
pd.options.mode.chained_assignment = None # removes the warning message
# drop extra columns, rearrange and add in stim columns
#df0 = df0.drop(columns=['Subject', 'cresp', 'resp', 'ft'])
df0 = df0[['onset', 'duration', 'trial_type', 'accuracy', 'response_time']]
stims = | pd.read_csv(stim) | pandas.read_csv |
from scipy import stats
import random
import numpy as np
import pandas as pd
import CleanData
import timeit
import PullDataPostgreSQL
# Conditional Parameter Aggregation (CPA) is one of the most important parts
# of the entire SDV paper. It is what allows the user to synthesize an entire
# database instead of a single table. There are a couple of steps taken here, in
# accordance with the SDV paper. Keep in mind that the end result
# is to create a series of extended tables that include each original table's data
# and metrics from all the associated children tables
# 1) Start the extended table by saving all the data from the original table.
# 2) Iteratively go through each value in the original table's primary key
# 2a) Go iteratively through each child table
# 2ai) Find all primary key value instances in all children tables
# 2aj) Perform Gaussian Copula and find feature covariance
# 2ak) find alpha and beta values for distribution
# 2al) save all covariance and alpha beta values into the extended table
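# Illustration of the extended-table columns this produces (names follow the
# code below; the child table/column names here are made up for the example):
#   - a categorical child column "color" in child table "orders" adds columns
#     such as Categ_red_color_orders holding, per parent ID, the fraction of
#     that ID's child rows with color == "red"
#   - a continuous child column "price" adds Cont_alpha_price_orders,
#     Cont_beta_price_orders, Cont_loc_price_orders and Cont_scale_price_orders
#     taken from a scipy.stats.beta fit of that ID's child rows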
def ConditionalParameterAggregaation(df, children):
# df is the information from the original table. This includes missing value indices
# and has all datetime values converted to EPOCH
#
# children is a list of all child tables
#
# cur is a database cursor object that will be used to pull data in the future
for childstr in children:
print(childstr)
        child = pd.read_csv('%s.csv' % childstr, index_col=0)  # DataFrame.from_csv was removed from pandas; read_csv with index_col=0 is the equivalent
child.fillna(value=np.nan, inplace=True)
# saves all data as categorical or not. ignores the primary key
logicalCategorical = CleanData.IdentifyCategorical(child)
# preallocates memory for points to be appended to in the future
df = MakeBlankDataFrame(df, child, childstr, logicalCategorical)
# iterate over all IDs in the primary key with the intent of finding and
# inputting data
for c in range(len(df[df.columns[0]])):
print(c)
ID = df[df.columns[0]][c]
# pulls all data in the child table corresponding to the specific ID
data = pd.DataFrame(child[child[df.columns[0]] == ID])
# iterates over every column in the dataset
for y in range(1, len(child.columns)):
column = child.columns[y]
# if the column is Categorical
if logicalCategorical[y]:
uniqueCategories = sorted(child[child.columns[y]].unique().tolist())
# finds the percentage of each variable in the column of the temporary
# dataset. then saves that
if len(data) == 0:
for z in range(len(uniqueCategories)):
cat = uniqueCategories[z]
colname = 'Categ_%s_%s_%s' % (cat, column, childstr)
df.loc[c, colname] = None
else:
count = CalculateCategoricalPercentage(data, uniqueCategories, column)
# adds the points to the correct column
for z in range(len(uniqueCategories)):
cat = uniqueCategories[z]
colname = 'Categ_%s_%s_%s' % (cat, column, childstr)
df.loc[c, colname] = count.loc[0,cat]
# if the column is continuous
else:
points = ['alpha', 'beta', 'loc', 'scale']
# fit the data to a beta distribution and append it to the extended table
# initial dataframe to make sure that numbers aren't left out
if len(data) == 0:
                        for z in range(4):  # fill all four beta-fit parameters (alpha, beta, loc, scale)
colname = 'Cont_%s_%s_%s' % (points[z], column, childstr)
df.loc[c, colname] = None
else:
statistics = list(stats.beta.fit(data[column]))
for z in range(4):
colname = 'Cont_%s_%s_%s' % (points[z], column, childstr)
df.loc[c, colname] = statistics[z]
return df
def CalculateCategoricalPercentage(data, uniqueCategories, column):
# initial dataframe to make sure that numbers aren't left out
d1 = pd.DataFrame([0] * len(uniqueCategories)).T
d1.columns = uniqueCategories
# finds the percentages to be added to the df
count = data[column].value_counts()
count = (count + d1) / sum(count)
count[count.isnull()] = 0
return count
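# Worked example (illustrative): if data[column] holds ['A', 'A', 'B'] and
# uniqueCategories is ['A', 'B'], CalculateCategoricalPercentage returns a
# one-row DataFrame with A = 2/3 and B = 1/3; categories absent from the data
# come back as 0.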
def MakeBlankDataFrame(df, child, childstr, logicalCategorical):
# The point of this function is to create a blank dataframe to enter points into int he future
# It uses column names as metadata storage.
#
# df is the original dataframe getting appended to.
# child is the child dataframe being appended
# childstr is the specific name of the child dataframe
# logicalCategorical is a logical list indicating whether each column is categorical or continuous
# ignores the primary key
for y in range(1, len(child.columns)):
column = child.columns[y]
# For categorical variables, we must create a column for each category.
if logicalCategorical[y]:
uniqueCategories = child[child.columns[y]].unique().tolist()
for z in range(len(uniqueCategories)):
cat = uniqueCategories[z]
colname = 'Categ_%s_%s_%s' % (cat, column, childstr)
df = pd.concat([df, pd.DataFrame(np.zeros([len(df)]), columns=[colname])], axis=1)
# For continuous, we must create 4 columns for beta distribution values
else:
points = ['alpha', 'beta', 'loc', 'scale']
for z in range(4):
colname = 'Cont_%s_%s_%s' % (points[z], column, childstr)
df = pd.concat([df, pd.DataFrame(np.zeros([len(df)]), columns=[colname])], axis=1)
return df
def MakeFakeData(Continuous):
# I'm making fake data to debug this with. It's a temporary funcution
if Continuous == 1:
primaryKey = np.linspace(1,100, num=100)
data1 = stats.norm.rvs(loc=10, scale=1, size=100)
data2 = stats.norm.rvs(loc=20, scale=1, size=100)
data3 = stats.norm.rvs(loc=5, scale=2, size=100)
data4 = stats.norm.rvs(loc=100, scale=10, size=100)
df = np.concatenate([primaryKey, data1, data2, data3, data4])
df = df.reshape([5, 100])
df = | pd.DataFrame(df.T) | pandas.DataFrame |
import pandas as pd
import tensorflow as tf
### Do not duplicate or change the name of the function
### train, you can choose to include callbacks, save checkpoints
### and return basic results as results and the history (we are
### converting it to a pandas DataFrame before sending) of the model.
def train(model, x, y):
history = model.fit(x, y, epochs=25, validation_split=0.1, shuffle=True)
results = model.evaluate(x, y)
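    # model.evaluate returns [loss, metric1, ...]; the indexing below assumes
    # the model was compiled with a single 'accuracy' metric.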
results = | pd.DataFrame({'loss':[results[0]], 'acc': [results[1]]}) | pandas.DataFrame |