| prompt | completion | api |
|---|---|---|
| string (19 to 1.03M chars) | string (4 to 2.12k chars) | string (8 to 90 chars) |

"""Console script for zalando_classification."""
import sys
import click
import pandas as pd
from pathlib import Path
from gpdre.applications.covariate_shift import (
get_dataset, MLPCovariateShiftAdapter,
Classification2DCovariateShiftBenchmark,
ExactCovariateShiftAdapter)
from gpdre.utils import DensityRatio
from gpdre.metrics import normalized_mean_squared_error
# Sensible defaults
# DRE
OPTIMIZER1 = "adam"
EPOCHS1 = 50
BATCH_SIZE = 64
NUM_LAYERS = 1
NUM_UNITS = 8
ACTIVATION = "tanh"
L1_FACTOR = 0.0
L2_FACTOR = 0.0
OPTIMIZER2 = "lbfgs"
EPOCHS2 = 500
PENALTY = "l2"
SUMMARY_DIR = "logs/"
NUM_TRAIN = 500
NUM_TEST = 500
THRESHOLD = 0.5
SEED = 0
DATASET_SEED = 8888
class MLPExperiment:
def __init__(self, num_layers, num_units, activation, l1_factor, l2_factor,
optimizer_auxiliary, epochs_auxiliary, batch_size, optimizer,
epochs, penalty, seed=None):
# Downstream prediction task
self.classification_problem = Classification2DCovariateShiftBenchmark(
optimizer=optimizer, epochs=epochs, penalty=penalty, seed=seed)
# Importance weights
self.adapter = MLPCovariateShiftAdapter(num_layers, num_units,
activation, l1_factor,
l2_factor, optimizer_auxiliary,
epochs_auxiliary, batch_size,
seed=seed)
self.adapter_true = ExactCovariateShiftAdapter(
exact_density_ratio=DensityRatio.from_covariate_shift_example())
self.parameters = dict(num_layers=num_layers, num_units=num_units,
activation=activation, l1_factor=l1_factor,
l2_factor=l2_factor,
optimizer_auxiliary=optimizer_auxiliary,
epochs_auxiliary=epochs_auxiliary,
batch_size=batch_size, optimizer=optimizer,
epochs=epochs, penalty=penalty, seed=seed)
def get_result(self, X_train, y_train, X_test, y_test):
importance_weights_true = self.adapter_true.importance_weights(X_train,
X_test)
importance_weights = self.adapter.importance_weights(X_train, X_test)
auxiliary_accuracy = self.adapter.accuracy(X_train, X_test)
nmse = normalized_mean_squared_error(importance_weights_true,
importance_weights)
test_accuracy = self.classification_problem.test_metric(
train_data=(X_train, y_train), test_data=(X_test, y_test),
importance_weights=importance_weights)
results = dict(self.parameters)
results.update(dict(auxiliary_accuracy=auxiliary_accuracy, nmse=nmse,
test_accuracy=test_accuracy))
return results
@click.command()
@click.argument("name")
@click.option("--summary-dir", default=SUMMARY_DIR,
type=click.Path(file_okay=False, dir_okay=True),
help="Summary directory.")
@click.option("--num-train", default=NUM_TRAIN, type=int,
help="Number of training samples")
@click.option("--num-test", default=NUM_TEST, type=int,
help="Number of test samples")
@click.option("--threshold", default=THRESHOLD, type=float, help="Threshold")
@click.option("-l", "--num-layers", default=NUM_LAYERS, type=int,
help="Number of hidden layers.")
@click.option("-u", "--num-units", default=NUM_UNITS, type=int,
help="Number of hidden units.")
@click.option("--activation", default=ACTIVATION, type=str)
@click.option("--l1-factor", default=L1_FACTOR, type=float,
help="L1 regularization factor.")
@click.option("--l2-factor", default=L2_FACTOR, type=float,
help="L2 regularization factor.")
@click.option("--optimizer1", default=OPTIMIZER1,
help="Optimizer for DRE.")
@click.option("--epochs1", default=EPOCHS1, type=int,
help="Number of epochs.")
@click.option("-b", "--batch-size", default=BATCH_SIZE, type=int,
help="Batch size.")
@click.option("--optimizer2", default=OPTIMIZER2,
help="Optimizer for the downstream prediction task.")
@click.option("--epochs2", default=EPOCHS2, type=int,
help="Number of epochs for the downstream prediction task.")
@click.option("--dataset-seed", default=DATASET_SEED, type=int)
@click.option("-s", "--seed", default=SEED, type=int, help="Random seed")
def main(name, summary_dir, num_train, num_test, threshold, num_layers,
num_units, activation, l1_factor, l2_factor, optimizer1, epochs1,
batch_size, optimizer2, epochs2, dataset_seed, seed):
mlp_experiment = MLPExperiment(num_layers, num_units, activation,
l1_factor, l2_factor, optimizer1, epochs1,
batch_size, optimizer2, epochs2,
penalty=PENALTY, seed=seed)
# Get data
(X_train, y_train), (X_test, y_test) = get_dataset(num_train, num_test,
threshold=threshold,
seed=dataset_seed)
results = mlp_experiment.get_result(X_train, y_train, X_test, y_test)
click.secho("[Seed {seed:04d}] test accuracy: {test_accuracy:.3f}"
.format(**results), fg="green")
# Save results
summary_path = Path(summary_dir).joinpath(name)
summary_path.mkdir(parents=True, exist_ok=True)
    data = pd.Series(results)
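    # Hedged sketch (assumption): the dump ends at the pd.Series above. A
    # plausible continuation writes the results into the summary directory and
    # wires up the usual click entry point; the CSV filename below is
    # illustrative and not taken from the source.
    data.to_csv(summary_path.joinpath("results.csv"), header=True)

    return 0


if __name__ == "__main__":
    sys.exit(main())  # pragma: no cover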
# -*- coding: utf-8 -*-
"""
Unit tests for the elephant.pandas_bridge module.
:copyright: Copyright 2014 by the Elephant team, see AUTHORS.txt.
:license: Modified BSD, see LICENSE.txt for details.
"""
from __future__ import division, print_function
import unittest
from itertools import chain
from neo.test.generate_datasets import fake_neo
import numpy as np
from numpy.testing import assert_array_equal
import quantities as pq
try:
import pandas as pd
from pandas.util.testing import assert_frame_equal, assert_index_equal
except ImportError:
HAVE_PANDAS = False
else:
import elephant.pandas_bridge as ep
HAVE_PANDAS = True
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class MultiindexFromDictTestCase(unittest.TestCase):
def test__multiindex_from_dict(self):
inds = {'test1': 6.5,
'test2': 5,
'test3': 'test'}
targ = pd.MultiIndex(levels=[[6.5], [5], ['test']],
labels=[[0], [0], [0]],
names=['test1', 'test2', 'test3'])
res0 = ep._multiindex_from_dict(inds)
self.assertEqual(targ.levels, res0.levels)
self.assertEqual(targ.names, res0.names)
self.assertEqual(targ.labels, res0.labels)
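# A minimal reimplementation sketch of the helper exercised above, inferred
# from the expected MultiIndex (one single-element level per key, with level
# names taken from the sorted dict keys). This is an assumption for
# illustration only, not elephant's actual _multiindex_from_dict.
def _multiindex_from_dict_sketch(inds):
    names, values = zip(*sorted(inds.items()))
    return pd.MultiIndex.from_tuples([values], names=list(names))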
def _convert_levels(levels):
"""Convert a list of levels to the format pandas returns for a MultiIndex.
Parameters
----------
levels : list
The list of levels to convert.
Returns
-------
list
        The levels from `levels`, converted to the values pandas returns for
        the levels of a MultiIndex.
"""
levels = list(levels)
for i, level in enumerate(levels):
if hasattr(level, 'lower'):
try:
level = unicode(level)
except NameError:
pass
elif hasattr(level, 'date'):
levels[i] = pd.DatetimeIndex(data=[level])
continue
elif level is None:
levels[i] = pd.Index([])
continue
levels[i] = pd.Index([level])
return levels
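# Illustrative behaviour of _convert_levels (not executed here): a plain
# scalar becomes a one-element pd.Index, a datetime becomes a DatetimeIndex,
# and None becomes an empty Index, e.g.
#   _convert_levels(['spam', 5, None])
#   -> [Index(['spam'], dtype='object'), Index([5]), Index([])]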
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class ConvertValueSafeTestCase(unittest.TestCase):
def test__convert_value_safe__float(self):
targ = 5.5
value = targ
res = ep._convert_value_safe(value)
self.assertIs(res, targ)
def test__convert_value_safe__str(self):
targ = 'test'
value = targ
res = ep._convert_value_safe(value)
self.assertIs(res, targ)
def test__convert_value_safe__bytes(self):
targ = 'test'
value = b'test'
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
def test__convert_value_safe__numpy_int_scalar(self):
targ = 5
value = np.array(5)
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res, 'dtype'))
def test__convert_value_safe__numpy_float_scalar(self):
targ = 5.
value = np.array(5.)
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res, 'dtype'))
def test__convert_value_safe__numpy_unicode_scalar(self):
targ = u'test'
value = np.array('test', dtype='U')
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res, 'dtype'))
def test__convert_value_safe__numpy_str_scalar(self):
targ = u'test'
value = np.array('test', dtype='S')
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res, 'dtype'))
def test__convert_value_safe__quantity_scalar(self):
targ = (10., 'ms')
value = 10. * pq.ms
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res[0], 'dtype'))
self.assertFalse(hasattr(res[0], 'units'))
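# Hedged sketch (assumption, not elephant's actual code): the behaviour the
# ConvertValueSafeTestCase above implies -- plain Python scalars pass through,
# numpy scalars are unwrapped with .item(), bytes are decoded to text, and
# quantities become a (magnitude, unit-string) pair.
def _convert_value_safe_sketch(value):
    if hasattr(value, 'dimensionality'):        # quantities.Quantity
        return (_convert_value_safe_sketch(value.magnitude),
                str(value.dimensionality))
    if hasattr(value, 'dtype'):                 # numpy scalar or 0-d array
        return _convert_value_safe_sketch(value.item())
    if isinstance(value, bytes):
        return value.decode('utf-8')
    return value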
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class SpiketrainToDataframeTestCase(unittest.TestCase):
def test__spiketrain_to_dataframe__parents_empty(self):
obj = fake_neo('SpikeTrain', seed=0)
res0 = ep.spiketrain_to_dataframe(obj)
res1 = ep.spiketrain_to_dataframe(obj, child_first=True)
res2 = ep.spiketrain_to_dataframe(obj, child_first=False)
res3 = ep.spiketrain_to_dataframe(obj, parents=True)
res4 = ep.spiketrain_to_dataframe(obj, parents=True,
child_first=True)
res5 = ep.spiketrain_to_dataframe(obj, parents=True,
child_first=False)
res6 = ep.spiketrain_to_dataframe(obj, parents=False)
res7 = ep.spiketrain_to_dataframe(obj, parents=False, child_first=True)
res8 = ep.spiketrain_to_dataframe(obj, parents=False,
child_first=False)
targvalues = pq.Quantity(obj.magnitude, units=obj.units)
targvalues = targvalues.rescale('s').magnitude[np.newaxis].T
targindex = np.arange(len(targvalues))
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(1, len(res4.columns))
self.assertEqual(1, len(res5.columns))
self.assertEqual(1, len(res6.columns))
self.assertEqual(1, len(res7.columns))
self.assertEqual(1, len(res8.columns))
self.assertEqual(len(obj), len(res0.index))
self.assertEqual(len(obj), len(res1.index))
self.assertEqual(len(obj), len(res2.index))
self.assertEqual(len(obj), len(res3.index))
self.assertEqual(len(obj), len(res4.index))
self.assertEqual(len(obj), len(res5.index))
self.assertEqual(len(obj), len(res6.index))
self.assertEqual(len(obj), len(res7.index))
self.assertEqual(len(obj), len(res8.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targvalues, res4.values)
assert_array_equal(targvalues, res5.values)
assert_array_equal(targvalues, res6.values)
assert_array_equal(targvalues, res7.values)
assert_array_equal(targvalues, res8.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
assert_array_equal(targindex, res3.index)
assert_array_equal(targindex, res4.index)
assert_array_equal(targindex, res5.index)
assert_array_equal(targindex, res6.index)
assert_array_equal(targindex, res7.index)
assert_array_equal(targindex, res8.index)
self.assertEqual(['spike_number'], res0.index.names)
self.assertEqual(['spike_number'], res1.index.names)
self.assertEqual(['spike_number'], res2.index.names)
self.assertEqual(['spike_number'], res3.index.names)
self.assertEqual(['spike_number'], res4.index.names)
self.assertEqual(['spike_number'], res5.index.names)
self.assertEqual(['spike_number'], res6.index.names)
self.assertEqual(['spike_number'], res7.index.names)
self.assertEqual(['spike_number'], res8.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
self.assertEqual(keys, res4.columns.names)
self.assertEqual(keys, res5.columns.names)
self.assertEqual(keys, res6.columns.names)
self.assertEqual(keys, res7.columns.names)
self.assertEqual(keys, res8.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res4.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res5.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res6.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res7.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res8.columns.levels):
assert_index_equal(value, level)
def test__spiketrain_to_dataframe__noparents(self):
blk = fake_neo('Block', seed=0)
obj = blk.list_children_by_class('SpikeTrain')[0]
res0 = ep.spiketrain_to_dataframe(obj, parents=False)
res1 = ep.spiketrain_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.spiketrain_to_dataframe(obj, parents=False,
child_first=False)
targvalues = pq.Quantity(obj.magnitude, units=obj.units)
targvalues = targvalues.rescale('s').magnitude[np.newaxis].T
targindex = np.arange(len(targvalues))
attrs = ep._extract_neo_attrs_safe(obj, parents=False,
child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(len(obj), len(res0.index))
self.assertEqual(len(obj), len(res1.index))
self.assertEqual(len(obj), len(res2.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
self.assertEqual(['spike_number'], res0.index.names)
self.assertEqual(['spike_number'], res1.index.names)
self.assertEqual(['spike_number'], res2.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
def test__spiketrain_to_dataframe__parents_childfirst(self):
blk = fake_neo('Block', seed=0)
obj = blk.list_children_by_class('SpikeTrain')[0]
res0 = ep.spiketrain_to_dataframe(obj)
res1 = ep.spiketrain_to_dataframe(obj, child_first=True)
res2 = ep.spiketrain_to_dataframe(obj, parents=True)
res3 = ep.spiketrain_to_dataframe(obj, parents=True, child_first=True)
targvalues = pq.Quantity(obj.magnitude, units=obj.units)
targvalues = targvalues.rescale('s').magnitude[np.newaxis].T
targindex = np.arange(len(targvalues))
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(len(obj), len(res0.index))
self.assertEqual(len(obj), len(res1.index))
self.assertEqual(len(obj), len(res2.index))
self.assertEqual(len(obj), len(res3.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
assert_array_equal(targindex, res3.index)
self.assertEqual(['spike_number'], res0.index.names)
self.assertEqual(['spike_number'], res1.index.names)
self.assertEqual(['spike_number'], res2.index.names)
self.assertEqual(['spike_number'], res3.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
def test__spiketrain_to_dataframe__parents_parentfirst(self):
blk = fake_neo('Block', seed=0)
obj = blk.list_children_by_class('SpikeTrain')[0]
res0 = ep.spiketrain_to_dataframe(obj, child_first=False)
res1 = ep.spiketrain_to_dataframe(obj, parents=True, child_first=False)
targvalues = pq.Quantity(obj.magnitude, units=obj.units)
targvalues = targvalues.rescale('s').magnitude[np.newaxis].T
targindex = np.arange(len(targvalues))
attrs = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=False)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(len(obj), len(res0.index))
self.assertEqual(len(obj), len(res1.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
self.assertEqual(['spike_number'], res0.index.names)
self.assertEqual(['spike_number'], res1.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class EventToDataframeTestCase(unittest.TestCase):
def test__event_to_dataframe__parents_empty(self):
obj = fake_neo('Event', seed=42)
res0 = ep.event_to_dataframe(obj)
res1 = ep.event_to_dataframe(obj, child_first=True)
res2 = ep.event_to_dataframe(obj, child_first=False)
res3 = ep.event_to_dataframe(obj, parents=True)
res4 = ep.event_to_dataframe(obj, parents=True, child_first=True)
res5 = ep.event_to_dataframe(obj, parents=True, child_first=False)
res6 = ep.event_to_dataframe(obj, parents=False)
res7 = ep.event_to_dataframe(obj, parents=False, child_first=True)
res8 = ep.event_to_dataframe(obj, parents=False, child_first=False)
targvalues = obj.labels[:len(obj.times)][np.newaxis].T.astype('U')
targindex = obj.times[:len(obj.labels)].rescale('s').magnitude
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(1, len(res4.columns))
self.assertEqual(1, len(res5.columns))
self.assertEqual(1, len(res6.columns))
self.assertEqual(1, len(res7.columns))
self.assertEqual(1, len(res8.columns))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res2.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res3.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res4.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res5.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res6.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res7.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res8.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targvalues, res4.values)
assert_array_equal(targvalues, res5.values)
assert_array_equal(targvalues, res6.values)
assert_array_equal(targvalues, res7.values)
assert_array_equal(targvalues, res8.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
assert_array_equal(targindex, res3.index)
assert_array_equal(targindex, res4.index)
assert_array_equal(targindex, res5.index)
assert_array_equal(targindex, res6.index)
assert_array_equal(targindex, res7.index)
assert_array_equal(targindex, res8.index)
self.assertEqual(['times'], res0.index.names)
self.assertEqual(['times'], res1.index.names)
self.assertEqual(['times'], res2.index.names)
self.assertEqual(['times'], res3.index.names)
self.assertEqual(['times'], res4.index.names)
self.assertEqual(['times'], res5.index.names)
self.assertEqual(['times'], res6.index.names)
self.assertEqual(['times'], res7.index.names)
self.assertEqual(['times'], res8.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
self.assertEqual(keys, res4.columns.names)
self.assertEqual(keys, res5.columns.names)
self.assertEqual(keys, res6.columns.names)
self.assertEqual(keys, res7.columns.names)
self.assertEqual(keys, res8.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res4.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res5.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res6.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res7.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res8.columns.levels):
assert_index_equal(value, level)
def test__event_to_dataframe__noparents(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Event')[0]
res0 = ep.event_to_dataframe(obj, parents=False)
res1 = ep.event_to_dataframe(obj, parents=False, child_first=False)
res2 = ep.event_to_dataframe(obj, parents=False, child_first=True)
targvalues = obj.labels[:len(obj.times)][np.newaxis].T.astype('U')
targindex = obj.times[:len(obj.labels)].rescale('s').magnitude
attrs = ep._extract_neo_attrs_safe(obj, parents=False,
child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res2.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
self.assertEqual(['times'], res0.index.names)
self.assertEqual(['times'], res1.index.names)
self.assertEqual(['times'], res2.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
def test__event_to_dataframe__parents_childfirst(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Event')[0]
res0 = ep.event_to_dataframe(obj)
res1 = ep.event_to_dataframe(obj, child_first=True)
res2 = ep.event_to_dataframe(obj, parents=True)
res3 = ep.event_to_dataframe(obj, parents=True, child_first=True)
targvalues = obj.labels[:len(obj.times)][np.newaxis].T.astype('U')
targindex = obj.times[:len(obj.labels)].rescale('s').magnitude
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res2.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res3.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
assert_array_equal(targindex, res3.index)
self.assertEqual(['times'], res0.index.names)
self.assertEqual(['times'], res1.index.names)
self.assertEqual(['times'], res2.index.names)
self.assertEqual(['times'], res3.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
def test__event_to_dataframe__parents_parentfirst(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Event')[0]
res0 = ep.event_to_dataframe(obj, child_first=False)
res1 = ep.event_to_dataframe(obj, parents=True, child_first=False)
targvalues = obj.labels[:len(obj.times)][np.newaxis].T.astype('U')
targindex = obj.times[:len(obj.labels)].rescale('s').magnitude
attrs = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=False)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res1.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
self.assertEqual(['times'], res0.index.names)
self.assertEqual(['times'], res1.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class EpochToDataframeTestCase(unittest.TestCase):
def test__epoch_to_dataframe__parents_empty(self):
obj = fake_neo('Epoch', seed=42)
res0 = ep.epoch_to_dataframe(obj)
res1 = ep.epoch_to_dataframe(obj, child_first=True)
res2 = ep.epoch_to_dataframe(obj, child_first=False)
res3 = ep.epoch_to_dataframe(obj, parents=True)
res4 = ep.epoch_to_dataframe(obj, parents=True, child_first=True)
res5 = ep.epoch_to_dataframe(obj, parents=True, child_first=False)
res6 = ep.epoch_to_dataframe(obj, parents=False)
res7 = ep.epoch_to_dataframe(obj, parents=False, child_first=True)
res8 = ep.epoch_to_dataframe(obj, parents=False, child_first=False)
minlen = min([len(obj.times), len(obj.durations), len(obj.labels)])
targvalues = obj.labels[:minlen][np.newaxis].T.astype('U')
targindex = np.vstack([obj.durations[:minlen].rescale('s').magnitude,
obj.times[:minlen].rescale('s').magnitude])
targvalues = targvalues[targindex.argsort()[0], :]
targindex.sort()
attrs = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(1, len(res4.columns))
self.assertEqual(1, len(res5.columns))
self.assertEqual(1, len(res6.columns))
self.assertEqual(1, len(res7.columns))
self.assertEqual(1, len(res8.columns))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res2.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res3.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res4.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res5.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res6.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res7.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res8.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targvalues, res4.values)
assert_array_equal(targvalues, res5.values)
assert_array_equal(targvalues, res6.values)
assert_array_equal(targvalues, res7.values)
assert_array_equal(targvalues, res8.values)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
self.assertEqual(keys, res4.columns.names)
self.assertEqual(keys, res5.columns.names)
self.assertEqual(keys, res6.columns.names)
self.assertEqual(keys, res7.columns.names)
self.assertEqual(keys, res8.columns.names)
self.assertEqual([u'durations', u'times'], res0.index.names)
self.assertEqual([u'durations', u'times'], res1.index.names)
self.assertEqual([u'durations', u'times'], res2.index.names)
self.assertEqual([u'durations', u'times'], res3.index.names)
self.assertEqual([u'durations', u'times'], res4.index.names)
self.assertEqual([u'durations', u'times'], res5.index.names)
self.assertEqual([u'durations', u'times'], res6.index.names)
self.assertEqual([u'durations', u'times'], res7.index.names)
self.assertEqual([u'durations', u'times'], res8.index.names)
self.assertEqual(2, len(res0.index.levels))
self.assertEqual(2, len(res1.index.levels))
self.assertEqual(2, len(res2.index.levels))
self.assertEqual(2, len(res3.index.levels))
self.assertEqual(2, len(res4.index.levels))
self.assertEqual(2, len(res5.index.levels))
self.assertEqual(2, len(res6.index.levels))
self.assertEqual(2, len(res7.index.levels))
self.assertEqual(2, len(res8.index.levels))
assert_array_equal(targindex, res0.index.levels)
assert_array_equal(targindex, res1.index.levels)
assert_array_equal(targindex, res2.index.levels)
assert_array_equal(targindex, res3.index.levels)
assert_array_equal(targindex, res4.index.levels)
assert_array_equal(targindex, res5.index.levels)
assert_array_equal(targindex, res6.index.levels)
assert_array_equal(targindex, res7.index.levels)
assert_array_equal(targindex, res8.index.levels)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res4.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res5.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res6.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res7.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res8.columns.levels):
assert_index_equal(value, level)
def test__epoch_to_dataframe__noparents(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Epoch')[0]
res0 = ep.epoch_to_dataframe(obj, parents=False)
res1 = ep.epoch_to_dataframe(obj, parents=False, child_first=True)
res2 = ep.epoch_to_dataframe(obj, parents=False, child_first=False)
minlen = min([len(obj.times), len(obj.durations), len(obj.labels)])
targvalues = obj.labels[:minlen][np.newaxis].T.astype('U')
targindex = np.vstack([obj.durations[:minlen].rescale('s').magnitude,
obj.times[:minlen].rescale('s').magnitude])
targvalues = targvalues[targindex.argsort()[0], :]
targindex.sort()
attrs = ep._extract_neo_attrs_safe(obj, parents=False,
child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res2.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual([u'durations', u'times'], res0.index.names)
self.assertEqual([u'durations', u'times'], res1.index.names)
self.assertEqual([u'durations', u'times'], res2.index.names)
self.assertEqual(2, len(res0.index.levels))
self.assertEqual(2, len(res1.index.levels))
self.assertEqual(2, len(res2.index.levels))
assert_array_equal(targindex, res0.index.levels)
assert_array_equal(targindex, res1.index.levels)
assert_array_equal(targindex, res2.index.levels)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
def test__epoch_to_dataframe__parents_childfirst(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Epoch')[0]
res0 = ep.epoch_to_dataframe(obj)
res1 = ep.epoch_to_dataframe(obj, child_first=True)
res2 = ep.epoch_to_dataframe(obj, parents=True)
res3 = ep.epoch_to_dataframe(obj, parents=True, child_first=True)
minlen = min([len(obj.times), len(obj.durations), len(obj.labels)])
targvalues = obj.labels[:minlen][np.newaxis].T.astype('U')
targindex = np.vstack([obj.durations[:minlen].rescale('s').magnitude,
obj.times[:minlen].rescale('s').magnitude])
targvalues = targvalues[targindex.argsort()[0], :]
targindex.sort()
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res2.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res3.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
self.assertEqual([u'durations', u'times'], res0.index.names)
self.assertEqual([u'durations', u'times'], res1.index.names)
self.assertEqual([u'durations', u'times'], res2.index.names)
self.assertEqual([u'durations', u'times'], res3.index.names)
self.assertEqual(2, len(res0.index.levels))
self.assertEqual(2, len(res1.index.levels))
self.assertEqual(2, len(res2.index.levels))
self.assertEqual(2, len(res3.index.levels))
assert_array_equal(targindex, res0.index.levels)
assert_array_equal(targindex, res1.index.levels)
assert_array_equal(targindex, res2.index.levels)
assert_array_equal(targindex, res3.index.levels)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
def test__epoch_to_dataframe__parents_parentfirst(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Epoch')[0]
res0 = ep.epoch_to_dataframe(obj, child_first=False)
res1 = ep.epoch_to_dataframe(obj, parents=True, child_first=False)
minlen = min([len(obj.times), len(obj.durations), len(obj.labels)])
targvalues = obj.labels[:minlen][np.newaxis].T.astype('U')
targindex = np.vstack([obj.durations[:minlen].rescale('s').magnitude,
obj.times[:minlen].rescale('s').magnitude])
targvalues = targvalues[targindex.argsort()[0], :]
targindex.sort()
attrs = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=False)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res1.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual([u'durations', u'times'], res0.index.names)
self.assertEqual([u'durations', u'times'], res1.index.names)
self.assertEqual(2, len(res0.index.levels))
self.assertEqual(2, len(res1.index.levels))
assert_array_equal(targindex, res0.index.levels)
assert_array_equal(targindex, res1.index.levels)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class MultiSpiketrainsToDataframeTestCase(unittest.TestCase):
def setUp(self):
if hasattr(self, 'assertItemsEqual'):
self.assertCountEqual = self.assertItemsEqual
def test__multi_spiketrains_to_dataframe__single(self):
obj = fake_neo('SpikeTrain', seed=0, n=5)
res0 = ep.multi_spiketrains_to_dataframe(obj)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=False)
res2 = ep.multi_spiketrains_to_dataframe(obj, parents=True)
res3 = ep.multi_spiketrains_to_dataframe(obj, child_first=True)
res4 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=True)
res5 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=True)
res6 = ep.multi_spiketrains_to_dataframe(obj, child_first=False)
res7 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=False)
res8 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=False)
targ = ep.spiketrain_to_dataframe(obj)
keys = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = 1
targlen = len(obj)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targwidth, len(res4.columns))
self.assertEqual(targwidth, len(res5.columns))
self.assertEqual(targwidth, len(res6.columns))
self.assertEqual(targwidth, len(res7.columns))
self.assertEqual(targwidth, len(res8.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertEqual(targlen, len(res4.index))
self.assertEqual(targlen, len(res5.index))
self.assertEqual(targlen, len(res6.index))
self.assertEqual(targlen, len(res7.index))
self.assertEqual(targlen, len(res8.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
self.assertCountEqual(keys, res4.columns.names)
self.assertCountEqual(keys, res5.columns.names)
self.assertCountEqual(keys, res6.columns.names)
self.assertCountEqual(keys, res7.columns.names)
self.assertCountEqual(keys, res8.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_array_equal(targ.values, res4.values)
assert_array_equal(targ.values, res5.values)
assert_array_equal(targ.values, res6.values)
assert_array_equal(targ.values, res7.values)
assert_array_equal(targ.values, res8.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
assert_frame_equal(targ, res4)
assert_frame_equal(targ, res5)
assert_frame_equal(targ, res6)
assert_frame_equal(targ, res7)
assert_frame_equal(targ, res8)
def test__multi_spiketrains_to_dataframe__unit_default(self):
obj = fake_neo('Unit', seed=0, n=5)
res0 = ep.multi_spiketrains_to_dataframe(obj)
objs = obj.spiketrains
targ = [ep.spiketrain_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_spiketrains_to_dataframe__segment_default(self):
obj = fake_neo('Segment', seed=0, n=5)
res0 = ep.multi_spiketrains_to_dataframe(obj)
objs = obj.spiketrains
targ = [ep.spiketrain_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_spiketrains_to_dataframe__block_noparents(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_spiketrains_to_dataframe(obj, parents=False)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=False)
objs = obj.list_children_by_class('SpikeTrain')
targ = [ep.spiketrain_to_dataframe(iobj,
parents=False, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=False,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
def test__multi_spiketrains_to_dataframe__block_parents_childfirst(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_spiketrains_to_dataframe(obj)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=True)
res2 = ep.multi_spiketrains_to_dataframe(obj, child_first=True)
res3 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=True)
objs = obj.list_children_by_class('SpikeTrain')
targ = [ep.spiketrain_to_dataframe(iobj,
parents=True, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
def test__multi_spiketrains_to_dataframe__block_parents_parentfirst(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_spiketrains_to_dataframe(obj, child_first=False)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=False)
objs = obj.list_children_by_class('SpikeTrain')
targ = [ep.spiketrain_to_dataframe(iobj,
parents=True, child_first=False)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=False).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
def test__multi_spiketrains_to_dataframe__list_noparents(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_spiketrains_to_dataframe(obj, parents=False)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=False)
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj,
parents=False, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=False,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
def test__multi_spiketrains_to_dataframe__list_parents_childfirst(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_spiketrains_to_dataframe(obj)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=True)
res2 = ep.multi_spiketrains_to_dataframe(obj, child_first=True)
res3 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=True)
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj,
parents=True, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
def test__multi_spiketrains_to_dataframe__list_parents_parentfirst(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_spiketrains_to_dataframe(obj, child_first=False)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=False)
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj,
parents=True, child_first=False)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=False).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
def test__multi_spiketrains_to_dataframe__tuple_default(self):
obj = tuple(fake_neo('Block', seed=i, n=3) for i in range(3))
res0 = ep.multi_spiketrains_to_dataframe(obj)
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_spiketrains_to_dataframe__iter_default(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_spiketrains_to_dataframe(iter(obj))
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_spiketrains_to_dataframe__dict_default(self):
obj = dict((i, fake_neo('Block', seed=i, n=3)) for i in range(3))
res0 = ep.multi_spiketrains_to_dataframe(obj)
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in
obj.values())
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class MultiEventsToDataframeTestCase(unittest.TestCase):
def setUp(self):
if hasattr(self, 'assertItemsEqual'):
self.assertCountEqual = self.assertItemsEqual
def test__multi_events_to_dataframe__single(self):
obj = fake_neo('Event', seed=0, n=5)
res0 = ep.multi_events_to_dataframe(obj)
res1 = ep.multi_events_to_dataframe(obj, parents=False)
res2 = ep.multi_events_to_dataframe(obj, parents=True)
res3 = ep.multi_events_to_dataframe(obj, child_first=True)
res4 = ep.multi_events_to_dataframe(obj, parents=False,
child_first=True)
res5 = ep.multi_events_to_dataframe(obj, parents=True,
child_first=True)
res6 = ep.multi_events_to_dataframe(obj, child_first=False)
res7 = ep.multi_events_to_dataframe(obj, parents=False,
child_first=False)
res8 = ep.multi_events_to_dataframe(obj, parents=True,
child_first=False)
targ = ep.event_to_dataframe(obj)
keys = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = 1
targlen = min(len(obj.times), len(obj.labels))
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targwidth, len(res4.columns))
self.assertEqual(targwidth, len(res5.columns))
self.assertEqual(targwidth, len(res6.columns))
self.assertEqual(targwidth, len(res7.columns))
self.assertEqual(targwidth, len(res8.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertEqual(targlen, len(res4.index))
self.assertEqual(targlen, len(res5.index))
self.assertEqual(targlen, len(res6.index))
self.assertEqual(targlen, len(res7.index))
self.assertEqual(targlen, len(res8.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
self.assertCountEqual(keys, res4.columns.names)
self.assertCountEqual(keys, res5.columns.names)
self.assertCountEqual(keys, res6.columns.names)
self.assertCountEqual(keys, res7.columns.names)
self.assertCountEqual(keys, res8.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_array_equal(targ.values, res4.values)
assert_array_equal(targ.values, res5.values)
assert_array_equal(targ.values, res6.values)
assert_array_equal(targ.values, res7.values)
assert_array_equal(targ.values, res8.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
assert_frame_equal(targ, res4)
assert_frame_equal(targ, res5)
assert_frame_equal(targ, res6)
assert_frame_equal(targ, res7)
assert_frame_equal(targ, res8)
def test__multi_events_to_dataframe__segment_default(self):
obj = fake_neo('Segment', seed=0, n=5)
res0 = ep.multi_events_to_dataframe(obj)
objs = obj.events
targ = [ep.event_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_events_to_dataframe__block_noparents(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_events_to_dataframe(obj, parents=False)
res1 = ep.multi_events_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.multi_events_to_dataframe(obj, parents=False,
child_first=False)
objs = obj.list_children_by_class('Event')
targ = [ep.event_to_dataframe(iobj, parents=False, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=False,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
def test__multi_events_to_dataframe__block_parents_childfirst(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_events_to_dataframe(obj)
res1 = ep.multi_events_to_dataframe(obj, parents=True)
res2 = ep.multi_events_to_dataframe(obj, child_first=True)
res3 = ep.multi_events_to_dataframe(obj, parents=True,
child_first=True)
objs = obj.list_children_by_class('Event')
targ = [ep.event_to_dataframe(iobj, parents=True, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
def test__multi_events_to_dataframe__block_parents_parentfirst(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_events_to_dataframe(obj, child_first=False)
res1 = ep.multi_events_to_dataframe(obj, parents=True,
child_first=False)
objs = obj.list_children_by_class('Event')
targ = [ep.event_to_dataframe(iobj, parents=True, child_first=False)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=False).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
def test__multi_events_to_dataframe__list_noparents(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_events_to_dataframe(obj, parents=False)
res1 = ep.multi_events_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.multi_events_to_dataframe(obj, parents=False,
child_first=False)
objs = (iobj.list_children_by_class('Event') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.event_to_dataframe(iobj, parents=False, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=False,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
def test__multi_events_to_dataframe__list_parents_childfirst(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_events_to_dataframe(obj)
res1 = ep.multi_events_to_dataframe(obj, parents=True)
res2 = ep.multi_events_to_dataframe(obj, child_first=True)
res3 = ep.multi_events_to_dataframe(obj, parents=True,
child_first=True)
objs = (iobj.list_children_by_class('Event') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.event_to_dataframe(iobj, parents=True, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
def test__multi_events_to_dataframe__list_parents_parentfirst(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_events_to_dataframe(obj, child_first=False)
res1 = ep.multi_events_to_dataframe(obj, parents=True,
child_first=False)
objs = (iobj.list_children_by_class('Event') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.event_to_dataframe(iobj, parents=True, child_first=False)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=False).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
def test__multi_events_to_dataframe__tuple_default(self):
obj = tuple(fake_neo('Block', seed=i, n=3) for i in range(3))
res0 = ep.multi_events_to_dataframe(obj)
objs = (iobj.list_children_by_class('Event') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.event_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_events_to_dataframe__iter_default(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_events_to_dataframe(iter(obj))
objs = (iobj.list_children_by_class('Event') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.event_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_events_to_dataframe__dict_default(self):
obj = dict((i, fake_neo('Block', seed=i, n=3)) for i in range(3))
res0 = ep.multi_events_to_dataframe(obj)
objs = (iobj.list_children_by_class('Event') for iobj in
obj.values())
objs = list(chain.from_iterable(objs))
targ = [ep.event_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class MultiEpochsToDataframeTestCase(unittest.TestCase):
def setUp(self):
if hasattr(self, 'assertItemsEqual'):
self.assertCountEqual = self.assertItemsEqual
def test__multi_epochs_to_dataframe__single(self):
obj = fake_neo('Epoch', seed=0, n=5)
res0 = ep.multi_epochs_to_dataframe(obj)
res1 = ep.multi_epochs_to_dataframe(obj, parents=False)
res2 = ep.multi_epochs_to_dataframe(obj, parents=True)
res3 = ep.multi_epochs_to_dataframe(obj, child_first=True)
res4 = ep.multi_epochs_to_dataframe(obj, parents=False,
child_first=True)
res5 = ep.multi_epochs_to_dataframe(obj, parents=True,
child_first=True)
res6 = ep.multi_epochs_to_dataframe(obj, child_first=False)
res7 = ep.multi_epochs_to_dataframe(obj, parents=False,
child_first=False)
res8 = ep.multi_epochs_to_dataframe(obj, parents=True,
child_first=False)
targ = ep.epoch_to_dataframe(obj)
keys = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = 1
targlen = min(len(obj.times), len(obj.durations), len(obj.labels))
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targwidth, len(res4.columns))
self.assertEqual(targwidth, len(res5.columns))
self.assertEqual(targwidth, len(res6.columns))
self.assertEqual(targwidth, len(res7.columns))
self.assertEqual(targwidth, len(res8.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertEqual(targlen, len(res4.index))
self.assertEqual(targlen, len(res5.index))
self.assertEqual(targlen, len(res6.index))
self.assertEqual(targlen, len(res7.index))
self.assertEqual(targlen, len(res8.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
self.assertCountEqual(keys, res4.columns.names)
self.assertCountEqual(keys, res5.columns.names)
self.assertCountEqual(keys, res6.columns.names)
self.assertCountEqual(keys, res7.columns.names)
self.assertCountEqual(keys, res8.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_array_equal(targ.values, res4.values)
assert_array_equal(targ.values, res5.values)
assert_array_equal(targ.values, res6.values)
assert_array_equal(targ.values, res7.values)
assert_array_equal(targ.values, res8.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
assert_frame_equal(targ, res4)
assert_frame_equal(targ, res5)
assert_frame_equal(targ, res6)
assert_frame_equal(targ, res7)
assert_frame_equal(targ, res8)
def test__multi_epochs_to_dataframe__segment_default(self):
obj = fake_neo('Segment', seed=0, n=5)
res0 = ep.multi_epochs_to_dataframe(obj)
objs = obj.epochs
targ = [ep.epoch_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.durations),
len(iobj.labels))] for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_epochs_to_dataframe__block_noparents(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_epochs_to_dataframe(obj, parents=False)
res1 = ep.multi_epochs_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.multi_epochs_to_dataframe(obj, parents=False,
child_first=False)
objs = obj.list_children_by_class('Epoch')
targ = [ep.epoch_to_dataframe(iobj, parents=False, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=False,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.durations),
len(iobj.labels))] for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
"""
Tests the financial data structures
"""
import unittest
import os
import numpy as np
import pandas as pd
from mlfinlab.data_structures import standard_data_structures as ds
class TestDataStructures(unittest.TestCase):
"""
Test the various financial data structures:
1. Dollar bars
2. Volume bars
3. Tick bars
"""
def setUp(self):
"""
Set the file path for the tick data csv
"""
project_path = os.path.dirname(__file__)
self.path = project_path + '/test_data/tick_data.csv'
def test_dollar_bars(self):
"""
Tests the dollar bars implementation.
"""
threshold = 100000
db1 = ds.get_dollar_bars(self.path, threshold=threshold, batch_size=1000, verbose=False)
db2 = ds.get_dollar_bars(self.path, threshold=threshold, batch_size=50, verbose=False)
db3 = ds.get_dollar_bars(self.path, threshold=threshold, batch_size=10, verbose=False)
ds.get_dollar_bars(self.path, threshold=threshold, batch_size=50, verbose=False,
to_csv=True, output_path='test.csv')
db4 = pd.read_csv('test.csv', parse_dates=[0])
# Assert diff batch sizes have same number of bars
self.assertTrue(db1.shape == db2.shape)
self.assertTrue(db1.shape == db3.shape)
self.assertTrue(db4.shape == db1.shape)
# Assert same values
self.assertTrue(np.all(db1.values == db2.values))
self.assertTrue(np.all(db1.values == db3.values))
self.assertTrue(np.all(db4.values == db1.values))
# Assert OHLC is correct
self.assertTrue(db1.loc[0, 'open'] == 1205)
self.assertTrue(db1.loc[0, 'high'] == 1904.75)
self.assertTrue(db1.loc[0, 'low'] == 1005.0)
self.assertTrue(db1.loc[0, 'close'] == 1304.5)
# delete generated csv file (if it wasn't generated test would fail)
os.remove('test.csv')
def test_volume_bars(self):
"""
Tests the volume bars implementation.
"""
threshold = 30
db1 = ds.get_volume_bars(self.path, threshold=threshold, batch_size=1000, verbose=False)
db2 = ds.get_volume_bars(self.path, threshold=threshold, batch_size=50, verbose=False)
db3 = ds.get_volume_bars(self.path, threshold=threshold, batch_size=10, verbose=False)
ds.get_volume_bars(self.path, threshold=threshold, batch_size=50, verbose=False,
to_csv=True, output_path='test.csv')
db4 = pd.read_csv('test.csv', parse_dates=[0])
# Assert diff batch sizes have same number of bars
self.assertTrue(db1.shape == db2.shape)
self.assertTrue(db1.shape == db3.shape)
self.assertTrue(db4.shape == db1.shape)
# Assert same values
self.assertTrue(np.all(db1.values == db2.values))
self.assertTrue(np.all(db1.values == db3.values))
self.assertTrue(np.all(db4.values == db1.values))
# Assert OHLC is correct
self.assertTrue(db1.loc[0, 'open'] == 1205)
self.assertTrue(db1.loc[0, 'high'] == 1904.75)
self.assertTrue(db1.loc[0, 'low'] == 1005.0)
self.assertTrue(db1.loc[0, 'close'] == 1304.75)
# delete generated csv file (if it wasn't generated test would fail)
os.remove('test.csv')
def test_tick_bars(self):
"""
Test the tick bars implementation.
"""
threshold = 10
db1 = ds.get_tick_bars(self.path, threshold=threshold, batch_size=1000, verbose=False)
db2 = ds.get_tick_bars(self.path, threshold=threshold, batch_size=50, verbose=False)
db3 = ds.get_tick_bars(self.path, threshold=threshold, batch_size=10, verbose=False)
ds.get_tick_bars(self.path, threshold=threshold, batch_size=50, verbose=False,
to_csv=True, output_path='test.csv')
db4 = pd.read_csv('test.csv', parse_dates=[0])
# Assert diff batch sizes have same number of bars
self.assertTrue(db1.shape == db2.shape)
self.assertTrue(db1.shape == db3.shape)
self.assertTrue(db4.shape == db1.shape)
# Assert same values
self.assertTrue(np.all(db1.values == db2.values))
self.assertTrue(np.all(db1.values == db3.values))
self.assertTrue(np.all(db4.values == db1.values))
# Assert OHLC is correct
self.assertTrue(db1.loc[0, 'open'] == 1205)
self.assertTrue(db1.loc[0, 'high'] == 1904.75)
self.assertTrue(db1.loc[0, 'low'] == 1005.0)
self.assertTrue(db1.loc[0, 'close'] == 1304.50)
# delete generated csv file (if it wasn't generated test would fail)
os.remove('test.csv')
def test_multiple_csv_file_input(self):
"""
Tests that bars generated from multiple csv files and from a single csv file yield the same result
"""
threshold = 100000
data = pd.read_csv(self.path)
idx = int(np.round(len(data) / 2))
data1 = data.iloc[:idx]
data2 = data.iloc[idx:]
tick1 = "tick_data_1.csv"
tick2 = "tick_data_2.csv"
data1.to_csv(tick1, index=False)
data2.to_csv(tick2, index=False)
file_paths = [tick1, tick2]
db1 = ds.get_dollar_bars(file_paths, threshold=threshold, batch_size=1000, verbose=False)
db2 = ds.get_dollar_bars(file_paths, threshold=threshold, batch_size=50, verbose=False)
db3 = ds.get_dollar_bars(file_paths, threshold=threshold, batch_size=10, verbose=False)
ds.get_dollar_bars(self.path, threshold=threshold, batch_size=50, verbose=False,
to_csv=True, output_path='test.csv')
db4 = pd.read_csv('test.csv', parse_dates=[0])
# Assert diff batch sizes have same number of bars
self.assertTrue(db1.shape == db2.shape)
self.assertTrue(db1.shape == db3.shape)
self.assertTrue(db4.shape == db1.shape)
# Assert same values
self.assertTrue(np.all(db1.values == db2.values))
self.assertTrue(np.all(db1.values == db3.values))
self.assertTrue(np.all(db4.values == db1.values))
# Assert OHLC is correct
self.assertTrue(db1.loc[0, 'open'] == 1205)
self.assertTrue(db1.loc[0, 'high'] == 1904.75)
self.assertTrue(db1.loc[0, 'low'] == 1005.0)
self.assertTrue(db1.loc[0, 'close'] == 1304.50)
# delete generated csv files (if they weren't generated test would fail)
for csv in (tick1, tick2, "test.csv"):
os.remove(csv)
def test_df_as_batch_run_input(self):
"""
Tests that bars generated for csv file and Pandas Data Frame yield the same result
"""
threshold = 100000
tick_data = pd.read_csv(self.path)
tick_data['Date and Time'] = pd.to_datetime(tick_data['Date and Time'])
db1 = ds.get_dollar_bars(self.path, threshold=threshold, batch_size=1000, verbose=False)
ds.get_dollar_bars(self.path, threshold=threshold, batch_size=50, verbose=False,
to_csv=True, output_path='test.csv')
db2 = pd.read_csv('test.csv')
db2['date_time'] = pd.to_datetime(db2.date_time)
db3 = ds.get_dollar_bars(tick_data, threshold=threshold, batch_size=10, verbose=False)
# Assert diff batch sizes have same number of bars
self.assertTrue(db1.shape == db2.shape)
self.assertTrue(db1.shape == db3.shape)
# Assert same values
self.assertTrue(np.all(db1.values == db2.values))
self.assertTrue(np.all(db1.values == db3.values))
def test_list_as_run_input(self):
"""
Tests that data generated with csv file and list yield the same result
"""
threshold = 100000
tick_data = pd.read_csv(self.path)
tick_data['Date and Time'] = pd.to_datetime(tick_data['Date and Time'])
db1 = ds.get_dollar_bars(self.path, threshold=threshold, batch_size=1000, verbose=False)
ds.get_dollar_bars(self.path, threshold=threshold, batch_size=50, verbose=False,
to_csv=True, output_path='test.csv')
db2 = pd.read_csv('test.csv')
"""Implement custom daily and weekly trading day calendars and datetime methods
- pandas custom business calendar
Author: <NAME>
License: MIT
"""
import datetime
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
import pandas_datareader as pdr
from pandas.tseries.holiday import USFederalHolidayCalendar
from sqlalchemy import Column, Integer
from pandas.api.types import is_list_like
from pandas.tseries.offsets import MonthEnd, YearEnd, QuarterEnd
import config
# .to_pydatetime() - convert pandas format (Timestamp, datetime64) to datetime
# datetime.date.strftime(d, '%Y%m%d') - convert datetime to string
# np.array([self(dates)], dtype='datetime64[D]') - converts to numpy date format
# datetime.datetime(year, month, day) - returns datetime.datetime format
def to_monthend(dt):
"""Return calendar monthend date given an int date or list"""
if is_list_like(dt):
return [to_monthend(d) for d in dt]
if dt <= 9999:
d = datetime.datetime(year=dt, month=12, day=1) + MonthEnd(0)
elif dt <= 999999:
d = datetime.datetime(year=dt//100, month=dt % 100, day=1) + MonthEnd(0)
else:
d = pd.to_datetime(str(dt)) + MonthEnd(0)
return int(d.strftime('%Y%m%d'))
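# Illustrative examples, one per branch above:
#   to_monthend(2021)     -> 20211231   # year only
#   to_monthend(202103)   -> 20210331   # year + month
#   to_monthend(20210315) -> 20210331   # full yyyymmdd date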
from collections import namedtuple
def int2date(date):
if is_list_like(date):
return [int2date(d) for d in date]
return namedtuple('Date', ['year', 'month', 'day'])(
date // 10000, (date // 100) % 100, date % 100)
def str2date(date, informat='%Y-%m-%d', outformat='%Y%m%d'):
"""Extract int components from date strings by input and output formats
Parameters
----------
date : str or list of str
input date strings to convert
informat : str, default is '%F'
date format of input string
outformat : str or list of str, or dict of {key: str}, default is '%Y%m%d'
date format of output. If dict, then output is dict with same key names
Returns
-------
output : int or list of int or dict of int
int date components corresponding to outformat
Notes
-----
Formats specified as in strptime() and strftime():
%b %B %h = input month name
%F = %Y-%m-%d
%T = %H:%M:%S
%n = whitespace
%w %W %U %V = week number
%u = day of week (1-7)
"""
if is_list_like(date):
return [str2date(s, informat, outformat) for s in date]
if isinstance(date, int):
date = str(date)
dt = datetime.datetime.strptime(date, informat)
if isinstance(outformat, dict):
return {k: int(dt.strftime(v)) for k,v in outformat.items()}
elif is_list_like(outformat):
return [int(dt.strftime(f)) for f in outformat]
else:
return int(dt.strftime(outformat))
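# Illustrative examples with the default output format and a dict outformat:
#   str2date('2021-03-15')                               -> 20210315
#   str2date('20210315', informat='%Y%m%d',
#            outformat={'year': '%Y', 'month': '%m'})    -> {'year': 2021, 'month': 3}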
def minibatch(x, batchsize):
"""Group the rows of x into minibatches of length batchsize"""
return [x[i:(i + batchsize)] for i in range(0, len(x), batchsize)]
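# Illustrative example: minibatch(list(range(7)), 3) -> [[0, 1, 2], [3, 4, 5], [6]]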
class BusDay:
"""Implement custom trading date calendar, based on FamaFrench and NYSE
Parameters
----------
sql : SQL instance
SQL connection
create : bool
If True, recreate from FamaFrench library pdr datareader, else from SQL
Attributes
----------
busdaycalendar_ : numpy busdaycalendar object
Customized business days, excluding trading holidays
custom_ : pd.offsets.CDay object
For determing offsets with custom business days
begmo_ : pd.offsets.CBMonthBegin
For determining custom beginning of month date
endmo_ : pd.offsets.CBEndBegin
For determining custom end of month date
max_date : int
last date of calendar
"""
def __init__(self, sql, create=False, start=19251231, end=20401231):
"""Retrieve or create custom calendar (from F-F_Research_Data_Factors)"""
self.sql = sql
self.table = sql.Table('busdates',
Column('date', Integer, primary_key=True))
if create: # reload the series using pandas reader
f = pdr.data.DataReader(
name='F-F_ST_Reversal_Factor_daily',
data_source='famafrench', # 'F-F_Research_Data_Factors_daily'
start=1900, end=2050)[0].index.sort_values().unique()
df = DataFrame(str2date(f.astype(str), '%Y-%m-%d', '%Y%m%d'),
columns=['date'])
self.table.create(checkfirst=True)
sql.load_dataframe('busdates', df)
else:
df = sql.read_dataframe('SELECT * FROM busdates')
# 1. Initially, dates = actual FamaFrench busdays
dates = pd.DatetimeIndex(sorted(list(df['date'].unique().astype(str))))
last = pd.to_datetime(str(df.iloc[-1]['date']))
# 2. Extend with pandas 5-day calendar from last through to 20221231
dates = dates.append(pd.date_range(last, end=pd.to_datetime('20221231'),
freq='B')[1:])
# 3. But remove current list of anticipated NYSE holidays
hols = ['20210101', '20210118', '20210215', '20210402', '20210531',
'20210705', '20210906', '20211125', '20211224', '20220117',
'20220221', '20220415', '20220530', '20220704', '20220905',
'20221124','20221226']
self.max_date = max(int(max(hols)[:4])*10000+1231, max(df['date']))
hols = pd.to_datetime(hols)
dates = sorted(list(set(dates).difference(set(hols)))) # actual busdays
# 4. Generate a list of all potential busdays from pandas 6-day calendar
alldates = set(pd.date_range(dates[0], dates[-1], freq=pd.offsets.CDay(
calendar=np.busdaycalendar('1111110'), normalize=True)))
# 5. Finalize actual holidays: hols = all dates less actual dates
hols = np.array(list(alldates.difference(dates)), dtype='datetime64[D]')
hols = sorted(set(hols).union([np.datetime64('1926-01-01')]))
# Custom and offset calendar objects use the 6-day week less actual holidays
self.busdaycalendar_ = np.busdaycalendar(weekmask='1111110',
holidays=hols)
self.custom_ = pd.offsets.CDay(calendar=self.busdaycalendar_)
self.begmo_ = pd.offsets.CBMonthBegin(calendar=self.busdaycalendar_)
self.endmo_ = pd.offsets.CBMonthEnd(calendar=self.busdaycalendar_)
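# Illustrative usage sketch (assumes an instance `bd = BusDay(sql)` built from an
# existing SQL connection); the custom offsets shift dates along the trading calendar:
#   some_date + bd.custom_   # advance one custom trading day
#   some_date + bd.endmo_    # advance to the next custom month-end trading day
#   some_date + bd.begmo_    # advance to the next custom month-begin trading day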
#!/home/cab22/miniconda3/bin/python
#SBATCH --account=commons
#SBATCH --export=All
#SBATCH --partition=commons
#SBATCH --time=24:00:00
#SBATCH --ntasks=1
#SBATCH --threads-per-core=1
#SBATCH --cpus-per-task=2
#SBATCH --gres=gpu:1
#SBATCH --time=24:00:00
#SBATCH --export=ALL
#SBATCH --array=0-15
#SBATCH --mem=16G
import os
import subprocess
import itertools
import numpy as np
import warnings
import pandas
import time
import argparse
class SlurmJobArray():
""" Selects a single condition from an array of parameters using the SLURM_ARRAY_TASK_ID environment variable. The parameters need to be supplied as a dictionary. if the task is not in a slurm environment, the test parameters will supersede the parameters, and the job_id would be taken as 0. Example:
parameters={"epsilon":[100],
"aligned":[True,False],
"actinLen":[20,40,60,80,100,120,140,160,180,200,220,240,260,280,300],
"repetition":range(5),
"temperature":[300],
"system2D":[False],
"simulation_platform":["OpenCL"]}
test_parameters={"simulation_platform":"CPU"}
sjob=SlurmJobArray("ActinSimv6", parameters, test_parameters)
:var test_run: Boolean: This simulation is a test
:var job_id: SLURM_ARRAY_TASK_ID
:var all_parameters: Parameters used to initialize the job
:var parameters: Parameters for this particular job
:var name: The name (and relative path) of the output
"""
def __init__(self, name, parameters, test_parameters={},test_id=0):
"""
Args:
    name: base name used to construct the output name for this job
    parameters: dict mapping each parameter name to the list of values to sweep over
    test_parameters: parameter overrides applied when running outside SLURM
    test_id: condition index used for test runs outside SLURM (default 0)
"""
self.all_parameters=parameters
self.test_parameters=test_parameters
#Parse the slurm variables
self.slurm_variables={}
for key in os.environ:
if len(key.split("_"))>1 and key.split("_")[0]=='SLURM':
self.slurm_variables.update({key:os.environ[key]})
#Check if there is a job id
self.test_run=False
try:
self.job_id=int(self.slurm_variables["SLURM_ARRAY_TASK_ID"])
except KeyError:
self.test_run=True
warnings.warn("Test Run: SLURM_ARRAY_TASK_ID not in environment variables")
self.job_id=test_id
keys=parameters.keys()
self.all_conditions=list(itertools.product(*[parameters[k] for k in keys]))
self.parameter=dict(zip(keys,self.all_conditions[self.job_id]))
#The name only includes enough information to differentiate the simulations.
self.name=f"{name}_{self.job_id:03d}_" + '_'.join([f"{a[0]}_{self[a]}" for a in self.parameter if len(self.all_parameters[a])>1])
def __getitem__(self, name):
if self.test_run:
try:
return self.test_parameters[name]
except KeyError:
return self.parameter[name]
else:
return self.parameter[name]
def __getattr__(self, name: str):
""" The keys of the parameters can be called as attributes
"""
if name in self.__dict__:
return object.__getattribute__(self, name)
elif name in self.parameter:
return self[name]
else:
return object.__getattribute__(self, name)
def __repr__(self):
return str(self.parameter)
def keys(self):
return str(self.parameter.keys())
def print_parameters(self):
print(f"Number of conditions: {len(self.all_conditions)}")
print("Running Conditions")
for k in self.parameter.keys():
print(f"{k} :", f"{self[k]}")
print()
def print_slurm_variables(self):
print("Slurm Variables")
for key in self.slurm_variables:
print (key,":",self.slurm_variables[key])
print()
def write_csv(self, out=""):
# Combine the selected condition with the SLURM metadata for this job
# (assumed completion: the original statement was truncated at this point)
s = pandas.concat([pandas.Series(self.parameter), pandas.Series(self.slurm_variables)])
s.to_csv(out if out else self.name + '.param')
#########################
#########################
# Need to account for limit in input period
#########################
#########################
# Baseline M67 long script -- NO crowding
# New script copied from quest - want to take p and ecc from each population (all, obs, rec) and put them into separate file
# Doing this so we don't have to run analyse each time
# Can write separate script for p-ecc plots
# Quest paths in this version of script
import pandas as pd
import numpy as np
import os
from astropy.coordinates import SkyCoord
from astropy import units, constants
from astropy.modeling import models, fitting
import scipy.stats
from scipy.integrate import quad
#for Quest
import matplotlib
matplotlib.use('Agg')
doIndividualPlots = True
from matplotlib import pyplot as plt
def file_len(fname):
i = 0
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
def getPhs(sigma, m1=1*units.solMass, m2=1*units.solMass, m3=0.5*units.solMass):
Phs = np.pi*constants.G/np.sqrt(2.)*(m1*m2/m3)**(3./2.)*(m1 + m2)**(-0.5)*sigma**(-3.)
return Phs.decompose().to(units.day)
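# Illustrative call: the hard-soft boundary period for a velocity dispersion of
# 10 km/s, returned as an astropy Quantity in days:
#   Phs = getPhs(10.*units.km/units.s)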
#similar to field, but limiting by the hard-soft boundary
def fitRagfb():
x = [0.05, 0.1, 1, 8, 15] #estimates of midpoints in bins, and using this: https://sites.uni.edu/morgans/astro/course/Notes/section2/spectralmasses.html
y = [0.20, 0.35, 0.50, 0.70, 0.75]
init = models.PowerLaw1D(amplitude=0.5, x_0=1, alpha=-1.)
fitter = fitting.LevMarLSQFitter()
fit = fitter(init, x, y)
return fit
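# Illustrative use: fbFit = fitRagfb(); fbFit(1.0) evaluates the fitted power law
# at 1 Msun, giving the approximate binary fraction for solar-mass primaries.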
def RagNormal(x, cdf = False):
mean = 5.03
std = 2.28
if (cdf):
return scipy.stats.norm.cdf(x,mean,std)
return scipy.stats.norm.pdf(x,mean,std)
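# Illustrative use: the fraction of binaries with periods below ~1 year under this
# log-normal (mean log10(P/day) = 5.03, sigma = 2.28):
#   RagNormal(np.log10(365.), cdf=True)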
def saveHist(histAll, histObs, histRec, bin_edges, xtitle, fname, filters = ['u_', 'g_', 'r_', 'i_', 'z_', 'y_','all']):
c1 = '#5687A6' #Dali Blue (Andrew's AAS Poster)
c2 = '#A62B1F' #Dai Red
c3 = '#BF8A26' #Dali Beige
fig,ax1 = plt.subplots(figsize=(8,6), sharex=True)#can change to include cdf with ax1, ax2
histAll = np.insert(histAll,0,0)
histObs = np.insert(histObs,0,0)
for f in filters:
histRec[f] = np.insert(histRec[f],0,0)
#PDF
ax1.step(bin_edges, histAll/np.sum(histAll), color=c1)
ax1.step(bin_edges, histObs/np.sum(histObs), color=c2)
for f in filters:
lw = 1
if (f == 'all'):
lw = 0.5
ax1.step(bin_edges, histRec[f]/np.sum(histRec[f]), color=c3, linewidth=lw)
ax1.set_ylabel('PDF')
ax1.set_yscale('log')
ax1.set_title('Globular Clusters - Baseline', fontsize = 16)
ax1.set_xlabel(xtitle)
#CDF
#cdfAll = []
#cdfObs = []
#cdfRec = dict()
#for f in filters:
# cdfRec[f] = []
# for i in range(len(histAll)):
# cdfAll.append(np.sum(histAll[:i])/np.sum(histAll))
# for i in range(len(histObs)):
# cdfObs.append(np.sum(histObs[:i])/np.sum(histObs))
# for f in filters:
# for i in range(len(histRec[f])):
# cdfRec[f].append(np.sum(histRec[f][:i])/np.sum(histRec[f]))
#ax2.step(bin_edges, cdfAll, color=c1)
#ax2.step(bin_edges, cdfObs, color=c2)
#for f in filters:
# lw = 1
# if (f == 'all'):
# lw = 0.5
# ax2.step(bin_edges, cdfRec[f], color=c3, linewidth=lw)
#ax2.set_ylabel('CDF')
#ax2.set_xlabel(xtitle)
fig.subplots_adjust(hspace=0)
fig.savefig('./plots/' + fname+'.pdf',format='pdf', bbox_inches = 'tight')
#write to a text file
with open('./eblsst_files/' + fname+'.csv','w') as fl:
outline = 'binEdges,histAll,histObs'
for f in filters:
outline += ','+f+'histRec'
outline += '\n'
fl.write(outline)
for i in range(len(bin_edges)):
outline = str(bin_edges[i])+','+str(histAll[i])+','+str(histObs[i])
for f in filters:
outline += ','+str(histRec[f][i])
outline += '\n'
fl.write(outline)
if __name__ == "__main__":
filters = ['u_', 'g_', 'r_', 'i_', 'z_', 'y_', 'all']
#get the Raghavan binary fraction fit
fbFit= fitRagfb()
print(fbFit)
#to normalize
intAll, err = quad(RagNormal, -20, 20)
intCut, err = quad(RagNormal, -20, np.log10(365*10.))
intNorm = intCut/intAll
#cutoff in percent error for "recovered"
Pcut = 0.1
#assumed mean stellar mass
mMean = 0.5
#minimum number of lines to consider in file
Nlim = 3
if (doIndividualPlots):
fmass, axmass = plt.subplots()
fqrat, axqrat = plt.subplots()
fecc, axecc = plt.subplots()
flper, axlper = plt.subplots()
fdist, axdist = plt.subplots()
fmag, axmag = plt.subplots()
frad, axrad = plt.subplots()
#bins for all the histograms
Nbins = 25
mbins = np.arange(0,10, 0.1, dtype='float')
qbins = np.arange(0,1, 0.1, dtype='float')
ebins = np.arange(0, 1.05, 0.05, dtype='float')
lpbins = np.arange(-2, 10, 0.5, dtype='float')
dbins = np.arange(0, 40, 1, dtype='float')
magbins = np.arange(11, 25, 1, dtype='float')
rbins = np.arange(0, 100, 0.2, dtype='float')
#blanks for the histograms
#All
m1hAll = np.zeros_like(mbins)[1:]
qhAll = np.zeros_like(qbins)[1:]
ehAll = np.zeros_like(ebins)[1:]
lphAll = np.zeros_like(lpbins)[1:]
dhAll = np.zeros_like(dbins)[1:]
maghAll = np.zeros_like(magbins)[1:]
rhAll = np.zeros_like(rbins)[1:]
#Observable
m1hObs = np.zeros_like(mbins)[1:]
qhObs = np.zeros_like(qbins)[1:]
ehObs = np.zeros_like(ebins)[1:]
lphObs = np.zeros_like(lpbins)[1:]
dhObs = np.zeros_like(dbins)[1:]
maghObs = np.zeros_like(magbins)[1:]
rhObs = np.zeros_like(rbins)[1:]
#Recovered
m1hRec = dict()
qhRec = dict()
ehRec = dict()
lphRec = dict()
dhRec = dict()
maghRec = dict()
rhRec = dict()
for f in filters:
m1hRec[f] = np.zeros_like(mbins)[1:]
qhRec[f] = np.zeros_like(qbins)[1:]
ehRec[f] = np.zeros_like(ebins)[1:]
lphRec[f] = np.zeros_like(lpbins)[1:]
dhRec[f] = np.zeros_like(dbins)[1:]
maghRec[f] = np.zeros_like(magbins)[1:]
rhRec[f] = np.zeros_like(rbins)[1:]
RA = []
Dec = []
recFrac = []
recN = []
rawN = []
obsN = []
fileN = []
fileObsN = []
fileRecN = []
allNPrsa = []
obsNPrsa = []
recNPrsa = []
# Lists for period and eccentricity for Andrew's circularization plots
eccAll = []
eccObs = []
eccRec = []
pAll = []
pObs = []
pRec = []
# Using prsa dataframes for these lists because of period cutoff at 1000 days
# Dataframes to write to files later; 3 files for each sub-population - append everything to these
peccAll = pd.DataFrame(columns = ['e', 'p'])
peccObs = pd.DataFrame(columns = ['e', 'p'])
peccRec = pd.DataFrame(columns = ['e', 'p'])
#Read in all the data and make the histograms
d = "./input_files/"
files = os.listdir(d)
IDs = []
for i, f in enumerate(files):
print(round(i/len(files),4), f)
fl = file_len(d+f)
if (fl >= 4):
#read in the header
header = pd.read_csv(d+f, nrows=1)
######################
#NEED TO ACCOUNT FOR THE BINARY FRACTION when combining histograms
#####################
Nmult = header['clusterMass'][0]/mMean
#Nmult = 1.
RA.append(header['OpSimRA'])
Dec.append(header['OpSimDec'])
#read in rest of the file
data = pd.read_csv(d+f, header = 2).fillna(-999)
rF = 0.
rN = 0.
Nrec = 0.
Nobs = 0.
raN = 0.
obN = 0.
fiN = 0.
fioN = 0.
firN = 0.
NallPrsa = 0.
NobsPrsa = 0.
NrecPrsa = 0.
Nall = len(data.index)/intNorm ###is this correct? (and the only place I need to normalize?)
prsa = data.loc[(data['appMagMean_r'] <= 19.5) & (data['appMagMean_r'] > 15.8) & (data['p'] < 1000) & (data['p'] > 0.5)]
# Appending for Andrew
eccAll.append(prsa['e'].values)
pAll.append(prsa['p'].values)
NallPrsa = len(prsa.index)
if (Nall >= Nlim):
#create histograms
#All
m1hAll0, m1b = np.histogram(data["m1"], bins=mbins)
qhAll0, qb = np.histogram(data["m2"]/data["m1"], bins=qbins)
ehAll0, eb = np.histogram(data["e"], bins=ebins)
lphAll0, lpb = np.histogram(np.ma.log10(data["p"].values).filled(-999), bins=lpbins)
dhAll0, db = np.histogram(data["d"], bins=dbins)
maghAll0, magb = np.histogram(data["appMagMean_r"], bins=magbins)
rhAll0, rb = np.histogram(data["r2"]/data["r1"], bins=rbins)
if (doIndividualPlots):
axmass.step(m1b[0:-1], m1hAll0/np.sum(m1hAll0), color='black', alpha=0.1)
axqrat.step(qb[0:-1], qhAll0/np.sum(qhAll0), color='black', alpha=0.1)
axecc.step(eb[0:-1], ehAll0/np.sum(ehAll0), color='black', alpha=0.1)
axlper.step(lpb[0:-1], lphAll0/np.sum(lphAll0), color='black', alpha=0.1)
axdist.step(db[0:-1], dhAll0/np.sum(dhAll0), color='black', alpha=0.1)
axmag.step(magb[0:-1], maghAll0/np.sum(maghAll0), color='black', alpha=0.1)
axrad.step(rb[0:-1], rhAll0/np.sum(rhAll0), color='black', alpha=0.1)
#account for the binary fraction, as a function of mass
dm1 = np.diff(m1b)
m1val = m1b[:-1] + dm1/2.
fb = np.sum(m1hAll0/len(data.index)*fbFit(m1val))
#account for the hard-soft boundary
Phs = getPhs(header['clusterVdisp'].iloc[0]*units.km/units.s).to(units.day).value
fb *= RagNormal(np.log10(Phs), cdf = True)
print("fb, Phs = ", fb, Phs)
Nmult *= fb
m1hAll += m1hAll0/Nall*Nmult
qhAll += qhAll0/Nall*Nmult
ehAll += ehAll0/Nall*Nmult
lphAll += lphAll0/Nall*Nmult
dhAll += dhAll0/Nall*Nmult
maghAll += maghAll0/Nall*Nmult
rhAll += rhAll0/Nall*Nmult
#Obs
obs = data.loc[data['LSM_PERIOD'] != -999]
Nobs = len(obs.index)
prsaObs = data.loc[(data['appMagMean_r'] <= 19.5) & (data['appMagMean_r'] > 15.8) & (data['p'] < 1000) & (data['p'] >0.5) & (data['LSM_PERIOD'] != -999)]
NobsPrsa = len(prsaObs.index)
# Appending for Andrew's files
eccObs.append(prsaObs['e'].values)
pObs.append(prsaObs['p'].values)
if (Nobs >= Nlim):
m1hObs0, m1b = np.histogram(obs["m1"], bins=mbins)
qhObs0, qb = np.histogram(obs["m2"]/obs["m1"], bins=qbins)
ehObs0, eb = np.histogram(obs["e"], bins=ebins)
lphObs0, lpb = np.histogram(np.ma.log10(obs["p"].values).filled(-999), bins=lpbins)
dhObs0, db = np.histogram(obs["d"], bins=dbins)
maghObs0, magb = np.histogram(obs["appMagMean_r"], bins=magbins)
rhObs0, rb = np.histogram(obs["r2"]/obs["r1"], bins=rbins)
m1hObs += m1hObs0/Nall*Nmult
qhObs += qhObs0/Nall*Nmult
ehObs += ehObs0/Nall*Nmult
lphObs += lphObs0/Nall*Nmult
dhObs += dhObs0/Nall*Nmult
maghObs += maghObs0/Nall*Nmult
rhObs += rhObs0/Nall*Nmult
#Rec
recCombined = pd.DataFrame()
prsaRecCombined = pd.DataFrame()
import unittest
import numpy as np
from pandas.core.api import Series
import pandas.core.algorithms as algos
import pandas.util.testing as tm
class TestMatch(unittest.TestCase):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0])
self.assert_(np.array_equal(result, expected))
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1])
self.assert_(np.array_equal(result, expected))
class TestUnique(unittest.TestCase):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.unique(arr)
self.assert_(isinstance(result, np.ndarray))
def test_objects(self):
arr = np.random.randint(0, 100, size=50).astype('O')
result = algos.unique(arr)
self.assert_(isinstance(result, np.ndarray))
# vim: set fileencoding=<utf-8> :
# Copyright 2018-2020 <NAME> and <NAME>
'''Network functions'''
# universal
import os
import sys
import re
# additional
import glob
import operator
import shutil
import subprocess
import numpy as np
import pandas as pd
from scipy.stats import rankdata
from tempfile import mkstemp, mkdtemp
from collections import defaultdict, Counter
from functools import partial
from multiprocessing import Pool
import pickle
import graph_tool.all as gt
import dendropy
import poppunk_refine
# Load GPU libraries
try:
import cupyx
import cugraph
import cudf
import cupy as cp
from numba import cuda
import rmm
gpu_lib = True
except ImportError as e:
gpu_lib = False
from .__main__ import accepted_weights_types
from .__main__ import betweenness_sample_default
from .sketchlib import addRandom
from .utils import iterDistRows
from .utils import listDistInts
from .utils import readIsolateTypeFromCsv
from .utils import readRfile
from .utils import setupDBFuncs
from .utils import isolateNameToLabel
from .utils import check_and_set_gpu
from .unwords import gen_unword
def fetchNetwork(network_dir, model, refList, ref_graph = False,
core_only = False, accessory_only = False, use_gpu = False):
"""Load the network based on input options
Returns the network as a graph-tool format graph, and sets
the slope parameter of the passed model object.
Args:
network_dir (str)
A network used to define clusters
model (ClusterFit)
A fitted model object
refList (list)
Names of references that should be in the network
ref_graph (bool)
Use ref only graph, if available
[default = False]
core_only (bool)
Return the network created using only core distances
[default = False]
accessory_only (bool)
Return the network created using only accessory distances
[default = False]
use_gpu (bool)
Use cugraph library to load graph
Returns:
genomeNetwork (graph)
The loaded network
cluster_file (str)
The CSV of cluster assignments corresponding to this network
"""
# If a refined fit, may use just core or accessory distances
dir_prefix = network_dir + "/" + os.path.basename(network_dir)
# load CUDA libraries - here exit without switching to CPU libraries
# to avoid loading an unexpected file
use_gpu = check_and_set_gpu(use_gpu, gpu_lib, quit_on_fail = True)
if use_gpu:
graph_suffix = '.csv.gz'
else:
graph_suffix = '.gt'
if core_only and model.type == 'refine':
if ref_graph:
network_file = dir_prefix + '_core.refs_graph' + graph_suffix
else:
network_file = dir_prefix + '_core_graph' + graph_suffix
cluster_file = dir_prefix + '_core_clusters.csv'
elif accessory_only and model.type == 'refine':
if ref_graph:
network_file = dir_prefix + '_accessory.refs_graph' + graph_suffix
else:
network_file = dir_prefix + '_accessory_graph' + graph_suffix
cluster_file = dir_prefix + '_accessory_clusters.csv'
else:
if ref_graph and os.path.isfile(dir_prefix + '.refs_graph' + graph_suffix):
network_file = dir_prefix + '.refs_graph' + graph_suffix
else:
network_file = dir_prefix + '_graph' + graph_suffix
cluster_file = dir_prefix + '_clusters.csv'
if core_only or accessory_only:
sys.stderr.write("Can only do --core or --accessory fits from "
"a refined fit. Using the combined distances.\n")
# Load network file
sys.stderr.write("Loading network from " + network_file + "\n")
genomeNetwork = load_network_file(network_file, use_gpu = use_gpu)
# Ensure all in dists are in final network
checkNetworkVertexCount(refList, genomeNetwork, use_gpu)
return genomeNetwork, cluster_file
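# Illustrative call (a sketch, assuming a fitted model object and the matching
# sample name list are available):
#   G, cluster_csv = fetchNetwork(network_dir, model, rlist, use_gpu=False)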
def load_network_file(fn, use_gpu = False):
"""Load the network based on input options
Returns the network as a graph-tool format graph, and sets
the slope parameter of the passed model object.
Args:
fn (str)
Network file name
use_gpu (bool)
Use cugraph library to load graph
Returns:
genomeNetwork (graph)
The loaded network
"""
# Load the network from the specified file
if use_gpu:
G_df = cudf.read_csv(fn, compression = 'gzip')
if 'src' in G_df.columns:
G_df.rename(columns={'src': 'source','dst': 'destination'}, inplace=True)
genomeNetwork = cugraph.Graph()
if 'weights' in G_df.columns:
G_df = G_df[['source','destination','weights']]
genomeNetwork.from_cudf_edgelist(G_df, edge_attr='weights', renumber=False)
else:
genomeNetwork.from_cudf_edgelist(G_df,renumber=False)
sys.stderr.write("Network loaded: " + str(genomeNetwork.number_of_vertices()) + " samples\n")
else:
genomeNetwork = gt.load_graph(fn)
sys.stderr.write("Network loaded: " + str(len(list(genomeNetwork.vertices()))) + " samples\n")
return genomeNetwork
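# Illustrative calls, following the file naming conventions used in fetchNetwork:
#   G = load_network_file(prefix + '_graph.gt')                     # graph-tool backend
#   G = load_network_file(prefix + '_graph.csv.gz', use_gpu=True)   # cugraph backend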
def checkNetworkVertexCount(seq_list, G, use_gpu):
"""Checks the number of network vertices matches the number
of sequence names.
Args:
seq_list (list)
The list of sequence names
G (graph)
The network of sequences
use_gpu (bool)
Whether to use cugraph for graph analyses
"""
vertex_list = set(get_vertex_list(G, use_gpu = use_gpu))
networkMissing = set(set(range(len(seq_list))).difference(vertex_list))
if len(networkMissing) > 0:
sys.stderr.write("ERROR: " + str(len(networkMissing)) + " samples are missing from the final network\n")
sys.exit(1)
def getCliqueRefs(G, reference_indices = set()):
"""Recursively prune a network of its cliques. Returns one vertex from
a clique at each stage
Args:
G (graph)
The graph to get clique representatives from
reference_indices (set)
The unique list of vertices being kept, to add to
"""
cliques = gt.max_cliques(G)
try:
# Get the first clique, and see if it has any members already
# contained in the vertex list
clique = frozenset(next(cliques))
if clique.isdisjoint(reference_indices):
reference_indices.add(list(clique)[0])
# Remove the clique, and prune the resulting subgraph (recursively)
subgraph = gt.GraphView(G, vfilt=[v not in clique for v in G.vertices()])
if subgraph.num_vertices() > 1:
getCliqueRefs(subgraph, reference_indices)
elif subgraph.num_vertices() == 1:
reference_indices.add(subgraph.get_vertices()[0])
except StopIteration:
pass
return reference_indices
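# Illustrative direct call (normally reached through cliquePrune below):
#   refs = getCliqueRefs(subgraph, reference_indices=set())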
def cliquePrune(component, graph, reference_indices, components_list):
"""Wrapper function around :func:`~getCliqueRefs` so it can be
called by a multiprocessing pool
"""
if gt.openmp_enabled():
gt.openmp_set_num_threads(1)
subgraph = gt.GraphView(graph, vfilt=components_list == component)
refs = reference_indices.copy()
if subgraph.num_vertices() <= 2:
refs.add(subgraph.get_vertices()[0])
ref_list = refs
else:
ref_list = getCliqueRefs(subgraph, refs)
return(list(ref_list))
def translate_network_indices(G_ref_df, reference_indices):
"""Function for ensuring an updated reference network retains
numbering consistent with sample names
Args:
G_ref_df (cudf data frame)
List of edges in reference network
reference_indices (list)
The ordered list of reference indices in the original network
Returns:
G_ref (cugraph network)
Network of reference sequences
"""
# Translate network indices to match name order
G_ref_df['source'] = [reference_indices.index(x) for x in G_ref_df['old_source'].to_arrow().to_pylist()]
G_ref_df['destination'] = [reference_indices.index(x) for x in G_ref_df['old_destination'].to_arrow().to_pylist()]
G_ref = add_self_loop(G_ref_df, len(reference_indices) - 1, renumber = True)
return(G_ref)
def extractReferences(G, dbOrder, outPrefix, outSuffix = '', type_isolate = None,
existingRefs = None, threads = 1, use_gpu = False):
"""Extract references for each cluster based on cliques
Writes chosen references to file by calling :func:`~writeReferences`
Args:
G (graph)
A network used to define clusters
dbOrder (list)
The order of files in the sketches, so returned references are in the same order
outPrefix (str)
Prefix for output file
outSuffix (str)
Suffix for output file (.refs will be appended)
type_isolate (str)
Isolate to be included in set of references
existingRefs (list)
References that should be used for each clique
use_gpu (bool)
Use cugraph for graph analysis (default = False)
Returns:
refFileName (str)
The name of the file references were written to
references (list)
An updated list of the reference names
"""
if existingRefs == None:
references = set()
reference_indices = set()
else:
references = set(existingRefs)
index_lookup = {v:k for k,v in enumerate(dbOrder)}
reference_indices = set([index_lookup[r] for r in references])
# Add type isolate, if necessary
type_isolate_index = None
if type_isolate is not None:
if type_isolate in dbOrder:
type_isolate_index = dbOrder.index(type_isolate)
else:
sys.stderr.write('Type isolate ' + type_isolate + ' not found\n')
sys.exit(1)
if use_gpu:
# For large network, use more approximate method for extracting references
reference = {}
# Record the original components to which sequences belonged
component_assignments = cugraph.components.connectivity.connected_components(G)
# Leiden method has resolution parameter - higher values give greater precision
partition_assignments, score = cugraph.leiden(G, resolution = 0.1)
# group by partition, which becomes the first column, so retrieve second column
reference_index_df = partition_assignments.groupby('partition').nth(0)
reference_indices = reference_index_df['vertex'].to_arrow().to_pylist()
# Add type isolate if necessary - before edges are added
if type_isolate_index is not None and type_isolate_index not in reference_indices:
reference_indices.append(type_isolate_index)
# Order found references as in sketchlib database
reference_names = [dbOrder[int(x)] for x in sorted(reference_indices)]
# Extract reference edges
G_df = G.view_edge_list()
if 'src' in G_df.columns:
G_df.rename(columns={'src': 'old_source','dst': 'old_destination'}, inplace=True)
else:
G_df.rename(columns={'source': 'old_source','destination': 'old_destination'}, inplace=True)
G_ref_df = G_df[G_df['old_source'].isin(reference_indices) & G_df['old_destination'].isin(reference_indices)]
# Translate network indices to match name order
G_ref = translate_network_indices(G_ref_df, reference_indices)
# Check references in same component in overall graph are connected in the reference graph
# First get components of original reference graph
reference_component_assignments = cugraph.components.connectivity.connected_components(G_ref)
reference_component_assignments.rename(columns={'labels': 'ref_labels'}, inplace=True)
# Merge with component assignments from overall graph
combined_vertex_assignments = reference_component_assignments.merge(component_assignments,
on = 'vertex',
how = 'left')
combined_vertex_assignments = combined_vertex_assignments[combined_vertex_assignments['vertex'].isin(reference_indices)]
# Find the number of components in the reference graph associated with each component in the overall graph -
# should be one if there is a one-to-one mapping of components - else links need to be added
max_ref_comp_count = combined_vertex_assignments.groupby(['labels'], sort = False)['ref_labels'].nunique().max()
if max_ref_comp_count > 1:
# Iterate through components
for component, component_df in combined_vertex_assignments.groupby(['labels'], sort = False):
# Find components in the overall graph matching multiple components in the reference graph
if component_df.groupby(['labels'], sort = False)['ref_labels'].nunique().iloc[0] > 1:
# Make a graph of the component from the overall graph
vertices_in_component = component_assignments[component_assignments['labels']==component]['vertex']
references_in_component = vertices_in_component[vertices_in_component.isin(reference_indices)].values
                    G_component_df = G_df[G_df['old_source'].isin(vertices_in_component) & G_df['old_destination'].isin(vertices_in_component)]
                    G_component = cugraph.Graph()
                    G_component.from_cudf_edgelist(G_component_df, source = 'old_source', destination = 'old_destination')
# Find single shortest path from a reference to all other nodes in the component
traversal = cugraph.traversal.sssp(G_component,source = references_in_component[0])
reference_index_set = set(reference_indices)
# Add predecessors to reference sequences on the SSSPs
predecessor_list = traversal[traversal['vertex'].isin(reference_indices)]['predecessor'].values
predecessors = set(predecessor_list[predecessor_list >= 0].flatten().tolist())
# Add predecessors to reference set and check whether this results in complete paths
# where complete paths are indicated by references' predecessors being within the set of
# references
while len(predecessors) > 0 and len(predecessors - reference_index_set) > 0:
reference_index_set = reference_index_set.union(predecessors)
                        predecessor_list = traversal[traversal['vertex'].isin(reference_index_set)]['predecessor'].values
predecessors = set(predecessor_list[predecessor_list >= 0].flatten().tolist())
# Add expanded reference set to the overall list
reference_indices = list(reference_index_set)
# Create new reference graph
G_ref_df = G_df[G_df['old_source'].isin(reference_indices) & G_df['old_destination'].isin(reference_indices)]
G_ref = translate_network_indices(G_ref_df, reference_indices)
else:
# Each component is independent, so can be multithreaded
components = gt.label_components(G)[0].a
# Turn gt threading off and on again either side of the parallel loop
if gt.openmp_enabled():
gt.openmp_set_num_threads(1)
# Cliques are pruned, taking one reference from each, until none remain
        sys.setrecursionlimit(5000)
with Pool(processes=threads) as pool:
ref_lists = pool.map(partial(cliquePrune,
graph=G,
reference_indices=reference_indices,
components_list=components),
set(components))
        sys.setrecursionlimit(1000)
# Returns nested lists, which need to be flattened
reference_indices = set([entry for sublist in ref_lists for entry in sublist])
# Add type isolate if necessary - before edges are added
if type_isolate_index is not None and type_isolate_index not in reference_indices:
reference_indices.add(type_isolate_index)
if gt.openmp_enabled():
gt.openmp_set_num_threads(threads)
        # Use a vertex filter to extract the subgraph of references
# as a graphview
reference_vertex = G.new_vertex_property('bool')
for n, vertex in enumerate(G.vertices()):
if n in reference_indices:
reference_vertex[vertex] = True
else:
reference_vertex[vertex] = False
G_ref = gt.GraphView(G, vfilt = reference_vertex)
G_ref = gt.Graph(G_ref, prune = True) # https://stackoverflow.com/questions/30839929/graph-tool-graphview-object
# Find any clusters which are represented by >1 references
# This creates a dictionary: cluster_id: set(ref_idx in cluster)
clusters_in_full_graph = printClusters(G, dbOrder, printCSV=False)
reference_clusters_in_full_graph = defaultdict(set)
for reference_index in reference_indices:
reference_clusters_in_full_graph[clusters_in_full_graph[dbOrder[reference_index]]].add(reference_index)
# Calculate the component membership within the reference graph
ref_order = [name for idx, name in enumerate(dbOrder) if idx in frozenset(reference_indices)]
clusters_in_reference_graph = printClusters(G_ref, ref_order, printCSV=False)
# Record the components/clusters the references are in the reference graph
# dict: name: ref_cluster
reference_clusters_in_reference_graph = {}
for reference_name in ref_order:
reference_clusters_in_reference_graph[reference_name] = clusters_in_reference_graph[reference_name]
# Check if multi-reference components have been split as a validation test
# First iterate through clusters
network_update_required = False
for cluster_id, ref_idxs in reference_clusters_in_full_graph.items():
# Identify multi-reference clusters by this length
if len(ref_idxs) > 1:
check = list(ref_idxs)
# check if these are still in the same component in the reference graph
for i in range(len(check)):
component_i = reference_clusters_in_reference_graph[dbOrder[check[i]]]
for j in range(i + 1, len(check)):
# Add intermediate nodes
component_j = reference_clusters_in_reference_graph[dbOrder[check[j]]]
if component_i != component_j:
network_update_required = True
vertex_list, edge_list = gt.shortest_path(G, check[i], check[j])
# update reference list
for vertex in vertex_list:
reference_vertex[vertex] = True
reference_indices.add(int(vertex))
# update reference graph if vertices have been added
if network_update_required:
G_ref = gt.GraphView(G, vfilt = reference_vertex)
G_ref = gt.Graph(G_ref, prune = True) # https://stackoverflow.com/questions/30839929/graph-tool-graphview-object
# Order found references as in sketch files
reference_names = [dbOrder[int(x)] for x in sorted(reference_indices)]
refFileName = writeReferences(reference_names, outPrefix, outSuffix = outSuffix)
return reference_indices, reference_names, refFileName, G_ref
def writeReferences(refList, outPrefix, outSuffix = ""):
"""Writes chosen references to file
Args:
refList (list)
Reference names to write
outPrefix (str)
Prefix for output file
outSuffix (str)
Suffix for output file (.refs will be appended)
Returns:
refFileName (str)
The name of the file references were written to
"""
# write references to file
refFileName = outPrefix + "/" + os.path.basename(outPrefix) + outSuffix + ".refs"
with open(refFileName, 'w') as rFile:
for ref in refList:
rFile.write(ref + '\n')
return refFileName
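# Example (hypothetical paths, not part of the original module): with
# outPrefix = 'output/run1' and outSuffix = '_clique', the names in refList are written
# one per line to 'output/run1/run1_clique.refs', and that path is returned.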
def network_to_edges(prev_G_fn, rlist, adding_qq_dists = False,
old_ids = None, previous_pkl = None, weights = False,
use_gpu = False):
"""Load previous network, extract the edges to match the
vertex order specified in rlist, and also return weights if specified.
Args:
prev_G_fn (str or graph object)
Path of file containing existing network, or already-loaded
graph object
adding_qq_dists (bool)
Boolean specifying whether query-query edges are being added
to an existing network, such that not all the sequence IDs will
be found in the old IDs, which should already be correctly ordered
rlist (list)
List of reference sequence labels in new network
old_ids (list)
List of IDs of vertices in existing network
previous_pkl (str)
Path of pkl file containing names of sequences in
previous network
weights (bool)
Whether to return edge weights
(default = False)
use_gpu (bool)
Whether to use cugraph for graph analyses
Returns:
source_ids (list)
Source nodes for each edge
target_ids (list)
Target nodes for each edge
edge_weights (list)
Weights for each new edge
"""
# Load graph from file if passed string; else use graph object passed in
# as argument
if isinstance(prev_G_fn, str):
prev_G = load_network_file(prev_G_fn, use_gpu = use_gpu)
else:
prev_G = prev_G_fn
# load list of names in previous network if pkl name supplied
if previous_pkl is not None:
with open(previous_pkl, 'rb') as pickle_file:
old_rlist, old_qlist, self = pickle.load(pickle_file)
if self:
old_ids = old_rlist
else:
old_ids = old_rlist + old_qlist
elif old_ids is None:
sys.stderr.write('Missing .pkl file containing names of sequences in '
'previous network\n')
sys.exit(1)
# Get edges as lists of source,destination,weight using original IDs
if use_gpu:
G_df = prev_G.view_edge_list()
if weights:
if len(G_df.columns) < 3:
sys.stderr.write('Loaded network does not have edge weights; try a different '
'network or turn off graph weights\n')
                sys.exit(1)
if 'src' in G_df.columns:
                G_df.rename(columns={'src': 'source','dst': 'destination'}, inplace=True)
edge_weights = G_df['weights'].to_arrow().to_pylist()
G_df.rename(columns={'src': 'source','dst': 'destination'}, inplace=True)
old_source_ids = G_df['source'].astype('int32').to_arrow().to_pylist()
old_target_ids = G_df['destination'].astype('int32').to_arrow().to_pylist()
else:
# get the source and target nodes
old_source_ids = gt.edge_endpoint_property(prev_G, prev_G.vertex_index, "source")
old_target_ids = gt.edge_endpoint_property(prev_G, prev_G.vertex_index, "target")
# get the weights
if weights:
            if 'weight' not in prev_G.edge_properties.keys():
sys.stderr.write('Loaded network does not have edge weights; try a different '
'network or turn off graph weights\n')
                sys.exit(1)
edge_weights = list(prev_G.ep['weight'])
# If appending queries to an existing network, then the recovered links can be left
# unchanged, as the new IDs are the queries, and the existing sequences will not be found
# in the list of IDs
if adding_qq_dists:
source_ids = old_source_ids
target_ids = old_target_ids
else:
# Update IDs to new versions
old_id_indices = [rlist.index(x) for x in old_ids]
# translate to indices
source_ids = [old_id_indices[x] for x in old_source_ids]
target_ids = [old_id_indices[x] for x in old_target_ids]
# return values
if weights:
return source_ids, target_ids, edge_weights
else:
return source_ids, target_ids
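# Illustrative note (hypothetical labels, not part of the original module): the
# re-indexing above maps old vertex positions onto the new rlist order. If the previous
# network used old_ids = ['s1', 's2'] and the new order is rlist = ['s2', 's1', 's3'],
# then old vertex 0 ('s1') becomes 1 and old vertex 1 ('s2') becomes 0, so an old edge
# (0, 1) is returned as (1, 0).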
def print_network_summary(G, betweenness_sample = betweenness_sample_default, use_gpu = False):
"""Wrapper function for printing network information
Args:
        G (graph)
            The network to summarise
betweenness_sample (int)
Number of sequences per component used to estimate betweenness using
a GPU. Smaller numbers are faster but less precise [default = 100]
use_gpu (bool)
Whether to use GPUs for network construction
"""
# print some summaries
(metrics, scores) = networkSummary(G, betweenness_sample = betweenness_sample, use_gpu = use_gpu)
sys.stderr.write("Network summary:\n" + "\n".join(["\tComponents\t\t\t\t" + str(metrics[0]),
"\tDensity\t\t\t\t\t" + "{:.4f}".format(metrics[1]),
"\tTransitivity\t\t\t\t" + "{:.4f}".format(metrics[2]),
"\tMean betweenness\t\t\t" + "{:.4f}".format(metrics[3]),
"\tWeighted-mean betweenness\t\t" + "{:.4f}".format(metrics[4]),
"\tScore\t\t\t\t\t" + "{:.4f}".format(scores[0]),
"\tScore (w/ betweenness)\t\t\t" + "{:.4f}".format(scores[1]),
"\tScore (w/ weighted-betweenness)\t\t" + "{:.4f}".format(scores[2])])
+ "\n")
def initial_graph_properties(rlist, qlist):
"""Initial processing of sequence names for
network construction.
Args:
rlist (list)
List of reference sequence labels
qlist (list)
List of query sequence labels
Returns:
vertex_labels (list)
Ordered list of sequences in network
self_comparison (bool)
Whether the network is being constructed from all-v-all distances or
reference-v-query information
"""
if rlist == qlist:
self_comparison = True
vertex_labels = rlist
else:
self_comparison = False
vertex_labels = rlist + qlist
return vertex_labels, self_comparison
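# Minimal example (hypothetical labels): for an all-v-all network,
# initial_graph_properties(['a', 'b'], ['a', 'b']) returns (['a', 'b'], True), whereas
# for reference-v-query distances initial_graph_properties(['a', 'b'], ['q1']) returns
# (['a', 'b', 'q1'], False).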
def process_weights(distMat, weights_type):
"""Calculate edge weights from the distance matrix
Args:
distMat (2 column ndarray)
Numpy array of pairwise distances
weights_type (str)
Measure to calculate from the distMat to use as edge weights in network
- options are core, accessory or euclidean distance
Returns:
processed_weights (list)
Edge weights
"""
processed_weights = []
if weights_type is not None and distMat is not None:
# Check weights type is valid
        if weights_type not in accepted_weights_types:
            sys.stderr.write("Unable to calculate distance type " + str(weights_type) + "; "
                             "accepted types are " + str(accepted_weights_types) + "\n")
            sys.exit(1)
if weights_type == 'euclidean':
processed_weights = np.linalg.norm(distMat, axis = 1).tolist()
elif weights_type == 'core':
processed_weights = distMat[:, 0].tolist()
elif weights_type == 'accessory':
processed_weights = distMat[:, 1].tolist()
else:
sys.stderr.write('Require distance matrix to calculate distances\n')
return processed_weights
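# Illustrative sketch (hypothetical values, not part of the original module), assuming
# distMat holds (core, accessory) distances per edge:
#   process_weights(np.array([[0.1, 0.4], [0.2, 0.3]]), 'core')      -> [0.1, 0.2]
#   process_weights(np.array([[0.1, 0.4], [0.2, 0.3]]), 'accessory') -> [0.4, 0.3]
#   process_weights(np.array([[0.1, 0.4], [0.2, 0.3]]), 'euclidean') -> row-wise L2 norms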
def process_previous_network(previous_network = None, adding_qq_dists = False, old_ids = None,
previous_pkl = None, vertex_labels = None, weights = False, use_gpu = False):
"""Extract edge types from an existing network
Args:
previous_network (str or graph object)
Name of file containing a previous network to be integrated into this new
network, or already-loaded graph object
adding_qq_dists (bool)
Boolean specifying whether query-query edges are being added
to an existing network, such that not all the sequence IDs will
be found in the old IDs, which should already be correctly ordered
old_ids (list)
Ordered list of vertex names in previous network
previous_pkl (str)
Name of file containing the names of the sequences in the previous_network
ordered based on the original network construction
vertex_labels (list)
Ordered list of sequence labels
weights (bool)
Whether weights should be extracted from the previous network
use_gpu (bool)
Whether to use GPUs for network construction
Returns:
extra_sources (list)
List of source node identifiers
extra_targets (list)
List of destination node identifiers
extra_weights (list or None)
List of edge weights
"""
if previous_pkl is not None or old_ids is not None:
if weights:
# Extract from network
extra_sources, extra_targets, extra_weights = network_to_edges(previous_network,
vertex_labels,
adding_qq_dists = adding_qq_dists,
old_ids = old_ids,
previous_pkl = previous_pkl,
weights = True,
use_gpu = use_gpu)
else:
# Extract from network
extra_sources, extra_targets = network_to_edges(previous_network,
vertex_labels,
adding_qq_dists = adding_qq_dists,
old_ids = old_ids,
previous_pkl = previous_pkl,
weights = False,
use_gpu = use_gpu)
extra_weights = None
else:
        sys.stderr.write('A distance pkl corresponding to the previous network is required for loading\n')
sys.exit(1)
return extra_sources, extra_targets, extra_weights
def construct_network_from_edge_list(rlist,
qlist,
edge_list,
weights = None,
distMat = None,
previous_network = None,
adding_qq_dists = False,
old_ids = None,
previous_pkl = None,
betweenness_sample = betweenness_sample_default,
summarise = True,
use_gpu = False):
"""Construct an undirected network using a data frame of edges. Nodes are samples and
edges where samples are within the same cluster
Will print summary statistics about the network to ``STDERR``
Args:
rlist (list)
List of reference sequence labels
qlist (list)
List of query sequence labels
        edge_list (list of tuples)
            List of (source, destination) node index pairs defining the edges
weights (list)
List of edge weights
distMat (2 column ndarray)
Numpy array of pairwise distances
previous_network (str or graph object)
Name of file containing a previous network to be integrated into this new
network, or the already-loaded graph object
adding_qq_dists (bool)
Boolean specifying whether query-query edges are being added
to an existing network, such that not all the sequence IDs will
be found in the old IDs, which should already be correctly ordered
old_ids (list)
Ordered list of vertex names in previous network
previous_pkl (str)
Name of file containing the names of the sequences in the previous_network
betweenness_sample (int)
Number of sequences per component used to estimate betweenness using
a GPU. Smaller numbers are faster but less precise [default = 100]
summarise (bool)
Whether to calculate and print network summaries with :func:`~networkSummary`
(default = True)
use_gpu (bool)
Whether to use GPUs for network construction
Returns:
G (graph)
The resulting network
"""
# Check GPU library use
use_gpu = check_and_set_gpu(use_gpu, gpu_lib, quit_on_fail = True)
# data structures
vertex_labels, self_comparison = initial_graph_properties(rlist, qlist)
# Create new network
if use_gpu:
# benchmarking concurs with https://stackoverflow.com/questions/55922162/recommended-cudf-dataframe-construction
if len(edge_list) > 1:
edge_array = cp.array(edge_list, dtype = np.int32)
edge_gpu_matrix = cuda.to_device(edge_array)
G_df = cudf.DataFrame(edge_gpu_matrix, columns = ['source','destination'])
else:
# Cannot generate an array when one edge
G_df = cudf.DataFrame(columns = ['source','destination'])
G_df['source'] = [edge_list[0][0]]
G_df['destination'] = [edge_list[0][1]]
if weights is not None:
G_df['weights'] = weights
G = construct_network_from_df(rlist, qlist, G_df,
weights = (weights is not None),
distMat = distMat,
adding_qq_dists = adding_qq_dists,
old_ids = old_ids,
previous_network = previous_network,
previous_pkl = previous_pkl,
summarise = False,
use_gpu = use_gpu)
else:
# Load previous network
if previous_network is not None:
extra_sources, extra_targets, extra_weights = \
process_previous_network(previous_network = previous_network,
adding_qq_dists = adding_qq_dists,
old_ids = old_ids,
previous_pkl = previous_pkl,
vertex_labels = vertex_labels,
weights = (weights is not None),
use_gpu = use_gpu)
# Construct list of tuples for graph-tool
# Include information from previous graph if supplied
if weights is not None:
weighted_edges = []
for ((src, dest), weight) in zip(edge_list, weights):
weighted_edges.append((src, dest, weight))
if previous_network is not None:
for (src, dest, weight) in zip(extra_sources, extra_targets, extra_weights):
weighted_edges.append((src, dest, weight))
edge_list = weighted_edges
else:
if previous_network is not None:
for (src, dest) in zip(extra_sources, extra_targets):
edge_list.append((src, dest))
# build the graph
G = gt.Graph(directed = False)
G.add_vertex(len(vertex_labels))
if weights is not None:
eweight = G.new_ep("float")
G.add_edge_list(edge_list, eprops = [eweight])
G.edge_properties["weight"] = eweight
else:
G.add_edge_list(edge_list)
if summarise:
print_network_summary(G, betweenness_sample = betweenness_sample, use_gpu = use_gpu)
return G
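# Minimal sketch (not called anywhere; it simply mirrors the CPU branch above) showing
# how a weighted graph-tool network is assembled from (source, destination, weight)
# tuples. The vertex count and edges below are hypothetical.
def _example_weighted_graph_tool_network():
    example_edges = [(0, 1, 0.05), (1, 2, 0.10)]
    g = gt.Graph(directed = False)
    g.add_vertex(3)
    eweight = g.new_ep("float")
    g.add_edge_list(example_edges, eprops = [eweight])
    g.edge_properties["weight"] = eweight
    return g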
def construct_network_from_df(rlist,
qlist,
G_df,
weights = False,
distMat = None,
previous_network = None,
adding_qq_dists = False,
old_ids = None,
previous_pkl = None,
betweenness_sample = betweenness_sample_default,
summarise = True,
use_gpu = False):
"""Construct an undirected network using a data frame of edges. Nodes are samples and
edges where samples are within the same cluster
Will print summary statistics about the network to ``STDERR``
Args:
rlist (list)
List of reference sequence labels
qlist (list)
List of query sequence labels
G_df (cudf or pandas data frame)
Data frame in which the first two columns are the nodes linked by edges
weights (bool)
Whether weights in the G_df data frame should be included in the network
distMat (2 column ndarray)
Numpy array of pairwise distances
previous_network (str or graph object)
Name of file containing a previous network to be integrated into this new
network, or the already-loaded graph object
adding_qq_dists (bool)
Boolean specifying whether query-query edges are being added
to an existing network, such that not all the sequence IDs will
be found in the old IDs, which should already be correctly ordered
old_ids (list)
Ordered list of vertex names in previous network
previous_pkl (str)
Name of file containing the names of the sequences in the previous_network
betweenness_sample (int)
Number of sequences per component used to estimate betweenness using
a GPU. Smaller numbers are faster but less precise [default = 100]
summarise (bool)
Whether to calculate and print network summaries with :func:`~networkSummary`
(default = True)
use_gpu (bool)
Whether to use GPUs for network construction
Returns:
G (graph)
The resulting network
"""
# Check GPU library use
use_gpu = check_and_set_gpu(use_gpu, gpu_lib, quit_on_fail = True)
# data structures
vertex_labels, self_comparison = initial_graph_properties(rlist, qlist)
# Check df format is correct
if weights:
G_df.columns = ['source','destination','weights']
else:
G_df.columns = ['source','destination']
# Load previous network
if previous_network is not None:
extra_sources, extra_targets, extra_weights = process_previous_network(previous_network = previous_network,
adding_qq_dists = adding_qq_dists,
old_ids = old_ids,
previous_pkl = previous_pkl,
vertex_labels = vertex_labels,
weights = weights,
use_gpu = use_gpu)
if use_gpu:
G_extra_df = cudf.DataFrame()
else:
            G_extra_df = pd.DataFrame()
"""
This script is designed to take already processed matrix timing and properties
files and perform a variety of machine learning techniques using the Scikit-Learn
Python library.
""" # Written using Anaconda with Python 3.5
# <NAME>
# 1-22-17
import time
from os import path as path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from imblearn import pipeline as pl
from imblearn.over_sampling import RandomOverSampler
from scipy import interp
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix, roc_curve, auc
from sklearn.model_selection import StratifiedKFold, StratifiedShuffleSplit
# Printing options
np.set_printoptions(precision=3)
rng = np.random.RandomState()
# Number of splits for k-fold cross validation
skf = StratifiedKFold(n_splits=3, random_state=rng)
sss = StratifiedShuffleSplit(n_splits=3, random_state=rng)
stampede = [1, 4, 8, 12, 16]
bridges = [1, 4, 8, 12, 16, 20, 24, 28]
comet = [1, 4, 8, 12, 16, 20, 24]
janus = [1, 2, 4, 6, 8, 10, 12]
summit = [1, 4, 8, 12, 16, 20, 24]
laptop = [1, 2, 4]
JANUS_ID = 0
BRIDGES_ID = 1
COMET_ID = 2
SUMMIT_ID = 3
STAMPEDE_ID = 4
LAPTOP_ID = 5
system_nps = {JANUS_ID: janus, BRIDGES_ID: bridges, COMET_ID: comet,
SUMMIT_ID: summit, STAMPEDE_ID: stampede, LAPTOP_ID: laptop}
# For roc curves
linestyles = [
(0, ()),
(0, (3, 1, 1, 1)),
(0, (3, 5, 3, 5)),
(0, (5, 1, 10, 3)),
(0, (1, 1)),
(0, (5, 1)),
(0, (3, 5, 1, 5)),
(0, (5, 5)),
(0, (1, 5)),
(0, (2, 3, 4, 1))
]
classifier_list = [
['RandomForest', RandomForestClassifier()]]
# ['GradientBoosting', GradientBoostingClassifier()],
# ['GaussianNB', GaussianNB()],
# ['DecisionTree', DecisionTreeClassifier()],
# ['LogisticRegression', LogisticRegression()],
# ['MLP', MLPClassifier()],
# ['AdaBoost', AdaBoostClassifier()],
# ['KNN', KNeighborsClassifier()]
# ['SVC', SVC(probability=True)],
# ['QDA', QuadraticDiscriminantAnalysis()],
samplers_list = [
['RandomOverSampler', RandomOverSampler()],
# ['SMOTE', SMOTE()],
# ['DummySampler', DummySampler()],
# ['SMOTEENN', SMOTEENN()],
# ['SMOTETomek', SMOTETomek()],
# ['ADASYN', ADASYN()] prohibitively expensive on larger datasets (3k+ secs)
]
def show_confusion_matrix(C, training_systems, training_numprocs, testing_systems, testing_numprocs,
class_labels=['-1', '1']):
"""Draws confusion matrix with associated metrics"""
assert C.shape == (2, 2), "Confusion matrix should be from binary classification only."
# true negative, false positive, etc...
tn = C[0, 0]
fp = C[0, 1]
fn = C[1, 0]
tp = C[1, 1]
NP = fn + tp # Num positive examples
NN = tn + fp # Num negative examples
N = NP + NN
    # print('TrueNeg\tNumNeg\tTruePos\tNumPos\tFalseNeg\tFalsePos\tTruePosRate\tFalsePosRate\tPosPredVal\tNegPredVal\tAccuracy')
    nps_and_systems = str(training_systems) + '\t' + str(training_numprocs) + '\t' + \
                      str(testing_systems) + '\t' + str(testing_numprocs)
    cnf_numbers = ('%d\t%d\t%d\t%d\t%d\t%d\t'
                   '%.2f\t%.2f\t%.2f\t%.2f\t%.2f' %
                   (tn, NN, tp, NP, fn, fp,
                    tp / (tp + fn + 0.),
                    fp / (fp + tn + 0.),
                    tp / (tp + fp + 0.),
                    tn / (tn + fn + 0.),
                    (tp + tn + 0.) / N))
    nps_and_systems = nps_and_systems.replace('[', '')
    nps_and_systems = nps_and_systems.replace(']', '')
    # Use a context manager so the output file is flushed and closed after each call
    with open('cnf_output.csv', 'a') as confusion_matrix_output:
        confusion_matrix_output.write(nps_and_systems + '\t' + cnf_numbers + '\n')
    # print(nps_and_systems + '\t' + cnf_numbers + '\n')
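# Worked example (hypothetical counts, not part of the original script): for
# C = [[50, 10], [5, 35]] -> tn=50, fp=10, fn=5, tp=35, so the true positive rate is
# 35 / (35 + 5) = 0.875, the false positive rate is 10 / (10 + 50) ~ 0.17, and the
# accuracy is (35 + 50) / 100 = 0.85.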
def classify_good_bad(combined, system, numprocs):
# process np first
a = pd.DataFrame()
if type(numprocs) == str and numprocs == "all":
a = combined
elif type(numprocs) == int:
a = combined[(combined.np == numprocs)]
elif type(numprocs) == tuple:
for num in numprocs:
            a = a.append(combined[(combined.np == num)], ignore_index=True)
# now process systems
if type(system) == str and system == "all":
a = a
elif type(system) == int:
a = a[(a.system_id == system)]
elif type(system) == tuple:
for num in system:
a = a.append(a[(a.system_id == num)], ignore_index=True)
# Determine the best times for each matrix
good_bad_list = []
new_time_list = []
grouped = a.groupby(['matrix', 'status_id'])
best_times = grouped['time'].aggregate(np.min)
for index, row in a.iterrows():
current_matrix_time = row['time']
matrix_name = row['matrix']
# Check for matrices which never converged
try:
matrix_min_time = best_times[matrix_name][1] # 1 indicates converged
        except KeyError:
matrix_min_time = np.inf
# Error or unconverged runs = max float time
if row['status_id'] != 1 or matrix_min_time == np.inf:
good_bad_list.append(-1)
new_time_list.append(np.inf)
# Good = anything within 25% of the fastest run for that matrix
elif current_matrix_time <= 1.25 * matrix_min_time:
good_bad_list.append(1)
new_time_list.append(current_matrix_time)
# Bad = anything else outside of that range but still converged
else:
good_bad_list.append(-1)
new_time_list.append(current_matrix_time)
# Create Pandas series from the lists which used to contain strings
new_time_series = pd.Series(new_time_list)
good_bad_series = pd.Series(good_bad_list)
# Add the series to the dataframe as columns
a.reset_index(drop=True, inplace=True)
a = a.assign(new_time=pd.Series(new_time_series.values))
a = a.assign(good_or_bad=pd.Series(good_bad_series))
return a
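# Worked example of the labelling rule above (hypothetical times): if the fastest
# converged run for a matrix took 10.0 s, any converged run taking <= 12.5 s (within
# 25% of the best) is labelled 1 ("good"); slower converged runs and any errored or
# unconverged runs are labelled -1 ("bad"), the latter with their time set to inf.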
def get_properties(properties_filename):
properties = pd.read_csv(properties_filename, header=0, index_col=0)
return properties
def get_times(time_files):
times_array = []
for t in time_files:
times_array.append(pd.read_csv(t, header=0, index_col=0))
combined_times = pd.concat(times_array)
combined_times = combined_times.drop(labels=['system', 'solver', 'prec', 'status',
'new_time', 'good_or_bad', 'resid', 'iters'],
axis=1)
combined_times = combined_times.drop_duplicates()
return combined_times
def get_classification(combined_times, testing_systems, testing_numprocs):
start_time = time.time()
if type(testing_systems) is not list:
testing_systems = [testing_systems]
if type(testing_numprocs) is not list:
testing_numprocs = [testing_numprocs]
testing_classified = pd.DataFrame()
for sys in testing_systems:
for numproc in testing_numprocs:
# Check if the np and/or sys even exist
if numproc in system_nps[sys]:
filename = '../classifications/classified_' + str(sys) + '_' + str(numproc) + '.csv'
if not path.exists(filename):
print("Saving classification to ", filename)
temp = classify_good_bad(combined_times, sys, numproc)
testing_classified = testing_classified.append(temp)
temp.to_csv(filename)
print("Classification time: ", round(time.time() - start_time, 3), '\n')
else:
print('Classification file exists, loading from ' + filename, '\n')
temp = pd.read_csv(filename, header=0, index_col=0)
testing_classified = testing_classified.append(temp)
return testing_classified
def merge_properties_and_times(properties_data, timing_data, system_data):
merged = pd.merge(properties_data, timing_data, on='matrix_id')
merged = pd.merge(system_data, merged, on='system_id')
merged = merged.dropna()
merged = merged.drop(
labels=['system', 'matrix_y', 'matrix_x', 'status_id', 'time', 'new_time', 'matrix_id'], axis=1)
return merged
def classify_and_merge(properties_data, timing_data, system_data, specific_nps, specific_systems):
# Reduce info to just those nps and systems we are wanting to look at
good_bad_list = []
new_time_list = []
specific_nps.sort()
specific_systems.sort()
timing_subset = timing_data[timing_data['np'].isin(specific_nps)]
timing_subset = timing_subset[timing_subset['system_id'].isin(specific_systems)]
grouped = timing_subset.groupby(['matrix', 'status_id'])
best_times = grouped['time'].aggregate(np.min)
# If an existing classification file exists then just load it instead of repeating work
filename = '../classifications/classified_' + str(specific_systems) + '_' + str(specific_nps) + '.csv'
filename = filename.replace(' ', '')
if path.exists(filename):
print('Classification file exists, loading from ' + filename, '\n')
timing_subset = pd.read_csv(filename, header=0, index_col=0)
else:
print("Saving classification to ", filename)
for index, row in timing_subset.iterrows():
current_matrix_time = row['time']
matrix_name = row['matrix']
# Check for matrices which never converged or ended in error and set to inf
try:
matrix_min_time = best_times[matrix_name][1] # 1 indicates converged
            except KeyError:
matrix_min_time = np.inf
# Error or unconverged runs = max float time
if row['status_id'] != 1 or matrix_min_time == np.inf:
good_bad_list.append(-1)
new_time_list.append(np.inf)
# Good = anything within 25% of the fastest run for that matrix
elif current_matrix_time <= 1.25 * matrix_min_time:
good_bad_list.append(1)
new_time_list.append(current_matrix_time)
# Bad = anything else outside of that range but still converged
else:
good_bad_list.append(-1)
new_time_list.append(current_matrix_time)
# Create Pandas series from the resulting lists
new_time_series = pd.Series(new_time_list)
good_bad_series = pd.Series(good_bad_list)
# Add the series to the dataframe as columns
timing_subset.reset_index(drop=True, inplace=True)
timing_subset = timing_subset.assign(new_time=pd.Series(new_time_series.values))
        timing_subset = timing_subset.assign(good_or_bad=pd.Series(good_bad_series))
from pathlib import Path
import numpy as np
import pandas as pd
import time
import pickle
import json
import h5py
import sys
import traceback
import warnings
import Analyses.spike_functions as spike_funcs
import Analyses.spatial_functions as spatial_funcs
import Analyses.open_field_functions as of_funcs
import Pre_Processing.pre_process_functions as pp_funcs
from Utils.robust_stats import robust_zscore
import Analyses.tree_maze_functions as tmf
import Analyses.plot_functions as pf
import scipy.signal as signal
"""
Classes in this file will have several retrieval processes to acquire the required information for each
subject and session.
:class SubjectInfo
-> class that takes a subject as an input. contains general information about what processes have been performed,
clusters, and importantly all the session paths. The contents of this class are saved as a pickle in the results
folder.
:class SubjectSessionInfo
-> children class of SubjectInfo, takes session as an input. This class contains session specific retrieval methods
Low level things, like reading position (eg. 'get_track_dat') are self contained in the class. Higher level
functions like 'get_spikes', are outsourced to the appropriate submodules in the Analyses folder.
If it is the first time calling a retrieval method, the call will save the contents according the paths variable
Otherwise the contents will be loaded from existing data, as opposed to recalculation. Exception is the get_time
method, as this is easily regenerated on each call.
"""
class SummaryInfo:
subjects = ['Li', 'Ne', 'Cl', 'Al', 'Ca', 'Mi']
min_n_units = 1
min_n_trials = 50 # task criteria
min_pct_coverage = 0.75 # open field criteria
invalid_sessions = ['Li_OF_080718']
figure_names = [f"f{ii}" for ii in range(5)]
_root_paths = dict(GD=Path("/home/alexgonzalez/google-drive/TreeMazeProject/"),
BigPC=Path("/mnt/Data_HD2T/TreeMazeProject/"))
def __init__(self, data_root='BigPC'):
self.main_path = self._root_paths[data_root]
self.paths = self._get_paths()
self.unit_table = self.get_unit_table()
self.analyses_table = self.get_analyses_table()
self.valid_track_table = self.get_track_validity_table()
self.sessions_by_subject = {}
self.tasks_by_subject = {}
for s in self.subjects:
self.sessions_by_subject[s] = self.unit_table[self.unit_table.subject == s].session.unique()
self.tasks_by_subject[s] = self.unit_table[self.unit_table.subject == s].task.unique()
def run_analyses(self, task='all', which='all', verbose=False, overwrite=False):
interrupt_flag = False
for subject in self.subjects:
if not interrupt_flag:
subject_info = SubjectInfo(subject)
for session in subject_info.sessions:
try:
if task == 'all':
pass
elif task not in session:
continue
else:
pass
if verbose:
t0 = time.time()
print(f'Processing Session {session}')
session_info = SubjectSessionInfo(subject, session)
session_info.run_analyses(overwrite=overwrite, which=which, verbose=verbose)
if verbose:
t1 = time.time()
print(f"Session Processing Completed: {t1 - t0:0.2f}s")
print()
else:
print(".", end='')
except KeyboardInterrupt:
interrupt_flag = True
break
except ValueError:
pass
except FileNotFoundError:
pass
except:
if verbose:
traceback.print_exc(file=sys.stdout)
pass
if verbose:
print(f"Subject {subject} Analyses Completed.")
def get_analyses_table(self, overwrite=False):
if not self.paths['analyses_table'].exists() or overwrite:
analyses_table = pd.DataFrame()
for subject in self.subjects:
analyses_table = analyses_table.append(SubjectInfo(subject).get_sessions_analyses())
analyses_table.to_csv(self.paths['analyses_table'])
else:
analyses_table = pd.read_csv(self.paths['analyses_table'], index_col=0)
self.analyses_table = analyses_table
return analyses_table
def get_track_validity_table(self, overwrite=False):
if not self.paths['valid_track_table'].exists() or overwrite:
valid_track_table = pd.DataFrame()
for subject in self.subjects:
valid_track_table = valid_track_table.append(SubjectInfo(subject).valid_track_table)
valid_track_table.to_csv(self.paths['valid_track_table'])
else:
valid_track_table = pd.read_csv(self.paths['valid_track_table'], index_col=0)
return valid_track_table
def get_behav_perf(self, overwrite=False):
if not self.paths['behavior'].exists() or overwrite:
perf = pd.DataFrame()
for subject in self.subjects:
subject_info = SubjectInfo(subject)
for session in subject_info.sessions:
if 'T3' in session:
try:
session_info = SubjectSessionInfo(subject, session)
b = session_info.get_event_behavior()
sp = b.get_session_perf()
sp['session'] = session
sp['task'] = session_info.task
sp['subject'] = subject
sp['n_units'] = session_info.n_units
sp['n_cells'] = session_info.n_cells
sp['n_mua'] = session_info.n_mua
perf = pd.concat((perf, sp), ignore_index=True)
except:
pass
perf.to_csv(self.paths['behavior'])
else:
perf = pd.read_csv(self.paths['behavior'], index_col=0)
return perf
def _get_paths(self, root_path=None):
if root_path is None:
results_path = self.main_path / 'Results_Summary'
figures_path = self.main_path / 'Figures'
else:
results_path = root_path / 'Results_Summary'
figures_path = root_path / 'Figures'
paths = dict(
analyses_table=results_path / 'analyses_table.csv',
valid_track_table=results_path / 'valid_track_table.csv',
behavior=results_path / 'behavior_session_perf.csv',
units=results_path / 'all_units_table.csv',
of_metric_scores=results_path / 'of_metric_scores_summary_table.csv',
of_model_scores=results_path / 'of_model_scores_summary_table_agg.csv',
zone_rates_comps=results_path / 'zone_rates_comps_summary_table.csv',
zone_rates_remap=results_path / 'zone_rates_remap_summary_table.csv',
bal_conds_seg_rates=results_path / 'bal_conds_seg_rates_summary_table.csv',
)
paths['results'] = results_path
paths['figures'] = figures_path
return paths
def update_paths(self):
for subject in self.subjects:
_ = SubjectInfo(subject, overwrite=True)
def get_zone_rates_comps(self, overwrite=False):
"""
Aggregates tables across sessions and adds unit information.
Note, that overwrite only overwrites the aggregate table and does not perform the analysis on each session.
:param overwrite:
:return:
pandas data frame with n_units as index
"""
if not self.paths['zone_rates_comps'].exists() or overwrite:
sessions_validity = self.get_track_validity_table()
zone_rates = pd.DataFrame()
unit_count = 0
valid_sessions = list(self.analyses_table.loc[self.analyses_table.zone_rates_comps == True].index)
for subject in self.subjects:
subject_info = SubjectInfo(subject)
for session in subject_info.sessions:
if session in valid_sessions:
session_info = SubjectSessionInfo(subject, session)
n_session_units = session_info.n_units
if n_session_units > 0:
try:
session_zone_rate_comp_table = session_info.get_zone_rates_remapping()
comp_table_columns = session_zone_rate_comp_table.columns
session_table = pd.DataFrame(index=np.arange(n_session_units),
columns=['unit_id', 'subject', 'session',
'session_pct_cov', 'session_valid',
'session_unit_id', 'unit_type', 'tt', 'tt_cl',
'cl_name'])
session_table['session'] = session
session_table['subject'] = session_info.subject
session_table['session_unit_id'] = np.arange(n_session_units)
session_table['unit_id'] = np.arange(n_session_units) + unit_count
session_table['unit_type'] = [v[0] for k, v in session_info.cluster_ids.items()]
session_table['tt'] = [v[1] for k, v in session_info.cluster_ids.items()]
session_table['tt_cl'] = [v[2] for k, v in session_info.cluster_ids.items()]
if session in sessions_validity.columns:
session_table['session_pct_cov'] = sessions_validity[session]
session_table['session_valid'] = 1
else:
session_table['session_pct_cov'] = 0
session_table['session_valid'] = 0
cl_names = []
for k, v in session_info.cluster_ids.items():
tt = v[1]
cl = v[2]
depth = subject_info.sessions_tt_positions.loc[session, f"tt_{tt}"]
cl_name = f"{session}-tt{tt}_d{depth}_cl{cl}"
cl_names.append(cl_name)
session_table['cl_name'] = cl_names
unit_count += n_session_units
session_table = session_table.join(session_zone_rate_comp_table)
except:
print(f'Error Processing Session {session}')
traceback.print_exc(file=sys.stdout)
continue
zone_rates = zone_rates.append(session_table)
zone_rates = zone_rates.reset_index(drop=True)
zone_rates.to_csv(self.paths['zone_rates_comps'])
else:
zone_rates = pd.read_csv(self.paths['zone_rates_comps'], index_col=0)
return zone_rates
def get_bal_conds_seg_rates(self, segment_type='bigseg', overwrite=False):
fn = self.paths['bal_conds_seg_rates']
if segment_type != 'bigseg':
name = fn.name.split('.')
            name2 = name[0] + segment_type + '.' + name[1]
fn = fn.parent / name2
if not fn.exists() or overwrite:
sessions_validity = self.get_track_validity_table()
seg_rates = pd.DataFrame()
unit_count = 0
valid_sessions = list(self.analyses_table.loc[self.analyses_table.bal_conds_seg_rates == True].index)
for subject in self.subjects:
subject_info = SubjectInfo(subject)
for session in subject_info.sessions:
if session in valid_sessions:
session_info = SubjectSessionInfo(subject, session)
n_session_units = session_info.n_units
if n_session_units > 0:
try:
session_zone_rate_comp_table = session_info.get_bal_conds_seg_rates(segment_type=segment_type)
comp_table_columns = session_zone_rate_comp_table.columns
session_table = pd.DataFrame(index=np.arange(n_session_units),
columns=['unit_id', 'subject', 'session',
'session_pct_cov', 'session_valid',
'session_unit_id', 'unit_type', 'tt', 'tt_cl',
'cl_name'])
session_table['session'] = session
session_table['subject'] = session_info.subject
session_table['session_unit_id'] = np.arange(n_session_units)
session_table['unit_id'] = np.arange(n_session_units) + unit_count
session_table['unit_type'] = [v[0] for k, v in session_info.cluster_ids.items()]
session_table['tt'] = [v[1] for k, v in session_info.cluster_ids.items()]
session_table['tt_cl'] = [v[2] for k, v in session_info.cluster_ids.items()]
if session in sessions_validity.columns:
session_table['session_pct_cov'] = sessions_validity[session]
session_table['session_valid'] = 1
else:
session_table['session_pct_cov'] = 0
session_table['session_valid'] = 0
cl_names = []
for k, v in session_info.cluster_ids.items():
tt = v[1]
cl = v[2]
depth = subject_info.sessions_tt_positions.loc[session, f"tt_{tt}"]
cl_name = f"{session}-tt{tt}_d{depth}_cl{cl}"
cl_names.append(cl_name)
session_table['cl_name'] = cl_names
unit_count += n_session_units
session_table = session_table.join(session_zone_rate_comp_table)
except:
print(f'Error Processing Session {session}')
traceback.print_exc(file=sys.stdout)
continue
seg_rates = seg_rates.append(session_table)
seg_rates = seg_rates.reset_index(drop=True)
seg_rates.to_csv(fn)
else:
seg_rates = pd.read_csv(fn, index_col=0)
return seg_rates
def get_zone_rates_remap(self, overwrite=False):
if not self.paths['zone_rates_remap'].exists() or overwrite:
sessions_validity = self.get_track_validity_table()
zone_rates = pd.DataFrame()
unit_count = 0
valid_sessions = list(self.analyses_table.loc[self.analyses_table.zone_rates_comps == True].index)
for subject in self.subjects:
subject_info = SubjectInfo(subject)
for session in subject_info.sessions:
if session in valid_sessions:
session_info = SubjectSessionInfo(subject, session)
n_session_units = session_info.n_units
if n_session_units > 0:
try:
session_zone_rate_comp_table = session_info.get_zone_rates_remap()
comp_table_columns = session_zone_rate_comp_table.columns
session_table = pd.DataFrame(index=np.arange(n_session_units),
columns=['unit_id', 'subject', 'session',
'session_pct_cov', 'session_valid',
'session_unit_id', 'unit_type', 'tt', 'tt_cl',
'cl_name'])
session_table['session'] = session
session_table['subject'] = session_info.subject
session_table['session_unit_id'] = np.arange(n_session_units)
session_table['unit_id'] = np.arange(n_session_units) + unit_count
session_table['unit_type'] = [v[0] for k, v in session_info.cluster_ids.items()]
session_table['tt'] = [v[1] for k, v in session_info.cluster_ids.items()]
session_table['tt_cl'] = [v[2] for k, v in session_info.cluster_ids.items()]
if session in sessions_validity.columns:
session_table['session_pct_cov'] = sessions_validity[session]
session_table['session_valid'] = 1
else:
session_table['session_pct_cov'] = 0
session_table['session_valid'] = 0
cl_names = []
for k, v in session_info.cluster_ids.items():
tt = v[1]
cl = v[2]
depth = subject_info.sessions_tt_positions.loc[session, f"tt_{tt}"]
cl_name = f"{session}-tt{tt}_d{depth}_cl{cl}"
cl_names.append(cl_name)
session_table['cl_name'] = cl_names
unit_count += n_session_units
session_table = session_table.join(session_zone_rate_comp_table)
except:
print(f'Error Processing Session {session}')
traceback.print_exc(file=sys.stdout)
continue
zone_rates = zone_rates.append(session_table)
zone_rates = zone_rates.reset_index(drop=True)
zone_rates.to_csv(self.paths['zone_rates_remap'])
else:
zone_rates = pd.read_csv(self.paths['zone_rates_remap'], index_col=0)
return zone_rates
def get_of_results(self, overwrite=False):
curate_flag = False
# get metrics
if not self.paths['of_metric_scores'].exists() or overwrite:
metric_scores = self._get_of_metric_scores()
curate_flag = True
else:
metric_scores = pd.read_csv(self.paths['of_metric_scores'], index_col=0)
# get models
if not self.paths['of_model_scores'].exists() or overwrite:
model_scores = self._get_of_models_scores()
curate_flag = True
else:
model_scores = pd.read_csv(self.paths['of_model_scores'], index_col=0)
if curate_flag:
metric_scores, model_scores = self._match_unit_ids(metric_scores, model_scores)
for session in self.invalid_sessions:
unit_idx = self.unit_table[self.unit_table.session == session].unique_cl_name
metric_scores.loc[metric_scores.cl_name.isin(unit_idx), 'session_valid'] = False
model_scores.loc[model_scores.cl_name.isin(unit_idx), 'session_valid'] = False
metric_scores.to_csv(self.paths['of_metric_scores'])
model_scores.to_csv(self.paths['of_model_scores'])
return metric_scores, model_scores
def _get_of_metric_scores(self, overwrite=False):
if not self.paths['of_metric_scores'].exists() or overwrite:
analyses = ['speed', 'hd', 'border', 'grid', 'stability']
output_scores_names = ['score', 'sig']
n_analyses = len(analyses)
unit_count = 0
metric_scores = pd.DataFrame()
for subject in self.subjects:
subject_info = SubjectInfo(subject)
for session in subject_info.sessions:
if 'OF' in session:
session_info = SubjectSessionInfo(subject, session)
if session_info.n_units > 0:
temp = session_info.get_scores()
session_scores = pd.DataFrame(index=np.arange(session_info.n_units * n_analyses),
columns=['unit_id', 'subject', 'session',
'session_pct_cov', 'session_valid',
'session_unit_id', 'unit_type', 'tt', 'tt_cl',
'cl_name', 'analysis_type',
'score', 'sig', ])
session_scores['analysis_type'] = np.repeat(np.array(analyses), session_info.n_units)
session_scores['session'] = session_info.session
session_scores['subject'] = session_info.subject
session_scores['session_unit_id'] = np.tile(np.arange(session_info.n_units), n_analyses)
session_scores['unit_id'] = np.tile(np.arange(session_info.n_units),
n_analyses) + unit_count
session_scores['unit_type'] = [v[0] for k, v in
session_info.cluster_ids.items()] * n_analyses
session_scores['tt'] = [v[1] for k, v in session_info.cluster_ids.items()] * n_analyses
session_scores['tt_cl'] = [v[2] for k, v in
session_info.cluster_ids.items()] * n_analyses
behav = session_info.get_track_data()
# noinspection PyTypeChecker
coverage = np.around(behav['pos_valid_mask'].mean(), 2)
session_scores['session_pct_cov'] = coverage
session_scores['session_valid'] = coverage >= self.min_pct_coverage
cl_names = []
for k, v in session_info.cluster_ids.items():
tt = v[1]
cl = v[2]
depth = subject_info.sessions_tt_positions.loc[session, f"tt_{tt}"]
cl_name = f"{session}-tt{tt}_d{depth}_cl{cl}"
cl_names.append(cl_name)
session_scores['cl_name'] = cl_names * n_analyses
unit_count += session_info.n_units
try:
for ii, analysis in enumerate(analyses):
indices = np.arange(session_info.n_units) + ii * session_info.n_units
session_scores.at[indices, 'sig'] = temp[analysis + '_sig'].values
if analysis == 'stability':
session_scores.at[indices, 'score'] = temp[analysis + '_corr'].values
else:
session_scores.at[indices, 'score'] = temp[analysis + '_score'].values
except:
print(f'Error Processing Session {session}')
traceback.print_exc(file=sys.stdout)
pass
session_scores[output_scores_names] = session_scores[output_scores_names].astype(float)
metric_scores = metric_scores.append(session_scores)
metric_scores = metric_scores.reset_index(drop=True)
metric_scores.to_csv(self.paths['of_metric_scores'])
else:
metric_scores = pd.read_csv(self.paths['of_metric_scores'], index_col=0)
return metric_scores
def _get_of_models_scores(self):
models = ['speed', 'hd', 'border', 'grid', 'pos', 'agg_all', 'agg_sdp', 'agg_sdbg']
metrics = ['r2', 'map_r', 'n_err', 'coef', 'agg_all_coef', 'agg_sdbg_coef', 'agg_sdp_coef']
splits = ['train', 'test']
unit_count = 0
model_scores = pd.DataFrame()
for subject in self.subjects:
subject_info = SubjectInfo(subject)
for session in subject_info.sessions:
if 'OF' in session:
session_info = SubjectSessionInfo(subject, session)
n_session_units = session_info.n_units
if n_session_units > 0:
try:
temp = session_info.get_encoding_models_scores()
if temp.empty:
continue
# noinspection PyTypeChecker
mask = (temp['metric'].isin(metrics)) & (temp['model'].isin(models))
session_models_scores = pd.DataFrame(index=range(mask.sum()),
columns=['unit_id', 'subject', 'session',
'session_unit_id',
'unit_type', 'session_pct_cov',
'session_valid',
'tt', 'tt_cl', 'model', 'split', 'metric',
'value'])
session_models_scores.loc[:, ['model', 'split', 'metric', 'value']] = \
temp.loc[mask, ['model', 'split', 'metric', 'value']].values
session_models_scores['session'] = session_info.session
session_models_scores['subject'] = session_info.subject
session_models_scores['session_unit_id'] = temp.loc[mask, 'unit_id'].values
session_models_scores['unit_id'] = session_models_scores['session_unit_id'] + unit_count
for session_unit_id, cluster_info in session_info.cluster_ids.items():
mask = session_models_scores.session_unit_id == int(session_unit_id)
tt = cluster_info[1]
cl = cluster_info[2]
depth = subject_info.sessions_tt_positions.loc[session, f"tt_{tt}"]
cl_name = f"{session}-tt{tt}_d{depth}_cl{cl}"
session_models_scores.loc[mask, 'unit_type'] = cluster_info[0]
session_models_scores.loc[mask, 'tt'] = tt
session_models_scores.loc[mask, 'tt_cl'] = cl
session_models_scores.loc[mask, 'cl_name'] = cl_name
behav = session_info.get_track_data()
# noinspection PyTypeChecker
coverage = np.around(behav['pos_valid_mask'].mean(), 2)
session_models_scores['session_pct_cov'] = coverage
session_models_scores['session_valid'] = coverage >= self.min_pct_coverage
#
model_scores = model_scores.append(session_models_scores)
unit_count += n_session_units
except ValueError:
traceback.print_exc(file=sys.stdout)
pass
#
model_scores = model_scores.reset_index(drop=True)
model_scores = model_scores.astype({"value": float})
        model_scores.to_csv(self.paths['of_model_scores'])
return model_scores
def _match_unit_ids(self, metric_scores, model_scores):
session_unit_id_array = metric_scores[['session', 'session_unit_id']].values
session_unit_id_tuple = [tuple(ii) for ii in session_unit_id_array]
sid_2_uid = {}
uid_2_sid = {}
used_ids = []
unique_id_cnt = 0
for suid in session_unit_id_tuple:
            if suid not in used_ids:
sid_2_uid[suid] = unique_id_cnt
uid_2_sid[unique_id_cnt] = suid
unique_id_cnt += 1
used_ids += [suid]
session_unit_id_array = model_scores[['session', 'session_unit_id']].values
session_unit_id_tuple = [tuple(ii) for ii in session_unit_id_array]
model_scores['unit_id'] = [sid_2_uid[suid] for suid in session_unit_id_tuple]
metric_scores.to_csv(self.paths['of_metric_scores'])
        model_scores.to_csv(self.paths['of_model_scores'])
return metric_scores, model_scores
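    # Illustrative note on the mapping built above (hypothetical sessions): the
    # (session, session_unit_id) pairs [('s1', 0), ('s1', 1), ('s2', 0)] are assigned
    # unique unit_ids [0, 1, 2], so the same unit keeps a single ID across both the
    # metric and model score tables.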
def get_unit_table(self, overwrite=False):
if not self.paths['units'].exists() or overwrite:
raise NotImplementedError
else:
unit_table = pd.read_csv(self.paths['units'], index_col=0)
return unit_table
def plot(self, fig_id, save=False, dpi=1000, root_dir=None, fig_format='jpg'):
if fig_id == 'f1':
f1 = pf.Fig1()
f = f1.plot_all()
else:
return
if save:
fn = f"{fig_id}.{fig_format}"
if root_dir is None:
f.savefig(self.paths['figures'] / fn, dpi=dpi, bbox_inches='tight')
else:
if root_dir in self._root_paths.keys():
paths = self._get_paths(self._root_paths[root_dir])
f.savefig(paths['figures'] / fn, dpi=dpi, bbox_inches='tight')
return f
class SubjectInfo:
def __init__(self, subject, sorter='KS2', data_root='BigPC', overwrite=False, time_step=0.02,
samp_rate=32000, n_tetrodes=16, fr_temporal_smoothing=0.125, spk_outlier_thr=None,
overwrite_cluster_stats=False, overwrite_session_clusters=False):
subject = str(subject.title())
self.subject = subject
self.sorter = sorter
self.params = {'time_step': time_step, 'samp_rate': samp_rate, 'n_tetrodes': n_tetrodes,
'fr_temporal_smoothing': fr_temporal_smoothing, 'spk_outlier_thr': spk_outlier_thr,
'spk_recording_buffer': 3}
self.tetrodes = np.arange(n_tetrodes, dtype=int) + 1
if data_root == 'BigPC':
if subject in ['Li', 'Ne']:
self.root_path = Path('/mnt/Data1_SSD2T/Data')
elif subject in ['Cl']:
self.root_path = Path('/mnt/Data2_SSD2T/Data')
elif subject in ['Ca', 'Mi', 'Al']:
self.root_path = Path('/mnt/Data3_SSD2T/Data')
self.raw_path = Path('/mnt/Raw_Data/Data', subject)
elif data_root == 'oak':
self.root_path = Path('/mnt/o/giocomo/alexg/')
self.raw_path = self.root_path / 'RawData/InVivo' / subject
# self.sorted_path = self.root_path / 'Clustered' / subject
# self.results_path = self.root_path / 'Analyses' / subject
else:
self.root_path = Path(data_root)
self.raw_path = self.root_path / 'Raw_Data' / subject
self.preprocessed_path = self.root_path / 'PreProcessed' / subject
self.sorted_path = self.root_path / 'Sorted' / subject
self.results_path = self.root_path / 'Results' / subject
self.subject_info_file = self.results_path / ('subject_info_{}_{}.pkl'.format(sorter, subject))
# check if instance of DataPaths for subject and sorter exists already
if self.subject_info_file.exists() and not overwrite:
self.load_subject_info()
else:
# get channel table
self._channel_table_file = self.preprocessed_path / ('chan_table_{}.csv'.format(subject))
if not self._channel_table_file.exists():
_task_fn = self.preprocessed_path / 'TasksDir' / f"pp_table_{self.subject}.json"
if _task_fn.exists():
with _task_fn.open(mode='r') as f:
_task_table = json.load(f)
pp_funcs.post_process_channel_table(self.subject, _task_table)
else:
sys.exit(f"Error. Task table for pre-processing does not exists: {_task_fn}")
self.channel_table = pd.read_csv(self._channel_table_file, index_col=0)
# get sessions from channel table information
self.sessions = list(self.channel_table.index)
self.n_sessions = len(self.sessions)
self.session_paths = {}
for session in self.sessions:
self.session_paths[session] = self._session_paths(session)
# get cluster information
try:
if overwrite_cluster_stats:
# overwrite cluster stats & clusters tables
self.update_clusters()
else:
# load tables
self.session_clusters = self.get_session_clusters(overwrite=overwrite_session_clusters)
self.sort_tables = self.get_sort_tables(overwrite=overwrite_session_clusters)
except:
print("Error obtaining clusters.")
print(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2].tb_lineno)
traceback.print_exc(file=sys.stdout)
# get tetrode depths & match sessions
self.sessions_tt_positions = self.get_sessions_tt_position()
self.tt_depth_match = self.get_tetrode_depth_match()
# hack because get sessions analyses calls subjects info before it saves,
# so need to save it first. alternative is to feed subject session info the subject_info object instead.
# TO DO #
self.save_subject_info()
# check analyses table
self.analyses_table = self.get_sessions_analyses()
self.valid_track_table = self.check_track_data_validty()
self.save_subject_info()
def load_subject_info(self):
with self.subject_info_file.open(mode='rb') as f:
loaded_self = pickle.load(f)
self.__dict__.update(loaded_self.__dict__)
return self
def save_subject_info(self):
with self.subject_info_file.open(mode='wb') as f:
pickle.dump(self, f, protocol=pickle.HIGHEST_PROTOCOL)
def get_sessions_analyses(self):
analyses_table = pd.DataFrame()
for session in self.sessions:
session_info = SubjectSessionInfo(self.subject, session)
analyses_table = analyses_table.append(session_info.session_analyses_table)
analyses_table.fillna(-1, inplace=True)
return analyses_table
def check_track_data_validty(self):
df = pd.DataFrame(index=self.sessions, columns=['task', 'validity'])
for session in self.sessions:
if self.analyses_table.loc[session, 'track_data'] == 1:
session_info = SubjectSessionInfo(self.subject, session)
df.loc[session, 'task'] = session_info.task
df.loc[session, 'validity'] = session_info.check_track_data_validity()
return df
# tetrode methods
def update_clusters(self):
self.session_clusters = self.get_session_clusters(overwrite=True)
self.sort_tables = self.get_sort_tables(overwrite=True)
self.save_subject_info()
def get_sessions_tt_position(self):
p = Path(self.results_path / f"{self.subject}_tetrodes.csv")
if p.exists():
tt_pos = pd.read_csv(p)
tt_pos['Date'] = pd.to_datetime(tt_pos['Date']).dt.strftime('%m%d%y')
tt_pos = tt_pos.set_index('Date')
tt_pos = tt_pos[['TT' + str(tt) + '_overall' for tt in self.tetrodes]]
session_dates = {session: session.split('_')[2] for session in self.sessions}
sessions_tt_pos = pd.DataFrame(index=self.sessions, columns=['tt_' + str(tt) for tt in self.tetrodes])
tt_pos_dates = tt_pos.index
prev_date = tt_pos_dates[0]
for session in self.sessions:
date = session_dates[session]
# below if is to correct for incorrect session dates for Cl
if (date in ['010218', '010318', '010418']) & (self.subject == 'Cl'):
date = date[:5] + '9'
# this part accounts for missing dates by assigning it to the previous update
if date in tt_pos_dates:
sessions_tt_pos.loc[session] = tt_pos.loc[date].values
prev_date = str(date)
else:
sessions_tt_pos.loc[session] = tt_pos.loc[prev_date].values
return sessions_tt_pos
else:
print(f"Tetrode depth table not found at '{str(p)}'")
return None
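    # Example of the date-matching rule above (hypothetical dates): if tetrode depths
    # were logged on 010118 and 010518 only, a session recorded on 010318 is assigned
    # the 010118 depths, i.e. the most recent entry at or before the session date.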
def get_depth_wf(self):
raise NotImplementedError
def get_session_tt_wf(self, session, tt, cluster_ids=None, wf_lims=None, n_wf=200):
import Sorting.sort_functions as sort_funcs
if wf_lims is None:
wf_lims = [-12, 20]
tt_str = 'tt_' + str(tt)
_sort_path = Path(self.session_paths[session]['Sorted'], tt_str, self.sorter)
_cluster_spike_time_fn = _sort_path / 'spike_times.npy'
_cluster_spike_ids_fn = _sort_path / 'spike_clusters.npy'
_hp_data_fn = _sort_path / 'recording.dat'
if _hp_data_fn.exists():
hp_data = sort_funcs.load_hp_binary_data(_hp_data_fn)
else: # filter data
hp_data = self._spk_filter_data(session, tt)
spike_times = np.load(_cluster_spike_time_fn)
spike_ids = np.load(_cluster_spike_ids_fn)
wf_samps = np.arange(wf_lims[0], wf_lims[1])
if cluster_ids is None:
cluster_ids = np.unique(spike_ids)
n_clusters = len(cluster_ids)
out = np.zeros((n_clusters, n_wf, len(wf_samps) * 4), dtype=np.float16)
for cl_idx, cluster in enumerate(cluster_ids):
cl_spk_times = spike_times[spike_ids == cluster]
n_cl_spks = len(cl_spk_times)
if n_wf == 'all':
sampled_spikes = cl_spk_times
elif n_wf > n_cl_spks:
# Note: if the cluster has fewer spikes than n_wf, spikes are sampled with replacement so sampled_spikes still has n_wf entries
sampled_spikes = cl_spk_times[np.random.randint(n_cl_spks, size=n_wf)]
else: # sample from spikes
sampled_spikes = cl_spk_times[np.random.choice(np.arange(n_cl_spks), size=n_wf, replace=False)]
for wf_idx, samp_spk in enumerate(sampled_spikes):
out[cl_idx, wf_idx] = hp_data[:, wf_samps + samp_spk].flatten()
return out
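# Note on the array returned above (an explanatory sketch, not original code):
# `out` has shape (n_clusters, n_wf, len(wf_samps) * 4), i.e. for every cluster
# and every sampled spike the 4 tetrode channels' waveform snippets are
# flattened into one row. A hypothetical call with the defaults
# (wf_lims=[-12, 20], n_wf=200) therefore returns an array of shape
# (n_clusters, 200, 128).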
def get_session_clusters(self, overwrite=False):
_clusters_file = self.sorted_path / ('clusters_{}_{}.json'.format(self.sorter, self.subject))
if _clusters_file.exists() and not overwrite: # load
with _clusters_file.open(mode='r') as f:
session_clusters = json.load(f)
else: # create
session_clusters = {}
for session in self.sessions:
self._cluster_stats(session)
session_clusters[session] = self._session_clusters(session)
try:
with _clusters_file.open(mode='w') as f:
json.dump(session_clusters, f, indent=4)
except TypeError:
print(session)
return session_clusters
# cluster matching methods
def get_tetrode_depth_match(self):
tt_pos = self.sessions_tt_positions
try:
tt_depth_matchs = {tt: {} for tt in self.tetrodes}
for tt in self.tetrodes:
tt_str = 'tt_' + str(tt)
tt_depths = tt_pos[tt_str].unique()
for depth in tt_depths:
tt_depth_matchs[tt][depth] = list(tt_pos[tt_pos[tt_str] == depth].index)
return tt_depth_matchs
except Exception:
print("Error Matching Sessions based on tetrode depth")
print(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2].tb_lineno)
traceback.print_exc(file=sys.stdout)
return None
def get_session_match_analysis(self):
# # determine sessions/tt to match based on depth
# matching_analyses = []
# for tt in np.arange(1, 17):
# tt_depths = list(self.tt_depth_match[tt].keys())
#
# for tt_d in tt_depths:
# tt_d_sessions = self.tt_depth_match[tt][tt_d]
# # check if there are more 2 or more sessions with units
# n_cells_session = np.zeros(len(tt_d_sessions), dtype=int)
#
# for ii, session in enumerate(tt_d_sessions):
# session_cell_ids = self.session_clusters[session]['cell_IDs']
# if tt in session_cell_ids.keys():
# n_cells_session[ii] = len(session_cell_ids[tt])
# sessions_with_cells = np.where(n_cells_session > 0)[0]
#
# if len(sessions_with_cells) >= 2:
# n_units = n_cells_session[sessions_with_cells].sum()
# matching_analyses.append((tt, tt_d, np.array(tt_d_sessions)[sessions_with_cells].tolist(),
# n_units, n_cells_session[sessions_with_cells].tolist()))
## version as a dict ##
matching_analyses = {}
cnt = 0
for tt in np.arange(1, 17):
tt_depths = list(self.tt_depth_match[tt].keys())
for tt_d in tt_depths:
tt_d_sessions = self.tt_depth_match[tt][tt_d]
# count the number of units in each session at this tetrode depth
n_cells_session = np.zeros(len(tt_d_sessions), dtype=int)
for ii, session in enumerate(tt_d_sessions):
session_cell_ids = self.session_clusters[session]['cell_IDs']
if tt in session_cell_ids.keys():
n_cells_session[ii] = len(session_cell_ids[tt])
sessions_with_cells = np.where(n_cells_session > 0)[0]
n_units = n_cells_session[sessions_with_cells].sum()
if len(sessions_with_cells) >= 1:
matching_analyses[cnt] = {'tt': tt, 'd': tt_d, 'n_units': n_units,
'sessions': np.array(tt_d_sessions)[sessions_with_cells].tolist(),
'n_session_units': n_cells_session[sessions_with_cells].tolist()}
cnt += 1
return matching_analyses
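# Illustrative sketch (not original code) of the dict returned above; the
# tetrode, depth and session names below are made-up placeholders:
#
#   {0: {'tt': 3, 'd': 2.5, 'n_units': 5,
#        'sessions': ['Cl_OF_010219', 'Cl_T3_010319'],
#        'n_session_units': [2, 3]},
#    1: {...}}
#
# Each entry groups the sessions recorded while a tetrode sat at the same
# depth, together with the unit counts used by the cluster-matching methods below.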
def get_cluster_dists(self, overwrite=False, **kwargs):
import Analyses.cluster_match_functions as cmf
params = {'dim_reduc_method': 'umap', 'n_wf': 1000, 'zscore_wf': True}
params.update(kwargs)
cl_dists_fn = self.results_path / f"cluster_dists.pickle"
if not cl_dists_fn.exists() or overwrite:
matching_analyses = self.get_session_match_analysis()
n_wf = params['n_wf']
dim_reduc_method = params['dim_reduc_method']
n_samps = 32 * 4
cluster_dists = {k: {} for k in np.arange(len(matching_analyses))}
for analysis_id, analysis in matching_analyses.items():
tt, d, sessions = analysis['tt'], analysis['d'], analysis['sessions']
n_units, n_session_units = analysis['n_units'], analysis['n_session_units']
# Obtain cluster labels & mapping between labels [this part can be improved]
cl_names = []
for session_num, session in enumerate(sessions):
cluster_ids = self.session_clusters[session]['cell_IDs'][tt]
for cl_num, cl_id in enumerate(cluster_ids):
cl_name = f"{session}-tt{tt}_d{d}_cl{cl_id}"
cl_names.append(cl_name)
# load waveforms
X = np.empty((0, n_wf, n_samps), dtype=np.float16)
for session in sessions:
cluster_ids = self.session_clusters[session]['cell_IDs'][tt]
session_cell_wf = self.get_session_tt_wf(session, tt, cluster_ids=cluster_ids, n_wf=n_wf)
X = np.concatenate((X, session_cell_wf), axis=0)
if params['zscore_wf']:
X = robust_zscore(X, axis=2)
X[np.isnan(X)] = 0
X[np.isinf(X)] = 0
# Obtain numeric cluster labels (one label per waveform)
clusters_label_num = np.arange(n_units).repeat(n_wf)
# Reduce dims
X_2d = cmf.dim_reduction(X.reshape(-1, X.shape[-1]), method=dim_reduc_method)
# compute covariance and location
clusters_loc, clusters_cov = cmf.get_clusters_moments(data=X_2d, labels=clusters_label_num)
# compute distance metrics
dist_mats = cmf.get_clusters_all_dists(clusters_loc, clusters_cov, data=X_2d, labels=clusters_label_num)
# create data frames with labeled cluster names
dists_mats_df = {}
for metric, dist_mat in dist_mats.items():
dists_mats_df[metric] = pd.DataFrame(dist_mat, index=cl_names, columns=cl_names)
# store
clusters_loc = {k: v for k, v in zip(cl_names, clusters_loc)}
clusters_cov = {k: v for k, v in zip(cl_names, clusters_cov)}
cluster_dists[analysis_id] = {'analysis': analysis, 'cl_names': cl_names,
'clusters_loc': clusters_loc, 'clusters_cov': clusters_cov,
'dists_mats': dists_mats_df}
print(".", end="")
with cl_dists_fn.open(mode='wb') as f:
pickle.dump(cluster_dists, f, protocol=pickle.HIGHEST_PROTOCOL)
else:
with cl_dists_fn.open(mode='rb') as f:
cluster_dists = pickle.load(f)
return cluster_dists
def match_clusters(self, overwrite=False, require_subsets=True, **kwargs):
import Analyses.cluster_match_functions as cmf
params = {'dist_metric': 'pe', 'dist_metric_thr': 0.5, 'select_lower': True}
params.update(kwargs)
dist_metric = params['dist_metric']
dist_metric_thr = params['dist_metric_thr']
select_lower = params['select_lower']
if require_subsets: # rs -> require subsets, conservative in grouping clusters
cl_match_results_fn = self.results_path / f"cluster_matches_rs_{params['dist_metric']}.pickle"
else: # nrs -> doesn't require subsets, results in more sessions being grouped
cl_match_results_fn = self.results_path / f"cluster_matches_nrs_{params['dist_metric']}.pickle"
if not cl_match_results_fn.exists() or overwrite:
cluster_dists = self.get_cluster_dists()
matching_analyses = self.get_session_match_analysis()
# [cluster_dists[k]['analysis'] for k in cluster_dists.keys()]
cluster_match_results = {k: {} for k in np.arange(len(matching_analyses))}
for analysis_id, analysis in matching_analyses.items():
dist_mat = cluster_dists[analysis_id]['dists_mats'][dist_metric]
matches_dict = cmf.find_session_cl_matches(dist_mat, thr=dist_metric_thr,
session_cl_sep="-", select_lower=select_lower)
unique_matches_sets, unique_matches_dict = \
cmf.matches_dict_to_unique_sets(matches_dict, dist_mat, select_lower=select_lower,
require_subsets=require_subsets)
cluster_match_results[analysis_id] = {'analysis': analysis,
'matches_dict': unique_matches_dict,
'matches_sets': unique_matches_sets
}
with cl_match_results_fn.open(mode='wb') as f:
pickle.dump(cluster_match_results, f, protocol=pickle.HIGHEST_PROTOCOL)
else:
with cl_match_results_fn.open(mode='rb') as f:
cluster_match_results = pickle.load(f)
return cluster_match_results
# sort/unit tables methods
def get_sort_tables(self, overwrite=False):
_sort_table_ids = ['tt', 'valid', 'curated', 'summary']
_sort_table_files = {ii: Path(self.sorted_path, 'sort_{}_{}_{}'.format(ii, self.sorter, self.subject))
for ii in _sort_table_ids}
if _sort_table_files['summary'].exists() and not overwrite:
sort_tables = {ii: [] for ii in _sort_table_ids}
for ii in _sort_table_ids:
sort_tables[ii] = pd.read_csv(_sort_table_files[ii], index_col=0)
else:
sort_tables = self._sort_tables()
for ii in _sort_table_ids:
sort_tables[ii].to_csv(_sort_table_files[ii])
return sort_tables
def get_units_table(self, overwrite=False):
units_table_fn = self.results_path / f"units_table.csv"
if not units_table_fn.exists() or overwrite:
n_total_units = 0
for session in self.sessions:
n_total_units += self.session_clusters[session]['n_cell']
n_total_units += self.session_clusters[session]['n_mua']
subject_units_table = pd.DataFrame(index=np.arange(n_total_units),
columns=["subject_cl_id", "subject", "session", "task", "date",
"subsession", "tt", "depth", "unique_cl_name",
"session_cl_id", "unit_type", "n_matches_con",
"subject_cl_match_con_id", "n_matches_lib",
"subject_cl_match_lib_id",
"snr", "fr", "isi_viol_rate"])
subject_units_table["subject"] = self.subject
subject_cl_matches_con = self.match_clusters()
matches_con_sets = {}
matches_con_set_num = {}
matches_con_dict = {}
cnt = 0
for k, cma in subject_cl_matches_con.items():
matches_con_sets.update({cnt + ii: clx_set for ii, clx_set in enumerate(cma['matches_sets'])})
cnt = len(matches_con_sets)
matches_con_dict.update(cma['matches_dict'])
# creates a dict mapping each matched cluster name to its match-set number
for set_num, clm_set in matches_con_sets.items():
for cl in clm_set:
matches_con_set_num[cl] = set_num
subject_cl_matches_lib = self.match_clusters(require_subsets=False)
matches_lib_sets = {}
matches_lib_set_num = {}
matches_lib_dict = {}
cnt = 0
for k, cma in subject_cl_matches_lib.items():
matches_lib_sets.update({cnt + ii: clx_set for ii, clx_set in enumerate(cma['matches_sets'])})
cnt = len(matches_lib_sets)
matches_lib_dict.update(cma['matches_dict'])
for set_num, clm_set in matches_lib_sets.items():
for cl in clm_set:
matches_lib_set_num[cl] = set_num
try:
unit_cnt = 0
for session in self.sessions:
session_details = session.split("_")
if len(session_details) > 3:
subsession = session_details[3]
else:
subsession = "0000"
session_clusters = self.session_clusters[session]
n_session_cells = session_clusters['n_cell']
n_session_mua = session_clusters['n_mua']
n_session_units = n_session_cells + n_session_mua
session_unit_idx = np.arange(n_session_units) + unit_cnt
subject_units_table.loc[session_unit_idx, "subject_cl_id"] = session_unit_idx
subject_units_table.loc[session_unit_idx, "session"] = session
subject_units_table.loc[session_unit_idx, "task"] = session_details[1]
subject_units_table.loc[session_unit_idx, "date"] = session_details[2]
subject_units_table.loc[session_unit_idx, "subsession"] = subsession
for unit_type in ['cell', 'mua']:
for tt, tt_clusters in session_clusters[f'{unit_type}_IDs'].items():
if len(tt_clusters) > 0:
depth = self.sessions_tt_positions.loc[session, f"tt_{tt}"]
for cl in tt_clusters:
cl_name = f"{session}-tt{tt}_d{depth}_cl{cl}"
subject_units_table.loc[unit_cnt, "subject_cl_id"] = unit_cnt
subject_units_table.loc[unit_cnt, "unique_cl_name"] = cl_name
subject_units_table.loc[unit_cnt, "tt"] = tt
subject_units_table.loc[unit_cnt, "depth"] = depth
subject_units_table.loc[unit_cnt, "unit_type"] = unit_type
subject_units_table.loc[unit_cnt, "session_cl_id"] = cl
subject_units_table.loc[unit_cnt, "snr"] = session_clusters["clusters_snr"][tt][cl]
subject_units_table.loc[unit_cnt, "fr"] = session_clusters["clusters_fr"][tt][cl]
subject_units_table.loc[unit_cnt, "isi_viol_rate"] = \
session_clusters["clusters_isi_viol_rate"][tt][cl]
if unit_type == 'cell':
# add fields of conservative cluster matching (requires subset)
if cl_name in matches_con_dict.keys():
cl_matches = matches_con_dict[cl_name][0]
subject_units_table.loc[unit_cnt, "n_matches_con"] = len(cl_matches)
subject_units_table.loc[unit_cnt, "subject_cl_match_con_id"] = \
matches_con_set_num[cl_name]
# add fields of liberal cluster matching (does not require subset matching)
if cl_name in matches_lib_dict.keys():
cl_matches = matches_lib_dict[cl_name][0]
subject_units_table.loc[unit_cnt, "n_matches_lib"] = len(cl_matches)
subject_units_table.loc[unit_cnt, "subject_cl_match_lib_id"] = \
matches_lib_set_num[cl_name]
unit_cnt += 1
except Exception:
print(session, tt, cl)
traceback.print_exc(file=sys.stdout)
subject_units_table.to_csv(units_table_fn)
else:
subject_units_table = pd.read_csv(units_table_fn, index_col=0)
return subject_units_table
# private methods
def _spk_filter_data(self, session, tt):
tt_str = 'tt_' + str(tt)
sos, _ = pp_funcs.get_sos_filter_bank(['Sp'], fs=self.params['samp_rate'])
sig = np.load(self.session_paths[session]['PreProcessed'] / (tt_str + '.npy'))
hp_data = np.zeros_like(sig)
for ch in range(4):
hp_data[ch] = signal.sosfiltfilt(sos, sig[ch])
return hp_data
def _session_paths(self, session):
time_step = self.params['time_step']
samp_rate = self.params['samp_rate']
tmp = session.split('_')
subject = tmp[0]
task = tmp[1]
date = tmp[2]
paths = {'session': session, 'subject': subject, 'task': task, 'date': date, 'step': time_step, 'SR': samp_rate,
'Sorted': self.sorted_path / session, 'Raw': self.raw_path / session,
'PreProcessed': self.preprocessed_path / session, 'Results': self.results_path / session}
paths['Results'].mkdir(parents=True, exist_ok=True)
paths['behav_track_data'] = paths['Results'] / ('behav_track_data{}ms.pkl'.format(int(time_step * 1000)))
# these paths are mostly legacy
paths['Spike_IDs'] = paths['Results'] / 'Spike_IDs.json'
for ut in ['Cell', 'Mua']:
paths[ut + '_wf_info'] = paths['Results'] / (ut + '_wf_info.pkl')
paths[ut + '_Spikes'] = paths['Results'] / (ut + '_Spikes.json')
paths[ut + '_WaveForms'] = paths['Results'] / (ut + '_WaveForms.pkl')
paths[ut + '_Bin_Spikes'] = paths['Results'] / ('{}_Bin_Spikes_{}ms.npy'.format(ut, int(time_step * 1000)))
paths[ut + '_FR'] = paths['Results'] / ('{}_FR_{}ms.npy'.format(ut, int(time_step * 1000)))
paths['cluster_spikes'] = paths['Results'] / 'spikes.npy'
paths['cluster_spikes_ids'] = paths['Results'] / 'spikes_ids.json'
paths['cluster_wf_info'] = paths['Results'] / 'wf_info.pkl'
paths['cluster_binned_spikes'] = paths['Results'] / f'binned_spikes_{int(time_step * 1000)}ms.npy'
paths['cluster_fr'] = paths['Results'] / 'fr.npy'
paths['cluster_spike_maps'] = paths['Results'] / 'spike_maps.npy'
paths['cluster_fr_maps'] = paths['Results'] / 'maps.npy'
if task == 'OF':
paths['cluster_OF_metrics'] = paths['Results'] / 'OF_metrics.csv'
paths['cluster_OF_encoding_models'] = paths['Results'] / 'OF_encoding.csv'
paths['cluster_OF_encoding_agg_coefs'] = paths['Results'] / 'OF_encoding_agg_coefs.csv'
else:
paths['trial_table'] = paths['Results'] / 'trial_table.csv'
paths['event_table'] = paths['Results'] / 'event_table.csv'
paths['track_table'] = paths['Results'] / 'track_table.csv'
paths['event_time_series'] = paths['Results'] / 'event_time_series.csv'
paths['not_valid_pos_samps'] = paths['Results'] / 'not_valid_pos_samps.npy'
paths['pos_zones'] = paths['Results'] / 'pos_zones.npy'
paths['pos_zones_invalid_samps'] = paths['Results'] / 'pos_zones_invalid_samps.npy'
paths['trial_zone_rates'] = paths['Results'] / 'trial_zone_rates.npy'
paths['zone_rates_comps'] = paths['Results'] / 'zone_rates_comps.csv'
paths['zone_rates_remap'] = paths['Results'] / 'zone_rates_remap.csv'
paths['bal_conds_seg_rates'] = paths['Results'] / 'bal_conds_seg_rates.csv'
paths['zone_analyses'] = paths['Results'] / 'ZoneAnalyses.pkl'
paths['TrialInfo'] = paths['Results'] / 'TrInfo.pkl'
paths['TrialCondMat'] = paths['Results'] / 'TrialCondMat.csv'
paths['TrLongPosMat'] = paths['Results'] / 'TrLongPosMat.csv'
paths['TrLongPosFRDat'] = paths['Results'] / 'TrLongPosFRDat.csv'
paths['TrModelFits2'] = paths['Results'] / 'TrModelFits2.csv'
paths['CueDesc_SegUniRes'] = paths['Results'] / 'CueDesc_SegUniRes.csv'
paths['CueDesc_SegDecRes'] = paths['Results'] / 'CueDesc_SegDecRes.csv'
paths['CueDesc_SegDecSumRes'] = paths['Results'] / 'CueDesc_SegDecSumRes.csv'
paths['PopCueDesc_SegDecSumRes'] = paths['Results'] / 'PopCueDesc_SegDecSumRes.csv'
# plots directories
# paths['Plots'] = paths['Results'] / 'Plots'
# # paths['Plots'].mkdir(parents=True, exist_ok=True)
# paths['SampCountsPlots'] = paths['Plots'] / 'SampCountsPlots'
# # paths['SampCountsPlots'].mkdir(parents=True, exist_ok=True)
#
# paths['ZoneFRPlots'] = paths['Plots'] / 'ZoneFRPlots'
# # paths['ZoneFRPlots'].mkdir(parents=True, exist_ok=True)
#
# paths['ZoneCorrPlots'] = paths['Plots'] / 'ZoneCorrPlots'
# # paths['ZoneCorrPlots'].mkdir(parents=True, exist_ok=True)
# paths['SIPlots'] = paths['Plots'] / 'SIPlots'
# # paths['SIPlots'].mkdir(parents=True, exist_ok=True)
#
# paths['TrialPlots'] = paths['Plots'] / 'TrialPlots'
# # paths['TrialPlots'].mkdir(parents=True, exist_ok=True)
#
# paths['CueDescPlots'] = paths['Plots'] / 'CueDescPlots'
# # paths['CueDescPlots'].mkdir(parents=True, exist_ok=True)
return paths
def _cluster_stats(self, session):
import Sorting.sort_functions as sort_funcs
sort_path = self.session_paths[session]['Sorted']
for tt in self.tetrodes:
tt_str = 'tt_' + str(tt)
_cluster_spike_time_fn = Path(sort_path, tt_str, self.sorter, 'spike_times.npy')
_cluster_spike_ids_fn = Path(sort_path, tt_str, self.sorter, 'spike_clusters.npy')
_cluster_groups_fn = Path(sort_path, ('tt_' + str(tt)), self.sorter, 'cluster_group.tsv')
_cluster_stats_fn = Path(sort_path, ('tt_' + str(tt)), self.sorter, 'cluster_stats.csv')
_hp_data_fn = Path(sort_path, tt_str, self.sorter, 'recording.dat')
_hp_data_info_fn = Path(sort_path, tt_str, tt_str + '_info.pickle')
_cluster_stats_fn2 = Path(sort_path, tt_str, self.sorter, 'cluster_stats_curated.csv')
try:
# load
cluster_groups = pd.read_csv(_cluster_groups_fn, sep='\t')
#!/usr/bin/env python
from __future__ import print_function
from .tabulate import tabulate as tabulate_
import sys
import pandas as pd
import re
import datetime
def _get_version():
import ph._version
return ph._version.__version__
def print_version():
print(_get_version())
# Command line parsing of (1) --abc and (2) --abc=def
KWARG = re.compile("^--[a-z0-9_-]+$")
KWARG_WITH_VALUE = re.compile("^--[a-z0-9_-]+=")
USAGE_TEXT = """
ph is a command line tool for streaming csv data.
If you have a csv file `a.csv`, you can pipe it through `ph` on the
command line by using
$ cat a.csv | ph columns x y | ph eval "z = x**2 - y" | ph show
Use ph help [command] for help on the individual commands.
A list of available commands follows.
"""
COMMANDS = {}
DOCS = {}
def _gpx(fname):
try:
import gpxpy
except ImportError:
sys.exit("ph gpx needs gpxpy, pip install ph[gpx]")
def from_trackpoint(tp=None):
if tp is None:
return "time", "latitude", "longitude", "elevation", "distance"
p = tp.point
return str(p.time), p.latitude, p.longitude, p.elevation, tp.distance_from_start
with open(fname, "r") as fin:
gpx = gpxpy.parse(fin)
data = gpx.get_points_data()
columns = from_trackpoint()
dfdata = [from_trackpoint(tp) for tp in data]
return pd.DataFrame(dfdata, columns=columns)
def _tsv(*args, **kwargs):
kwargs["sep"] = "\t"
return pd.read_csv(*args, **kwargs)
# Readers are referenced directly; the try/except blocks below register
# readers that only exist in later pandas versions, skipping any that are
# missing from the installed version.
READERS = {
"csv": pd.read_csv,
"clipboard": pd.read_clipboard,
"fwf": pd.read_fwf,
"json": pd.read_json,
"html": pd.read_html,
"tsv": _tsv,
"gpx": _gpx,
}
try:
READERS["excel"] = pd.read_excel
READERS["xls"] = pd.read_excel
READERS["odf"] = pd.read_excel
except AttributeError:
pass
try:
READERS["hdf5"] = pd.read_hdf
except AttributeError:
pass
try:
READERS["feather"] = pd.read_feather
except AttributeError:
pass
try:
READERS["parquet"] = pd.read_parquet
except AttributeError:
pass
try:
READERS["orc"] = pd.read_orc
except AttributeError:
pass
try:
READERS["msgpack"] = pd.read_msgpack
except AttributeError:
pass
try:
READERS["stata"] = pd.read_stata
except AttributeError:
pass
try:
READERS["sas"] = pd.read_sas
except AttributeError:
pass
try:
READERS["spss"] = pd.read_spss
except AttributeError:
pass
try:
READERS["pickle"] = pd.read_pickle
except AttributeError:
pass
try:
READERS["gbq"] = pd.read_gbq
except AttributeError:
pass
try:
READERS["google"] = pd.read_gbq
except AttributeError:
pass
try:
READERS["bigquery"] = pd.read_gb
except AttributeError:
pass
WRITERS = {
"csv": "to_csv",
"fwf": "to_fwf",
"json": "to_json",
"html": "to_html",
"clipboard": "to_clipboard",
"xls": "to_excel",
"odf": "to_excel",
"hdf5": "to_hdf",
"feather": "to_feather",
"parquet": "to_parquet",
"orc": "to_orc",
"msgpack": "to_msgpack",
"stata": "to_stata",
"sas": "to_sas",
"spss": "to_spss",
"pickle": "to_pickle",
"gbq": "to_gbq",
"google": "to_gbq",
"bigquery": "to_gbq",
# extras
"tsv": "to_csv",
}
FALSY = ("False", "false", "No", "no", "0", False, 0, "None")
TRUTHY = ("True", "true", "Yes", "yes", "1", True, 1)
def _assert_col(df, col, caller=None):
if col not in df.columns:
if caller is not None:
sys.exit("ph {}: Unknown column {}".format(caller, col))
sys.exit("Unknown column {}".format(col))
def _assert_cols(df, cols, caller=None):
for col in cols:
_assert_col(df, col, caller=caller)
def register(fn, name=None):
if name is None:
name = fn.__name__
COMMANDS[name] = fn
DOCS[name] = fn.__doc__
return fn
def registerx(name):
def inner(fn):
register(fn, name)
return fn
return inner
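# Illustrative sketch (not part of the original module) of how the registry
# above is used. Any function decorated with @register becomes a ph
# sub-command keyed by its name in COMMANDS, with its docstring in DOCS;
# registerx exposes a command under a different name. The command name and
# body below are hypothetical:
#
#   @registerx("head")
#   def head_(n=10):
#       pipeout(pipein().head(int(n)))
#
#   # COMMANDS["head"] now points to head_, so `ph head` dispatches to it.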
@register
def dataset(dset=None):
"""Load dataset as csv.
Usage: ph dataset linnerud | ph describe
"""
try:
import sklearn.datasets
except ImportError:
sys.exit("You need scikit-learn. Install ph[data].")
REALDATA = {
"olivetti_faces": sklearn.datasets.fetch_olivetti_faces,
"20newsgroups": sklearn.datasets.fetch_20newsgroups,
"20newsgroups_vectorized": sklearn.datasets.fetch_20newsgroups_vectorized,
"lfw_people": sklearn.datasets.fetch_lfw_people,
"lfw_pairs": sklearn.datasets.fetch_lfw_pairs,
"covtype": sklearn.datasets.fetch_covtype,
"rcv1": sklearn.datasets.fetch_rcv1,
"kddcup99": sklearn.datasets.fetch_kddcup99,
"california_housing": sklearn.datasets.fetch_california_housing,
}
TOYDATA = {
"boston": sklearn.datasets.load_boston,
"iris": sklearn.datasets.load_iris,
"diabetes": sklearn.datasets.load_diabetes,
"digits": sklearn.datasets.load_digits,
"linnerud": sklearn.datasets.load_linnerud,
"wine": sklearn.datasets.load_wine,
"breast_cancer": sklearn.datasets.load_breast_cancer,
}
if dset is None:
print("type,name")
print("\n".join("{},{}".format("real", k) for k in REALDATA))
print("\n".join("{},{}".format("toy", k) for k in TOYDATA))
sys.exit()
if dset not in TOYDATA.keys() | REALDATA.keys():
sys.exit("Unknown dataset {}. See ph help dataset.".format(dset))
if dset in TOYDATA:
data = TOYDATA[dset]()
else:
data = REALDATA[dset]()
try:
df = pd.DataFrame(data.data, columns=data.feature_names)
except AttributeError:
df = pd.DataFrame(data.data)
try:
df["target"] = pd.Series(data.target)
except Exception:
pass
pipeout(df)
@register
def diff(*cols, periods=1, axis=0):
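# Explanatory note (added): `ph diff` applies pandas DataFrame.diff to the
# piped-in csv. With no arguments the whole frame is differenced; otherwise
# only the named columns are replaced by their row-wise differences,
# e.g. `cat a.csv | ph diff x`.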
df = pipein()
if not cols:
df = df.diff(periods=periods, axis=axis)
else:
_assert_cols(df, cols, "diff")
columns = list(cols)
df[columns] = df[columns].diff(periods=periods, axis=axis)
pipeout(df)
@register
def dropna(axis=0, how="any", thresh=None):
"""Remove rows (or columns) with N/A values.
Argument: --axis=0
Defaults to axis=0 (drop rows); use --axis=1 to drop columns instead.
Argument: --how=any
Defaults to how='any', which removes rows (resp. columns) containing
any nan values. Use how='all' to remove rows (resp. columns) containing
only nan values.
Argument: --thresh=5
If --thresh=x is specified, will delete any row (resp. column) with
fewer than x non-na values.
Usage: cat a.csv | ph dropna
cat a.csv | ph dropna --axis=1 # column-wise instead of row-wise
cat a.csv | ph dropna --thresh=5 # keep rows with >= 5 non-na
cat a.csv | ph dropna --how=all # delete only if all vals na
"""
try:
axis = int(axis)
if axis not in (0, 1):
sys.exit("ph dropna --axis=0 or --axis=1, not {}".format(axis))
except ValueError:
sys.exit("ph dropna --axis=0 or --axis=1, not {}".format(axis))
if thresh is not None:
try:
thresh = int(thresh)
except ValueError:
sys.exit("ph dropna --thresh=0 or --thresh=1, not {}".format(thresh))
df = pipein()
try:
df = df.dropna(axis=axis, how=how, thresh=thresh)
except Exception as err:
sys.exit(str(err))
pipeout(df)
def _safe_out(output):
"""Prints output to standard out, catching broken pipe."""
try:
print(output)
except BrokenPipeError:
try:
sys.stdout.close()
except IOError:
pass
try:
sys.stderr.close()
except IOError:
pass
def pipeout(df, sep=",", index=False, *args, **kwargs):
csv = df.to_csv(sep=sep, index=index, *args, **kwargs)
output = csv.rstrip("\n")
_safe_out(output)
def pipein(ftype="csv", **kwargs):
skiprows = kwargs.get("skiprows")
if skiprows is not None:
try:
skiprows = int(skiprows)
if skiprows < 0:
raise ValueError("Negative")
except ValueError:
sys.exit("skiprows must be a non-negative int, not {}".format(skiprows))
kwargs["skiprows"] = skiprows
if kwargs.get("sep") == "\\t":
kwargs["sep"] = "\t"
try:
return READERS[ftype](sys.stdin, **kwargs)
except pd.errors.EmptyDataError:
return pd.DataFrame()
except pd.errors.ParserError as err:
sys.exit(str(err))
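# Sketch of the pattern every command below follows (explanatory only):
# read the csv stream from stdin, transform the DataFrame, and write csv
# back to stdout so commands can be chained with shell pipes, e.g.
#
#   df = pipein()          # parse stdin as csv into a DataFrame
#   df = df.head(10)       # any pandas transformation (hypothetical step)
#   pipeout(df)            # emit csv on stdout for the next `ph` command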
@register
def fillna(value=None, method=None, limit=None):
"""Fill na values with a certain value or method, at most `limit` many.
Takes either a value, or a method using (e.g.) --method=ffill.
Argument: value
If provided, replaces all N/A values with prescribed value.
Argument: --method=pad
If provided, value cannot be provided. Allowed methods are
backfill, bfill, pad, ffill
Argument: --limit=x
If provided, limits number of consecutive N/A values to fill.
Usage: cat a.csv | ph fillna 999.75
cat a.csv | ph fillna -1
cat a.csv | ph fillna --method=pad
cat a.csv | ph fillna --method=pad --limit=5
"""
if limit is not None:
try:
limit = int(limit)
except ValueError:
sys.exit("--limit=x must be an integer, not {}".format(limit))
METHODS = ("backfill", "bfill", "pad", "ffill")
if method is not None:
if method not in METHODS:
sys.exit("method must be one of {}, not {}".format(METHODS, method))
pipeout(pipein().fillna(method=method, limit=limit))
elif value is not None:
value = __tryparse(value)
pipeout(pipein().fillna(value=value, limit=limit))
else:
sys.exit("'ph fillna' needs exactly one of value and method")
@register
def query(expr):
"""Using pandas queries.
Usage: cat a.csv | ph query "x > 5"
"""
df = pipein()
new_df = df.query(expr)
pipeout(new_df)
@register
def grep(*expr, case=True, na=float("nan"), regex=True, column=None):
"""Grep (with regex) for content in csv file.
Usage: cat a.csv | ph grep 0
cat a.csv | ph grep search_string
cat a.csv | ph grep "A|B" # search hits a or b
cat a.csv | ph grep "a|b" --case=False # case insensitive
cat a.csv | ph grep 4 --column=x
To disable regex (e.g. simple search for "." or "*" characters, use
--regex=False).
Search only in a specific column with --column=col.
Supports regex search queries such as "0-9A-F" and "\\d" (possibly
double-escaped.)
"""
df = pipein()
if case is True or case in TRUTHY:
case = True
elif case in FALSY:
case = False
else:
sys.exit("ph grep: Unknown --case={} should be True or False".format(case))
if regex is True or regex in TRUTHY:
regex = True
elif regex in FALSY:
regex = False
else:
sys.exit("ph grep: Unknown --regex={} should be True or False".format(regex))
if column is not None:
_assert_col(df, column, "grep")
expr = " ".join(str(e) for e in expr) # force string input
try:
import numpy
except ImportError:
sys.exit("numpy needed for grep. pip install numpy")
retval = df[
numpy.logical_or.reduce(
[
df[col].astype(str).str.contains(expr, case=case, na=na, regex=regex)
for col in (df.columns if column is None else [column])
]
)
]
pipeout(retval)
@register
def appendstr(col, s, newcol=None):
"""Special method to append a string to the end of a column.
Usage: cat e.csv | ph appendstr year -01-01 | ph date year
"""
df = pipein()
if newcol is None:
newcol = col
df[newcol] = df[col].astype(str) + s
pipeout(df)
@register
def split(col, pat=" "):
df = pipein()
_assert_col(df, col, "split")
df[[col, col + "_rhs"]] = df[col].str.split(pat=pat, n=1, expand=True)
pipeout(df)
@register
def strip(*cols, lstrip=False, rstrip=False):
"""Strip (trim) a string.
Usage: cat x.csv | ph strip
cat x.csv | ph strip --lstrip=True
cat x.csv | ph strip --rstrip=True
"""
df = pipein()
if not cols:
cols = list(df.columns)
else:
cols = list(cols)
_assert_cols(df, cols, "strip")
for c in cols:
if lstrip in TRUTHY:
df[c] = df[c].str.lstrip()
elif rstrip in TRUTHY:
df[c] = df[c].str.rstrip()
else:
df[c] = df[c].str.strip()
pipeout(df)
@register
def removeprefix(col, prefix=" "):
"""Remove prefix of contents of a column.
Usage: cat a.csv | ph removeprefix col1 ..
See also @removesuffix @strip
"""
prefix = str(prefix)
plen = len(prefix)
df = pipein()
_assert_col(df, col, "removeprefix")
df[col] = df[col].apply(
lambda s: str(s)[plen:] if str(s).startswith(prefix) else str(s)
)
pipeout(df)
@register
def removesuffix(col, suffix=" "):
"""Remove suffix of contents of a column.
Usage: cat a.csv | ph removesuffix col1 ..
See also @removeprefix @strip
"""
suffix = str(suffix)
plen = len(suffix)
df = pipein()
_assert_col(df, col, "removesuffix")
df[col] = df[col].apply(
lambda s: str(s)[:-plen] if str(s).endswith(suffix) else str(s)
)
pipeout(df)
@register
def astype(type, column=None, newcolumn=None):
"""Cast a column to a different type.
Usage: cat a.csv | ph astype double x [new_x]
"""
df = pipein()
try:
if column is None:
df = df.astype(type)
elif newcolumn is not None:
df[newcolumn] = df[column].astype(type)
else:
df[column] = df[column].astype(type)
except ValueError as err:
sys.exit("Could not convert to {}: {}".format(type, err))
pipeout(df)
@register
def dtypes(t=None):
"""If no argument is provided, output types, otherwise filter on types.
If no argument is provided, output a csv with two columns, "column" and
"dtype". The "column" column contains the names of the columns in the input
csv and the "dtype" column contains their respective types.
If an argument is provided, all columns with the prescribed type are output.
Usage: cat a.csv | ph dtypes
cat a.csv | ph dtypes float64
"""
if t is None:
df = pipein()
newdf = pd.DataFrame(pd.Series(df.columns))
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import operator
import openpyxl
class DEError(Exception):
'''Base class for exceptions raised in this module.'''
pass
class InitializationError(DEError):
'''Exception for initialization errors.'''
pass
class DEManager():
def __init__(self, run, de_class, kw_args, excel_results='results.xlsx'):
self.__de_class = de_class
self.__run = run
self.__kw_args = kw_args
self._df_res_columns = ['ID', 'Population_Size', 'Mutation_Factor', 'Crossover_Prob', 'Max_Iter', 'Max_FES', 'Min_Error', 'Min_Error_Length', 'Run',
'Iterations', 'FES', 'Eval', 'XBest']
self._df_res = pd.DataFrame(columns=self._df_res_columns)
self._res_path = excel_results
def run(self):
final_results = []
# For each configuration
for i in range(len(self.__kw_args)):
results = []
# Repeat for the configured number of runs
for j in range(self.__run):
# Instantiate DE
DE = eval(self.__de_class)(**self.__kw_args[i])
DE.fit() # Runs DE instance
run_results = DE.get_results() # Get results
results.append(run_results) # Save results
# Save result in df
res_to_df = [i, self.__kw_args[i].get('population_size'), self.__kw_args[i].get('mutation_factor'), self.__kw_args[i].get('cross_prob'), self.__kw_args[i].get('max_it'), self.__kw_args[i].get('max_fes'),
self.__kw_args[i].get('min_error'), self.__kw_args[i].get('min_error_length'), j]
res_to_df.extend(run_results[:-1])
res_to_df.append('; '.join([str(x) for x in run_results[-1]]))
res_to_df = pd.Series(res_to_df, index=self._df_res_columns)
import pandas as pd
import numpy as np
import pdb
# Necessary for matching filenames downloaded from the
# CONAPO website
STATES = ["Aguascalientes", "BajaCalifornia", "BajaCaliforniaSur",
"Campeche", "Chiapas", "Chihuahua", "Coahuila", "Colima",
"DistritoFederal", "Durango", "Mexico", "Guanajuato",
"Guerrero", "Hidalgo", "Jalisco", "Michoacan", "Morelos",
"Nayarit", "NuevoLeon", "Oaxaca", "Puebla", "Queretaro",
"QuintanaRoo", "SanLuisPotosi", "Sinaloa", "Sonora", "Tabasco",
"Tamaulipas", "Tlaxcala", "Veracruz", "Yucatan", "Zacatecas"]
years = range(2010, 2031)
def sex(x):
'''
Switch statement to translate sex
'''
return {
'Ambos': 'Total',
'Hombres': 'Males',
'Mujeres' : 'Females',
}[x]
def read_colmex(sheet, year):
xlsxfile = pd.ExcelFile("data/" + "colmex" + "/" + str(year) + "total" + ".xls")
data = xlsxfile.parse(sheet,
index_col=None, header=None, skiprows = 9, skip_footer = 1)
data = data[pd.notnull(data[0])]
data = data.iloc[0:len(data), 0:3]
data['Year'] = year
data.columns = ['Code', 'MunName', 'Population', 'Year']
del data['MunName']
data['Sex'] = {
'Total': 'Total',
'Hombres': 'Males',
'Mujeres' : 'Females',
}[sheet]
return data
df_colmex = pd.DataFrame()
for year in range(1990, 2010):
df_colmex = df_colmex.append(read_colmex("Total", year))
df_colmex = df_colmex.append(read_colmex("Hombres", year))
df_colmex = df_colmex.append(read_colmex("Mujeres", year))
#df[['Code', 'Population']] = df[['Code', 'Population']].apply(lambda col: np.round(col).astype(int))
#df[['Code', 'Population']] = df[['Code', 'Population']].apply(lambda col: col.astype(float))
df_colmex.Population[df_colmex.Year > 2005] = np.nan
df_colmex.to_csv('clean-data/municipio-population1990-2009.csv',
encoding='utf-8',
index = False)
df_conapo = pd.DataFrame()
# rows with data start from the 17th row
offset = 16
# 2010-2030, excluding the columns for sex, year, etc.
ncols = 25
for file in STATES:
xlsxfile = pd.ExcelFile("data/" + "municipalities" + "/" + file + ".xlsx")
data = xlsxfile.parse(file,
index_col=None, header=None)
#Get rid of all the blank rows
data = data[pd.notnull(data[4])]
for i in range(offset, len(data), 5):
x = pd.DataFrame(data.iloc[(i):(i + 5), 4:ncols].sum())
x['Code'] = data.iloc[(i)][0]#.astype(int)
x['Sex'] = sex(data.iloc[(i)][2])
x['Year'] = years
df_conapo = df_conapo.append(x)
df_conapo.columns = ['Population', 'Code', 'Sex', 'Year']
df_conapo[['Population', 'Code']] = \
    df_conapo[['Population', 'Code']].\
    apply(lambda col: np.round(col).astype(int))
df_conapo.to_csv('clean-data/municipio-population2010-2030.csv',
encoding='utf-8',
index = False)
df = df_colmex.append(df_conapo)
df = df.sort_values(['Code', 'Sex', 'Year'])
df['Code2'] = df['Code']
#Replace San Ignacio Cerro Gordo with Arandas to match the population data
df.Code2[df.Code2 == 14125] = 14008
#Tulum was created from Solidaridad
df.Code2[df.Code2 == 23009] = 23008
#Bacalar was created from <NAME>
df.Code2[df.Code2 == 23010] = 23004
del df['Code']
#pdb.set_trace()
df = df.groupby(['Code2', 'Sex', 'Year']).aggregate(np.sum)
df = pd.DataFrame(df)
from context import dero
import dero.data.ff.create.sort as ff_sort
import pandas as pd
from pandas.util.testing import assert_frame_equal
from pandas import Timestamp
from numpy import nan
import numpy
import datetime
class DataFrameTest:
df_3_fac = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01, .51, 1000),
(10516, 'a', '1/2/2000', 1.02, .52, 2000),
(10516, 'a', '1/3/2000', 1.03, .53, 3000),
(10516, 'a', '1/4/2000', 1.04, .54, 4000),
(10516, 'b', '1/1/2000', 1.05, 1.55, 50000),
(10516, 'b', '1/2/2000', 1.06, 1.56, 60000),
(10516, 'b', '1/3/2000', 1.07, 1.57, 70000),
(10516, 'b', '1/4/2000', 1.08, 1.58, 80000),
(10517, 'a', '1/1/2000', 1.09, .59, 9000),
(10517, 'a', '1/2/2000', 1.10, .60, 10000),
(10517, 'a', '1/3/2000', 1.11, .61, 11000),
(10517, 'a', '1/4/2000', 1.12, .62, 12000),
(10517, 'b', '1/1/2000', 1.13, .63, 13000),
(10517, 'b', '1/2/2000', 1.14, .64, 14000),
(10517, 'b', '1/3/2000', 1.15, .65, 15000),
(10517, 'b', '1/4/2000', 1.16, .66, 16000),
(10518, 'a', '1/1/2000', 1.17, .67, 17000),
(10518, 'a', '1/2/2000', 1.18, .68, 18000),
(10518, 'a', '1/3/2000', 1.19, .69, 19000),
(10518, 'a', '1/4/2000', 1.20, .70, 20000),
(10518, 'b', '1/1/2000', 1.21, .71, 21000),
(10518, 'b', '1/2/2000', 1.22, .72, 22000),
(10518, 'b', '1/3/2000', 1.23, .73, 23000),
(10518, 'b', '1/4/2000', 1.24, .74, 24000),
], columns=['PERMNO', 'byvar', 'Date', 'RET', 'be/me', 'me'])
df_3_fac['Date'] = pd.to_datetime(df_3_fac['Date'])
class TestCalculateFFFactors(DataFrameTest):
def test_create_portfolios(self):
expect_df = pd.DataFrame(data=[
(10516, 'a', Timestamp('2000-01-01 00:00:00'), 1.01, 0.51, 1000, 1, Timestamp('2000-01-01 00:00:00'), 1,
Timestamp('2000-01-01 00:00:00')),
(10516, 'a', Timestamp('2000-01-02 00:00:00'), 1.02, 0.52, 2000, 1, Timestamp('2000-01-01 00:00:00'), 1,
Timestamp('2000-01-01 00:00:00')),
(10516, 'a', Timestamp('2000-01-03 00:00:00'), 1.03, 0.53, 3000, 1, Timestamp('2000-01-01 00:00:00'), 1,
Timestamp('2000-01-01 00:00:00')),
(10516, 'a', Timestamp('2000-01-04 00:00:00'), 1.04, 0.54, 4000, 1, Timestamp('2000-01-01 00:00:00'), 1,
Timestamp('2000-01-01 00:00:00')),
(10516, 'b', Timestamp('2000-01-01 00:00:00'), 1.05, 1.55, 50000, 3, Timestamp('2000-01-01 00:00:00'), 2,
Timestamp('2000-01-01 00:00:00')),
(10516, 'b', Timestamp('2000-01-02 00:00:00'), 1.06, 1.56, 60000, 3, Timestamp('2000-01-01 00:00:00'), 2,
Timestamp('2000-01-01 00:00:00')),
# -*- coding: utf-8 -*-
from ftplib import FTP
from urllib.parse import urlparse
import errno
import os
import shutil
import sys
import tarfile
import urllib.request
import zipfile
from datapackage import Package, Resource
from geojson import Feature, FeatureCollection, dump, load
from shapely.geometry import shape
import pandas as pd
import paramiko
import toml
def infer_resources(directory="data/elements"):
""" Method looks at all files in `directory` and creates
datapackage.Resource object that will be stored
Parameters
----------
directory: string
Path to directory from where resources are inferred
"""
if not os.path.exists("resources"):
os.makedirs("resources")
# create meta data resources
for f in os.listdir(directory):
r = Resource({"path": os.path.join(directory, f)})
r.infer()
r.save(os.path.join("resources", f.replace(".csv", ".json")))
def update_package_descriptor():
"""
"""
p = Package("datapackage.json")
for f in os.listdir("resources"):
path = os.path.join("resources", f)
r = Resource(path)
p.add_resource(r.descriptor)
p.commit()
os.remove(path)
os.rmdir("resources")
p.save("datapackage.json")
def infer_metadata(
package_name="default-name",
keep_resources=False,
foreign_keys={
"bus": [
"volatile",
"dispatchable",
"storage",
"load",
"reservoir",
"shortage",
"excess",
],
"profile": ["load", "volatile", "ror"],
"from_to_bus": ["connection", "line", "conversion"],
"chp": ["backpressure", "extraction", "chp"],
},
path=None,
):
""" Add basic meta data for a datapackage
Parameters
----------
package_name: string
Name of the data package
keep_resources: boolean
Flag indicating whether the resource meta data json-files should be kept
after the main datapackage.json is created. The resource meta data will
be stored in the `resources` directory.
foreign_keys: dict
Dictionary with the foreign key specification. Keys are:
'bus', 'profile', 'from_to_bus', 'chp'. Values are lists of strings
with the names of the resources.
path: string
Absolute path to the root folder of the datapackage
"""
current_path = os.getcwd()
if path:
print("Setting current work directory to {}".format(path))
os.chdir(path)
p = Package()
p.descriptor["name"] = package_name
p.descriptor["profile"] = "tabular-data-package"
p.commit()
if not os.path.exists("resources"):
os.makedirs("resources")
# create meta data resources elements
if not os.path.exists("data/elements"):
print(
"No data path found in directory {}. Skipping...".format(
os.getcwd()
)
)
else:
for f in os.listdir("data/elements"):
r = Resource({"path": os.path.join("data/elements", f)})
r.infer()
r.descriptor["schema"]["primaryKey"] = "name"
if r.name in foreign_keys.get("bus", []):
r.descriptor["schema"]["foreignKeys"] = [
{
"fields": "bus",
"reference": {"resource": "bus", "fields": "name"},
}
]
if r.name in foreign_keys.get("profile", []):
r.descriptor["schema"]["foreignKeys"].append(
{
"fields": "profile",
"reference": {"resource": r.name + "_profile"},
}
)
elif r.name in foreign_keys.get("from_to_bus", []):
r.descriptor["schema"]["foreignKeys"] = [
{
"fields": "from_bus",
"reference": {"resource": "bus", "fields": "name"},
},
{
"fields": "to_bus",
"reference": {"resource": "bus", "fields": "name"},
},
]
elif r.name in foreign_keys.get("chp", []):
r.descriptor["schema"]["foreignKeys"] = [
{
"fields": "fuel_bus",
"reference": {"resource": "bus", "fields": "name"},
},
{
"fields": "electricity_bus",
"reference": {"resource": "bus", "fields": "name"},
},
{
"fields": "heat_bus",
"reference": {"resource": "bus", "fields": "name"},
},
]
r.commit()
r.save(os.path.join("resources", f.replace(".csv", ".json")))
p.add_resource(r.descriptor)
# create meta data resources elements
if not os.path.exists("data/sequences"):
print(
"No data path found in directory {}. Skipping...".format(
os.getcwd()
)
)
else:
for f in os.listdir("data/sequences"):
r = Resource({"path": os.path.join("data/sequences", f)})
r.infer()
r.commit()
r.save(os.path.join("resources", f.replace(".csv", ".json")))
p.add_resource(r.descriptor)
p.commit()
p.save("datapackage.json")
if not keep_resources:
shutil.rmtree("resources")
os.chdir(current_path)
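# Example usage (a sketch; the package name, path and resource names are
# hypothetical):
#
#   infer_metadata(
#       package_name="my-energy-system",
#       foreign_keys={"bus": ["volatile", "load"], "profile": ["load"]},
#       path="/path/to/datapackage",
#   )
#
# This infers resource descriptors for data/elements and data/sequences and
# writes a datapackage.json at the datapackage root.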
def package_from_resources(resource_path, output_path, clean=True):
""" Collects resource descriptors and merges them in a datapackage.json
Parameters
----------
resource_path: string
Path to directory with resources (in .json format)
output_path: string
Root path of the datapackage where the newly created datapackage.json is
stored
clean: boolean
If true, resources will be deleted
"""
p = Package()
p.descriptor["profile"] = "tabular-data-package"
p.commit()
for f in os.listdir(resource_path):
path = os.path.join(resource_path, f)
r = Resource(path)
p.add_resource(r.descriptor)
p.commit()
os.remove(path)
if clean:
os.rmdir(resource_path)
p.save(os.path.join(output_path, "datapackage.json"))
def _ftp(remotepath, localpath, hostname, username=None, passwd=""):
""" Download data with FTP
Parameters
----------
remotepath: str
The remote file to copy.
localpath: str
The destination path on localhost.
hostname: str
The server to connect to.
username: str
The username to authenticate as.
passwd: str
The password to authenticate with.
"""
ftp = FTP(hostname)
if username:
ftp.login(user=username, passwd=passwd)
else:
ftp.login()
ftp.retrbinary("RETR " + remotepath, open(localpath, "wb").write)
ftp.quit()
return
def _sftp(
remotepath, localpath, hostname="", username="rutherford", password=""
):
""" Download data with SFTP
Parameters
----------
remotepath: str
The remote file to copy.
localpath: str
The destination path on localhost.
hostname: str
The server to connect to.
username:
The username to authenticate as.
"""
client = paramiko.SSHClient()
client.load_host_keys(os.path.expanduser("~/.ssh/known_hosts"))
client.connect(hostname=hostname, username=username, password=password)
sftp = client.open_sftp()
sftp.get(remotepath, localpath)
sftp.close()
client.close()
return
def _http(url, path):
""" Download data with HTTP
Parameters
----------
url: str
Url of file to be downloaded.
path: str
The destination path on localhost.
"""
user_agent = (
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) "
"Gecko/2009021910 "
"Firefox/3.0.7"
)
headers = {"User-Agent": user_agent}
request = urllib.request.Request(url, None, headers)
f = urllib.request.urlopen(request)
data = f.read()
with open(path, "wb") as code:
code.write(data)
return
def download_data(url, directory="cache", unzip_file=None, **kwargs):
"""
Downloads data and stores it in specified directory
Parameters
----------
url: str
Url of file to be downloaded.
directory: str
Name of directory where to store the downloaded data.
Default is 'cache'-
unzip_file: str
Regular or directory file name to be extracted from zip source.
kwargs:
Additional keyword arguments.
"""
scheme, netloc, path, params, query, fragment = urlparse(url)
if not unzip_file:
filepath = os.path.join(directory, os.path.basename(path))
copypath = filepath
else:
filepath = os.path.join(directory, unzip_file)
copypath = os.path.join(directory, os.path.basename(path))
if os.path.exists(filepath):
return filepath
else:
if scheme in ["http", "https"]:
_http(url, copypath)
elif scheme == "sftp":
_sftp(path, copypath, hostname=netloc, **kwargs)
elif scheme == "ftp":
_ftp(path, copypath, hostname=netloc, **kwargs)
else:
raise ValueError(
"Cannot download data. Not supported scheme \
in {}.".format(
url
)
)
if unzip_file is not None:
def member(x):
return x.startswith(unzip_file.split("/")[0])
if copypath.endswith(".zip"):
zipped = zipfile.ZipFile(copypath, "r")
if unzip_file.endswith("/"):
zipped.extractall(
filepath, members=list(filter(member, zipped.namelist()))
)
else:
zipped.extract(unzip_file, directory)
zipped.close()
elif copypath.endswith(".tar.gz"):
tar = tarfile.open(copypath, "r:gz")
if unzip_file.endswith("/"):
tar.extractall(
filepath,
members=list(
filter(member, [t.name for t in tar.getmembers()])
),
)
else:
tar.extract(unzip_file, directory)
tar.close()
os.remove(copypath)
return filepath
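# Example usage (a sketch; URL and file names are hypothetical):
#
#   filepath = download_data(
#       "https://example.com/archive/raw-data.zip",
#       directory="cache",
#       unzip_file="raw-data/plants.csv",
#   )
#
# If the target already exists in `directory`, the download is skipped and
# the cached path is returned.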
def timeindex(year, periods=8760, freq='H'):
""" Create pandas datetimeindex.
Parameters
----------
year: string
Year of the index
periods: string
Number of periods, default: 8760
freq: string
Freq of the datetimeindex, default: 'H'
"""
idx = pd.date_range(start=year, periods=periods, freq=freq)
return idx
def initialize(config, directory='.'):
""" Initialize datapackage by reading config file and creating required
directories (data/elements, data/sequences etc.) if directories are
not specified in the config file, the default directory setup up
will be used.
"""
sub_directories = {
"elements": "data/elements",
"sequences": "data/sequences",
"geometries": "data/geometries",
}
if not config:
try:
default = "config.json"
config = read_build_config(default)
except FileNotFoundError as e:
message = (
"{}\n"
"Cause:\n"
"Default path `{}` of config file could not be found!"
).format(e, default)
raise FileNotFoundError(message).with_traceback(
sys.exc_info()[2]
) from None
sub_directories.update(config.get("sub-directories", {}))
for subdir in sub_directories.values():
try:
os.makedirs(os.path.join(directory, subdir))
except OSError as e:
if e.errno != errno.EEXIST:
raise
return sub_directories
def input_filepath(file, directory="archive/"):
"""
"""
file_path = os.path.join(directory, file)
if not os.path.exists(file_path):
raise FileNotFoundError(
"""File with name
{}
does not exist. Please make sure you download the file from
the sources listed and store it in the directory:
{}.
""".format(
file_path, directory
)
)
return file_path
def read_build_config(file="build.toml"):
""" Read config build file in toml format
Parameters
----------
file: string
String with name of config file
"""
try:
config = toml.load(file)
# create paths
if config.get("directories"):
config["directories"] = {
k: os.path.join(os.getcwd(), v)
for k, v in config["directories"].items()
}
except Exception as e:
message = (
"{}\n" "Cause:\n" "Build config file '{}' could not be read."
).format(e, file)
raise type(e)(message).with_traceback(sys.exc_info()[2]) from None
return config
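# A minimal build.toml this reader would accept (a sketch; the directory
# names are examples). Values under [directories] are resolved to absolute
# paths, and [sub-directories] is consumed by initialize() above:
#
#   [directories]
#   archive = "archive"
#   cache = "cache"
#
#   [sub-directories]
#   elements = "data/elements"
#   sequences = "data/sequences"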
def read_sequences(filename, directory="data/sequences"):
""" Reads sequence resources from the datapackage
Parameters
----------
filename: string
Name of the sequences to be read, for example `load_profile.csv`
directory: string
Directory from where the file should be read. Default: `data/sequences`
"""
path = os.path.join(directory, filename)
if os.path.exists(path):
sequences = pd.read_csv(
path, sep=";", index_col=["timeindex"], parse_dates=True
)
else:
sequences = pd.DataFrame(columns=["timeindex"]).set_index("timeindex")
return sequences
def read_elements(filename, directory="data/elements"):
"""
Reads element resources from the datapackage
Parameters
----------
filename: string
Name of the elements to be read, for example `load.csv`
directory: string
Directory where the file is located. Default: `data/elements`
Returns
-------
pd.DataFrame
"""
path = os.path.join(directory, filename)
if os.path.exists(path):
elements = pd.read_csv(path, sep=";")
elements.set_index("name", inplace=True)
else:
elements = pd.DataFrame(columns=["name"]).set_index("name")
return elements
def read_geometries(filename, directory="data/geometries"):
"""
Reads geometry resources from the datapackage. Data may either be stored
in geojson format or as WKT representation in CSV-files.
Parameters
----------
filename: string
Name of the elements to be read, for example `buses.geojson`
directory: string
Directory where the file is located. Default: `data/geometries`
Returns
-------
pd.Series
"""
path = os.path.join(directory, filename)
if os.path.splitext(filename)[1] == ".geojson":
if os.path.exists(path):
with open(path, "r") as infile:
features = load(infile)["features"]
names = [f["properties"]["name"] for f in features]
geometries = [shape(f["geometry"]) for f in features]
geometries = pd.Series(dict(zip(names, geometries)))
if os.path.splitext(filename)[1] == ".csv":
if os.path.exists(path):
geometries = pd.read_csv(path, sep=";", index_col=["name"])
else:
geometries = pd.Series(name="geometry")
#!/usr/bin/env python
import os
import sys
import pandas as pd
import numpy as np
import time
from sqlalchemy import create_engine
from itertools import repeat
import multiprocessing
import tqdm
import genes
import eqtls
def find_snps(
inter_df,
gene_info_df,
tissues,
output_dir,
C,
genotypes_fp,
_eqtl_project_db,
covariates_dir,
expression_dir,
pval_threshold,
maf_threshold,
fdr_threshold,
num_processes,
_db,
logger,
suppress_intermediate_files=False
):
start_time = time.time()
global eqtl_project_db
eqtl_project_db = _eqtl_project_db
global db
db = _db
enzymes = inter_df['enzyme'].drop_duplicates().tolist()
hic_libs = genes.fetch_hic_libs(db)
hic_libs = hic_libs.rename(columns={'rep_count': 'cell_line_replicates'})
inter_df = inter_df.merge(
hic_libs, how='left',
left_on=['cell_line', 'enzyme'], right_on=['library', 'enzyme'])
default_chrom = ['chr' + str(i)
for i in list(range(1, 23))] + ['X', 'Y', 'M']
chrom_list = inter_df['fragment_chr'].drop_duplicates().tolist()
chrom_list = [i for i in default_chrom if i in chrom_list]
inter_df = inter_df[inter_df['fragment_chr'].isin(default_chrom)]
inter_df = inter_df.astype({'fragment': int})
gene_info_df = gene_info_df.rename(
columns={
'name': 'gene',
'chr': 'gene_chr',
'start': 'gene_start',
'end': 'gene_end',
'fragment': 'gene_fragment',
'id': 'gene_id'})
all_snps = []
all_genes = []
all_eqtls = []
logger.write('Finding SNPs within fragments interacting with genes in...')
for chrom in sorted(chrom_list):
chrom_dir = os.path.join(output_dir, chrom)
#if os.path.exists(os.path.join(chrom_dir, 'eqtls.txt')):
# logger.write(' Warning: {} already exists. Skipping.'.format(
# os.path.join(chrom_dir, 'eqtls.txt')))
# continue
logger.write(' Chromosome {}'.format(chrom))
snp_cols = ['snp', 'variant_id', 'chr',
'locus', 'id', 'fragment', 'enzyme']
chrom_df = inter_df[inter_df['fragment_chr'] == chrom]
chrom_df = chrom_df.astype({'fragment': int,
'fragment_chr': object})
enzymes = chrom_df['enzyme'].drop_duplicates().tolist()
snp_df = []
for enzyme in enzymes:
enzyme_df = chrom_df[chrom_df['enzyme'] == enzyme]
enzyme_df = enzyme_df.merge(
gene_info_df, how='inner',
left_on=['query_chr', 'query_fragment', 'enzyme'],
right_on=['chrom', 'gene_fragment', 'enzyme'])
fragment_df = enzyme_df[
['gencode_id', 'fragment_chr', 'fragment']].drop_duplicates()
enzyme_df = enzyme_df.sort_values(by=['fragment'])
chunksize = 20000
enzyme_chunks = [enzyme_df[i:i+chunksize]
for i in range(0, enzyme_df.shape[0], chunksize)]
manager = multiprocessing.Manager()
snps = manager.list()
desc = ' * Hi-C libraries restricted with {}'.format(
enzyme)
bar_format = '{desc}: {percentage:3.0f}% |{bar}| {n_fmt}/{total_fmt} {unit}'
'''
for df in tqdm.tqdm(enzyme_chunks, desc=desc, unit='batches',
ncols=80, bar_format=bar_format):
find_gene_snps(
df,
enzyme,
snps)
'''
with multiprocessing.Pool(processes=4) as pool:
for _ in tqdm.tqdm(
pool.istarmap(
find_gene_snps,
zip(enzyme_chunks,
repeat(enzyme),
repeat(snps))
),
total=len(enzyme_chunks), desc=desc, unit='batches',
ncols=80, bar_format=bar_format):
pass
for df in snps:
df['enzyme'] = enzyme
snp_df.append(df)
if len(snp_df) == 0:
continue
snp_df = pd.concat(snp_df)
logger.verbose = False
gene_df, snp_df = filter_snp_fragments(
snp_df, logger)
snp_df.sort_values(by=['variant_id'], inplace=True)
snp_list = snp_df['variant_id'].drop_duplicates().tolist()
batchsize = 2000
snp_batches = [snp_list[i:i + batchsize]
for i in range(0, len(snp_list), batchsize)]
chrom_eqtl_df = []
for batch_num, snp_batch in enumerate(snp_batches):
if len(snp_batches) > 1:
logger.verbose = True
logger.write(' Mapping eQTLs batch {} of {}'.format(
batch_num+1, len(snp_batches)))
logger.verbose = False
batch_gene_df = gene_df[gene_df['variant_id'].isin(snp_batch)]
eqtl_df = eqtls.map_eqtls(
batch_gene_df,
tissues,
output_dir,
C,
genotypes_fp,
num_processes,
eqtl_project_db,
covariates_dir,
expression_dir,
pval_threshold,
maf_threshold,
fdr_threshold,
logger)
if eqtl_df is None:
continue
chrom_eqtl_df.append(eqtl_df)
if len(chrom_eqtl_df) > 0:
chrom_eqtl_df = pd.concat(chrom_eqtl_df)
else:
chrom_eqtl_df = pd.DataFrame()
if not suppress_intermediate_files:
os.makedirs(chrom_dir, exist_ok=True)
snp_df.to_csv(os.path.join(chrom_dir, 'snps.txt'),
sep='\t', index=False)
gene_df.to_csv(os.path.join(chrom_dir, 'genes.txt'),
sep='\t', index=False)
chrom_eqtl_df.to_csv(os.path.join(chrom_dir, 'eqtls.txt'),
sep='\t', index=False)
all_eqtls.append(chrom_eqtl_df)
all_snps.append(snp_df)
all_genes.append(gene_df)
logger.verbose = True
if len(all_eqtls) == 0:
snp_df = pd.DataFrame()
gene_df = pd.DataFrame()
eqtl_df = pd.DataFrame()
else:
snp_df = pd.concat(all_snps)
gene_df = pd.concat(all_genes)
eqtl_df = pd.concat(all_eqtls)
logger.verbose = True
logger.write(' Time elasped: {:.2f} mins.'.format(
(time.time() - start_time)/60))
return snp_df, gene_df, eqtl_df
def find_gene_snps(
inter_df,
enzyme,
snps#,
#db,
):
db.dispose()
eqtl_project_db.dispose()
table = 'variant_lookup_{}'
if enzyme in ['MboI', 'DpnII']: # MboI and DpnII have the same restriction sites
table = table.format('mboi')
else:
table = table.format(enzyme.lower())
chrom = inter_df['fragment_chr'].drop_duplicates().tolist()[0]
sql = '''SELECT * FROM {} WHERE chrom = '{}' AND frag_id >= {} AND frag_id <= {}'''
df = pd.DataFrame()
con = eqtl_project_db.connect()
res = con.execute(
sql.format(
table, chrom, inter_df['fragment'].min(), inter_df['fragment'].max())
).fetchall()
con.close()
if res:
df = pd.DataFrame(res, columns=['frag_id', 'chrom', 'id'])
inter_df = inter_df.rename(columns={'chrom': 'gene_chr'})
df = inter_df.merge(
df, how='inner', left_on=['fragment'], right_on=['frag_id'])
df['id'] = df['id'].astype('Int64')
df = df[df['frag_id'].notnull()]
df = df.drop_duplicates()
snp_df = find_snp_by_id(df, eqtl_project_db)
if snp_df.empty:
return
snp_df = snp_df.merge(df, how='inner', on=['id', 'chrom'])
snp_df['enzyme'] = enzyme
snp_df = snp_df.rename(columns={'rsid': 'snp'})
snps.append(snp_df.drop_duplicates())
def find_snp_by_id(df, db):
df = df.sort_values(by=['id'])
chunksize = eqtls.calc_chunksize(
df['id'].tolist(), 2000000)
chunks = [df[i:i+chunksize] for i in range(0, len(df), chunksize)]
snp_df = []
sql = '''SELECT rsid, variant_id, chrom, locus, id FROM variants WHERE id >= {}
and id <= {}'''
con = db.connect()
for chunk in chunks:
res = con.execute(sql.format(chunk['id'].min(), chunk['id'].max())).fetchall()
if not res:
continue
res = pd.DataFrame(res, columns=['rsid', 'variant_id', 'chrom', 'locus', 'id'])
snp_df.append(res[res['id'].isin(chunk['id'])])
con.close()
if len(snp_df) == 0:
return pd.DataFrame()
return pd.concat(snp_df)
def find_snp_by_variant_id(df, db):
snp_df = []
sql = '''SELECT rsid, variant_id, chrom, locus, id FROM variants WHERE
variant_id >= '{}' and variant_id <= '{}' '''
with db.connect() as con:
snp_df = pd.read_sql(
sql.format(df['variant_id'].min(), df['variant_id'].max()), con=con)
if len(snp_df) == 0:
return pd.DataFrame()
snp_df = snp_df[snp_df['variant_id'].isin(df['variant_id'])]
omitted_snps = df[
~df['variant_id'].isin(snp_df['variant_id'])
]['variant_id'].drop_duplicates().tolist()
return snp_df, omitted_snps
def filter_snp_fragments(
snp_df,
logger):
''' Filter snp-fragment interactions '''
logger.write(' * Filtering gene-SNP interactions...')
snp_df['interactions'] = snp_df.groupby(
['variant_id', 'gene', 'cell_line', 'enzyme'])[
'gene_fragment'].transform('count')
snp_df['replicates'] = snp_df.groupby(
['variant_id', 'gene', 'cell_line', 'enzyme'])[
'replicate'].transform('count')
snp_df = snp_df.drop(columns=['replicate', 'gene_fragment'])
snp_df = snp_df.drop_duplicates()
snp_df['sum_interactions'] = snp_df.groupby(
['variant_id', 'gene'])[
'interactions'].transform('sum')
snp_df['sum_replicates'] = snp_df.groupby(
['variant_id', 'gene'])[
'replicates'].transform('sum')
snp_df['sum_cell_lines'] = snp_df.groupby(
['variant_id', 'gene'])['cell_line'].transform('count')
condition = (
(snp_df['interactions'] / snp_df['cell_line_replicates'] <= 1) &
(snp_df['sum_replicates'] < 2) &
(snp_df['sum_cell_lines'] < 2))
gene_df = snp_df[~condition]
snp_df = gene_df[
['id', 'snp', 'chrom', 'locus', 'variant_id', 'fragment', 'enzyme']
]
snp_df.drop_duplicates(inplace=True)
cols = ['snp', 'chrom', 'locus', 'variant_id',
'gene', 'gencode_id', 'gene_chr', 'gene_start', 'gene_end',
'interactions', 'replicates', 'enzyme',
'cell_line', 'cell_line_replicates', 'sum_interactions',
'sum_replicates', 'sum_cell_lines']
return gene_df[cols], snp_df
def process_rs_df_whole(rs_df, db):
rsid_tuple = ()
if len(rs_df) == 1:
rsid_tuple = str(tuple(rs_df['snp'])).replace(',', '')
else:
rsid_tuple = str(tuple(rs_df['snp']))
sql = 'SELECT id, rsid, chrom, locus, variant_id FROM variants WHERE rsid IN {}'
with db.connect() as con:
return pd.read_sql_query(sql.format(rsid_tuple), con=con)
def process_rs_df(rs_df, db):
rsid_tuple = ()
df = []
sql = '''SELECT id, rsid, chrom, locus, variant_id FROM variants WHERE rsid = '{}' '''
with db.connect() as con:
for idx, row in rs_df.iterrows():
df.append(pd.read_sql_query(sql.format(row['snp']), con=con))
df = pd.concat(df)
return df
def process_position(position, db):
chrom = ''
locus = None
temp_df = None
try:
chrom = position.split(':')[0].strip()
locus = position.split(':')[1].strip()
if '-' in locus:
start = locus.split('-')[0].strip()
end = locus.split('-')[1].strip()
sql = '''SELECT id, rsid, chrom, locus, variant_id FROM variants
WHERE chrom ='{}' AND locus >= {} AND locus <= {}'''
with db.connect() as con:
temp_df = pd.read_sql_query(sql.format(chrom, start, end), con=con)
else:
sql = '''SELECT id, rsid, chrom, locus, variant_id FROM variants
WHERE chrom ='{}' AND locus = {}'''
with db.connect() as con:
temp_df = pd.read_sql_query(sql.format(chrom, locus), con=con)
if not temp_df.empty:
return temp_df
else:
return pd.DataFrame.from_dict(
{'id': [np.nan], 'rsid': [position], 'chr': [np.nan],
'locus': [np.nan], 'variant_id': [np.nan]})
except IndexError:
return pd.DataFrame.from_dict({'id': [np.nan], 'rsid': [position], 'chr': [np.nan],
'locus': [np.nan], 'variant_id': [np.nan]})
def get_snp_fragments_whole(snp_df, restriction_enzymes, db):
snp_df = snp_df.sort_values(by=['id'])
fragment_df = []
chunksize = 1000
chunks = [snp_df[i:i+chunksize]
for i in range(0, snp_df.shape[0], chunksize)]
for enzyme in restriction_enzymes:
table = 'variant_lookup_{}'
if enzyme in ['MboI', 'DpnII']: # MboI and DpnII have the same restriction sites
table = table.format('mboi')
else:
table = table.format(enzyme.lower())
with db.connect() as con:
for chunk in chunks:
sql = '''SELECT * FROM {} WHERE id >= {} AND id <= {}'''
df = pd.read_sql(sql.format(
table, chunk['id'].min(), chunk['id'].max()), con=con)
df['enzyme'] = enzyme
fragment_df.append(df.drop(columns=['chrom']))
fragment_df = pd.concat(fragment_df)
snp_df = pd.merge(snp_df, fragment_df, how='inner', on=['id'])
db.dispose()
return snp_df
def get_snp_fragments(snp_df, restriction_enzymes, db):
snp_df = snp_df.sort_values(by=['id'])
fragment_df = []
chunksize = 1000
chunks = [snp_df[i:i+chunksize]
for i in range(0, snp_df.shape[0], chunksize)]
for enzyme in restriction_enzymes:
table = 'variant_lookup_{}'
df = []
if enzyme in ['MboI', 'DpnII']: # MboI and DpnII have the same restriction sites
table = table.format('mboi')
else:
table = table.format(enzyme.lower())
with db.connect() as con:
for idx, row in snp_df.iterrows():
sql = '''SELECT * FROM {} WHERE id = {}; '''
df.append(pd.read_sql(sql.format(table, row['id']), con=con))
df = pd.concat(df)
df['enzyme'] = enzyme
fragment_df.append(df.drop(columns=['chrom']))
fragment_df = | pd.concat(fragment_df) | pandas.concat |
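# A minimal, self-contained sketch of the chunked range-query pattern used above in
# find_snp_by_id / get_snp_fragments_whole: query by min/max id per chunk (cheap on an
# indexed column), then filter back to the exact ids with isin(). The table and column
# names here are illustrative assumptions, not the real schema.
import pandas as pd
import sqlalchemy

def fetch_by_id_chunks(ids, engine, chunksize=1000):
    ids = sorted(ids)
    chunks = [ids[i:i + chunksize] for i in range(0, len(ids), chunksize)]
    frames = []
    sql = 'SELECT id, value FROM example_table WHERE id >= {} AND id <= {}'
    with engine.connect() as con:
        for chunk in chunks:
            df = pd.read_sql(sql.format(min(chunk), max(chunk)), con=con)
            frames.append(df[df['id'].isin(chunk)])  # keep only the requested ids
    return pd.concat(frames) if frames else pd.DataFrame()

# engine = sqlalchemy.create_engine('sqlite:///example.db')  # hypothetical connection
# snp_like_df = fetch_by_id_chunks([3, 17, 42, 4096], engine)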
import gym
from gym import error, spaces, utils
from gym.utils import seeding
import pandas as pd
import numpy as np
from datetime import datetime
from model import *
'''
In this version:
1. The time step is fixed at 1 hour
2. The tank volume of H2 station is unlimited
3. Generating H2 using electricity is not supported
'''
class hydrogenCommunity(gym.Env):
""" AlphaHydorgen is a custom Gym Environment to simulate a community equiped with on-site renewables,
hydrogen station, hydrogen vehicles and smart grid
------------------------------------------------------------------------------------------------------
Args:
- stepLenth: length of time step, unit: s
- building_list: a list of buildings, each element in the list is a tuple of (buildingLoad.csv, number of buildings)
example: [('inputs/building1.csv', 10), ('inputs/building2.csv', 10), ('inputs/building3.csv', 10)]
- pv_list: a list of on-site pvs, each element in the list is a tuple of (pvGeneration.csv, number of PVs)
example: [('inputs/pv1.csv', 10), ('inputs/pv2.csv', 10), ('inputs/pv3.csv', 10)]
- vehicle_list: a list of hydrogen vehicles,
first element is parkSchedule.csv,
the remaining elements in the list are tuples of
(vehicleParameter.csv, fuelCellEff, fuelCellCap, number of vehicles)
example: ['inputs/vehicle_atHomeSchd.csv', ('inputs/vehicle1.csv', 100, 300, 10),
('inputs/vehicle2.csv', 100, 300, 10), ('inputs/vehicle3.csv', 100, 300, 10)]
- station_info: a hydrogen station parameter file.
------------------------------------------------------------------------------------------------------
States:
- buildingLoad: total building load of the community, [kW]
- pvGeneration: total on-site PV generation, [kW]
- tank_h2Vol: total onsite-produced hydrogen stored in the station tank, [kg]
- tank_spareVol: rest tank space for storing onsite-produced hydrogen, [kg]
    - vehicle_park: binary variable, whether the vehicle is parked at home or not
    - vehicle_max_dist: predicted maximum travel distance of today, dist_mu_wd+5*dist_sigma_wd [km]
    - vehicle_tank: hydrogen stored in the vehicle's tank, [g]
------------------------------------------------------------------------------------------------------
Actions:
- station_control: value, meaning the charge/discharge power of the hydrogen station for balancing the microgrid energy, [kW]
positive means charging the hydrogen station for H2 production, negative means discharging the hydrogen station for powering microgrid.
- vehicle_charge: array, each element is H2 charge/discharge rate of each vehicle,
positive means charge from H2 station, negative means discharge to grid/building, [g]
"""
def __init__(self, building_list, pv_list, vehicle_list, station_info):
'''
In this version:
-- The step length is fixed at 1 hour
'''
super().__init__()
self.episode_idx = 0
self.time_step_idx = 0
self.stepLenth =3600 # To be revised when the step length is not 1 hour
self.simulationYear = 2019 # Fixed in this version
start_time = datetime(year = self.simulationYear, month = 1, day =1)
self.n_steps = 8760*3600//self.stepLenth # Simulate a whole year
freq = '{}H'.format(self.stepLenth/3600)
self.timeIndex = pd.date_range(start_time, periods=self.n_steps, freq=freq)
# Calculate the load for each time step
self.buildingLoad = self._calculateBuildingLoad(building_list, self.stepLenth, self.simulationYear)
self.pvGeneration = self._calculatePVGeneration(pv_list, self.stepLenth, self.simulationYear)
# Initialize the hydrogen station
self.station = Station(station_info, self.stepLenth/3600)
# Initialize the vehicles
self.vehicles = []
self.vehicle_schl_file = vehicle_list[0]
for vehicle_tuple in vehicle_list[1:]:
fuelCell = FuelCell(vehicle_tuple[1], vehicle_tuple[2])
vehicle = Vehicle(vehicle_tuple[0], self.vehicle_schl_file, fuelCell, self.stepLenth)
for _ in range(vehicle_tuple[3]):
self.vehicles.append(vehicle)
# define the state and action space
vehicle_n = len(self.vehicles) # Only control the vehicles
self.action_names = ['station_tank'] + \
['vehicle_{}'.format(vehicle_i) for vehicle_i in range(vehicle_n)]
self.actions_low = np.array([-10000] + [-100 for _ in range(vehicle_n)]) # Maximum discharging rate -100g/s
self.actions_high = np.array([10000] + [100 for _ in range(vehicle_n)]) # Maximum charging rate 100g/s
self.action_space = spaces.Box(low=self.actions_low,
high=self.actions_high,
dtype=np.float32)
self.obs_names = ['buildingLoad', 'pvGeneration', 'tank_h2Vol','tank_spareVol'] + \
['vehicle_park_{}'.format(vehicle_i) for vehicle_i in range(vehicle_n)] + \
['vehicle_max_dist_{}'.format(vehicle_i) for vehicle_i in range(vehicle_n)] + \
['vehicle_tank_{}'.format(vehicle_i) for vehicle_i in range(vehicle_n)]# + \
#['h2Production', 'h2forGrid', 'h2forVehicle']
self.obs_low = np.array([0, 0, 0, 0] + [0 for _ in range(vehicle_n)] + \
[0 for _ in range(vehicle_n)] + [0 for _ in range(vehicle_n)] + \
[0, 0, 0])
self.obs_high = np.array([10000, 10000, 10000, 10000] + [1 for _ in range(vehicle_n)] + \
[1000 for _ in range(vehicle_n)] + [10000 for _ in range(vehicle_n)] + \
[10000, 10000, 10000])
self.observation_space = spaces.Box(low=self.obs_low,
high=self.obs_high,
dtype=np.float32)
def reset(self):
self.episode_idx += 1
self.time_step_idx = 0
load = self._getLoad(self.time_step_idx)
stationTank = [0]
stationTankVol = [0]
stationTankSpare = [0]
vehicles_park = []
vehicles_max_dist = []
vehicles_tank = []
for vehicle in self.vehicles:
vehicle_park, vehicle_max_dist, _ = self._getVihicleStateStatic(vehicle)
vehicles_park.append(vehicle_park)
vehicles_max_dist.append(vehicle_max_dist)
            vehicles_tank.append(vehicle.tankVol) # Half the tank at the beginning
obs = load + stationTankVol + stationTankSpare + vehicles_park + vehicles_max_dist + vehicles_tank# + h2Production + h2forGrid + h2forVehicle
return obs
def step(self, actions):
load = self._getLoad(self.time_step_idx)
stationTankVol = [max(self.station.tankVol, 0)]
stationTankSpare = [self.station.capacityMax - max(self.station.tankVol, 0)]
vehicles_park = []
vehicles_max_dist = []
vehicles_tank = []
        totalH2Charging = 0
        totalDischargePower = 0
# Charge the tank of the H2 station
if actions[0] >= 0:
power_H2Production, h2Production = self.station.h2Production(actions[0])
powertoGrid = 0
h2forGrid = 0
elif actions[0] < 0:
powertoGrid, h2forGrid = self.station.powerGrid(-actions[0])
power_H2Production = 0
h2Production = 0
for action, vehicle in zip(actions[1:], self.vehicles):
vehicle_park, vehicle_max_dist, cruiseBackHour = self._getVihicleStateStatic(vehicle)
if action > 0: # Charge the vehicle tank from the H2 station
if totalH2Charging < (self.station.chargeCap*1000):
realH2ChargeRate = vehicle.h2FromStation(action)
totalH2Charging += realH2ChargeRate
else:
realH2ChargeRate = vehicle.h2FromStation(0)
            elif action < 0: # discharge to the grid
                realDischargePower = vehicle.eleToGrid(-action)
                # accumulate vehicle-to-grid power; it is subtracted from the total grid load below
                totalDischargePower += realDischargePower
# Vehicle's gas tank is reduced at the hour when vehicle is back
if cruiseBackHour:
workingDay = self.timeIndex[self.time_step_idx].weekday()
vehicle.cruise(workingDay)
vehicles_park.append(vehicle_park)
vehicles_max_dist.append(vehicle_max_dist)
vehicles_tank.append(vehicle.tankVol)
h2forVehicle = self.station.h2toVehicle(totalH2Charging)
h2Change = h2Production - h2forGrid - h2forVehicle
h2netuse = -self.station.tankVol
        totalGridLoad = load[0] - 0.95*load[1] + power_H2Production - powertoGrid - totalDischargePower
obs = load + stationTankVol + stationTankSpare + vehicles_park + vehicles_max_dist + vehicles_tank
reward = (totalGridLoad, h2Change)
done = self.time_step_idx == len(self.timeIndex)-1
comments = (h2netuse, h2Production, h2forGrid, h2forVehicle)
self.time_step_idx += 1
if done:
load = self._getLoad(self.time_step_idx-1)
else:
load = self._getLoad(self.time_step_idx)
obs = load + stationTankVol + stationTankSpare + vehicles_park + vehicles_max_dist + vehicles_tank
return obs, reward, done, comments
def _calculateBuildingLoad(self, building_list, stepLenth, simulationYear):
'''Calculate the total building load from the building list
'''
buildings = | pd.DataFrame() | pandas.DataFrame |
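# A hedged usage sketch for the hydrogenCommunity environment above: build it from
# example input files and roll it out for one simulated day with random actions. The
# file paths and vehicle parameters are illustrative assumptions taken from the
# docstring, not files shipped with this code.
building_list = [('inputs/building1.csv', 10)]
pv_list = [('inputs/pv1.csv', 10)]
vehicle_list = ['inputs/vehicle_atHomeSchd.csv', ('inputs/vehicle1.csv', 100, 300, 10)]
station_info = 'inputs/station.csv'

env = hydrogenCommunity(building_list, pv_list, vehicle_list, station_info)
obs = env.reset()
for _ in range(24):  # one day at the fixed 1-hour step
    action = env.action_space.sample()
    obs, reward, done, comments = env.step(action)
    if done:
        break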
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 11 16:31:58 2021
@author: snoone
"""
import os
import glob
import pandas as pd
pd.options.mode.chained_assignment = None # default='warn'
OUTDIR = "D:/Python_CDM_conversion/hourly/qff/cdm_out/observations_table"
os.chdir("D:/Python_CDM_conversion/hourly/qff/test")
extension = 'qff'
#my_file = open("D:/Python_CDM_conversion/hourly/qff/ls1.txt", "r")
#all_filenames = my_file.readlines()
#print(all_filenames)
##use a list of file names to run 5000 in parallel
#with open("D:/Python_CDM_conversion/hourly/qff/ls.txt", "r") as f:
# all_filenames = f.read().splitlines()
all_filenames = [i for i in glob.glob('*.{}'.format(extension))]
##to start at the beginning of the files
for filename in all_filenames:
##to start at the next file after the last one processed
#for filename in all_filenames[all_filenames.index('SWM00002338.qff'):] :
df=pd.read_csv(filename, sep="|")
##set up master df to extract each variable
df["report_id"]=""
df["observation_id"]=""
df["data_policy_licence"]=""
df["date_time_meaning"]="1"
df["observation_duration"]="0"
df["latitude"]=df["Latitude"]
df["longitude"]=df["Longitude"]
df["crs"]=""
df["z_coordinate"]=""
df["z_coordinate_type"]=""
df["observation_height_above_station_surface"]=""
df["observed_variable"]=""
df["secondary_variable"]=""
df["observation_value"]=""
df["value_significance"]="12"
df["secondary_value"]=""
df["units"]=""
df["code_table"]=""
df["conversion_flag"]=""
df["location_method"]=""
df["location_precision"]=""
df["z_coordinate_method"]=""
df["bbox_min_longitude"]=""
df["bbox_max_longitude"]=""
df["bbox_min_latitude"]=""
df["bbox_max_latitude"]=""
df["spatial_representativeness"]=""
df["original_code_table"]=""
df["quality_flag"]=""
df["numerical_precision"]=""
df["sensor_id"]=""
df["sensor_automation_status"]=""
df["exposure_of_sensor"]=""
df["original_precision"]=""
df["original_units"]=""
df["original_code_table"]=""
df["original_value"]=""
df["conversion_method"]=""
df["processing_code"]=""
df["processing_level"]="0"
df["adjustment_id"]=""
df["traceability"]=""
df["advanced_qc"]=""
df["advanced_uncertainty"]=""
df["advanced_homogenisation"]=""
df["advanced_assimilation_feedback"]=""
df["source_id"]=""
df["source_record_id"]=""
df["primary_station_id"]=df["Station_ID"]
df["Timestamp2"] = df["Year"].map(str) + "-" + df["Month"].map(str)+ "-" + df["Day"].map(str)
df["Seconds"]="00"
df["offset"]="+00"
df["date_time"] = df["Timestamp2"].map(str)+ " " + df["Hour"].map(str)+":"+df["Minute"].map(str)+":"+df["Seconds"].map(str)
df['date_time'] = pd.to_datetime(df['date_time'], format='%Y-%m-%d %H:%M:%S')
df['date_time'] = df['date_time'].astype('str')
df.date_time = df.date_time + '+00'
#=========================================================================================
##convert temperature changes for each variable
dft = df[["observation_id","report_id","data_policy_licence","date_time",
"date_time_meaning","observation_duration","longitude","latitude",
"crs","z_coordinate","z_coordinate_type","observation_height_above_station_surface",
"observed_variable","secondary_variable","observation_value",
"value_significance","secondary_value","units","code_table",
"conversion_flag","location_method","location_precision",
"z_coordinate_method","bbox_min_longitude","bbox_max_longitude",
"bbox_min_latitude","bbox_max_latitude","spatial_representativeness",
"quality_flag","numerical_precision","sensor_id","sensor_automation_status",
"exposure_of_sensor","original_precision","original_units",
"original_code_table","original_value","conversion_method",
"processing_code","processing_level","adjustment_id","traceability",
"advanced_qc","advanced_uncertainty","advanced_homogenisation",
"advanced_assimilation_feedback","source_id","primary_station_id"]]
##change for each variable to convert to cdm compliant values
dft["observation_value"]=df["temperature"]+273.15
dft["source_id"]=df["temperature_Source_Code"]
dft["Seconds"]="00"
dft["quality_flag"]=df["temperature_QC_flag"]
dft["qc_method"]=dft["quality_flag"]
dft["conversion_flag"]="0"
dft["conversion_method"]="1"
dft["numerical_precision"]="0.01"
dft["original_precision"]="0.1"
dft["original_units"]="60"
dft["original_value"]=df["temperature"]
dft["observation_height_above_station_surface"]="2"
dft["units"]="5"
dft["observed_variable"]="85"
##set quality flag from df master for variable: fill all nan with Null, then change all non-nan to 1 and Null to 0
dft.loc[dft['quality_flag'].notnull(), "quality_flag"] = 1
dft = dft.fillna("Null")
dft.quality_flag[dft.quality_flag == "Null"] = 0
#change for each variable if required
##remove unwanted missing data rows
dft = dft.fillna("null")
dft = dft.replace({"null":"-99999"})
dft = dft[dft.observation_value != -99999]
#df = df.astype(str)
dft["source_id"] = pd.to_numeric(dft["source_id"],errors='coerce')
#df = df.astype(str)
#concatenate columns for joining df for next step
dft['source_id'] = dft['source_id'].astype(str).apply(lambda x: x.replace('.0',''))
dft['primary_station_id_2']=dft['primary_station_id'].astype(str)+'-'+dft['source_id'].astype(str)
dft["observation_value"] = pd.to_numeric(dft["observation_value"],errors='coerce')
#dft.to_csv("ttest.csv", index=False, sep=",")
###add data policy and record number to df
df2=pd.read_csv("D:/Python_CDM_conversion/new recipe tables/record_id.csv")
dft = dft.astype(str)
df2 = df2.astype(str)
dft= df2.merge(dft, on=['primary_station_id_2'])
dft['data_policy_licence'] = dft['data_policy_licence_x']
dft['data_policy_licence'] = dft['data_policy_licence'].astype(str).apply(lambda x: x.replace('.0',''))
dft['observation_id']=dft['primary_station_id'].astype(str)+'-'+dft['record_number'].astype(str)+'-'+dft['date_time'].astype(str)
dft['observation_id'] = dft['observation_id'].str.replace(r' ', '-')
##remove unwanted last two characters
dft['observation_id'] = dft['observation_id'].str[:-6]
dft["observation_id"]=dft["observation_id"]+'-'+dft['observed_variable'].astype(str)+'-'+dft['value_significance'].astype(str)
dft["report_id"]=dft["observation_id"].str[:-6]
##set up qc table
dft = dft[["observation_id","report_id","data_policy_licence","date_time",
"date_time_meaning","observation_duration","longitude","latitude",
"crs","z_coordinate","z_coordinate_type","observation_height_above_station_surface",
"observed_variable","secondary_variable","observation_value",
"value_significance","secondary_value","units","code_table",
"conversion_flag","location_method","location_precision",
"z_coordinate_method","bbox_min_longitude","bbox_max_longitude",
"bbox_min_latitude","bbox_max_latitude","spatial_representativeness",
"quality_flag","numerical_precision","sensor_id","sensor_automation_status",
"exposure_of_sensor","original_precision","original_units",
"original_code_table","original_value","conversion_method",
"processing_code","processing_level","adjustment_id","traceability",
"advanced_qc","advanced_uncertainty","advanced_homogenisation",
"advanced_assimilation_feedback","source_id"]]
    dft.dropna(subset = ["observation_value"], inplace=True)
dft['source_id'] = dft['source_id'].astype(str).apply(lambda x: x.replace('.0',''))
dft['data_policy_licence'] = dft['data_policy_licence'].astype(str).apply(lambda x: x.replace('.0',''))
dft["source_id"] = pd.to_numeric(dft["source_id"],errors='coerce')
dft["observation_value"] = pd.to_numeric(dft["observation_value"],errors='coerce')
dft["observation_value"]= dft["observation_value"].round(2)
#dft.to_csv("isuest.csv", index=False, sep=",")
#=================================================================================
##convert dew point temperature changes for each variable
dfdpt= df[["observation_id","report_id","data_policy_licence","date_time",
"date_time_meaning","observation_duration","longitude","latitude",
"crs","z_coordinate","z_coordinate_type","observation_height_above_station_surface",
"observed_variable","secondary_variable","observation_value",
"value_significance","secondary_value","units","code_table",
"conversion_flag","location_method","location_precision",
"z_coordinate_method","bbox_min_longitude","bbox_max_longitude",
"bbox_min_latitude","bbox_max_latitude","spatial_representativeness",
"quality_flag","numerical_precision","sensor_id","sensor_automation_status",
"exposure_of_sensor","original_precision","original_units",
"original_code_table","original_value","conversion_method",
"processing_code","processing_level","adjustment_id","traceability",
"advanced_qc","advanced_uncertainty","advanced_homogenisation",
"advanced_assimilation_feedback","source_id","primary_station_id"]]
##change for each variable to convert to cdm compliant values
dfdpt["observation_value"]=df["dew_point_temperature"]+273.15
dfdpt["source_id"]=df["dew_point_temperature_Source_Code"]
dfdpt["Seconds"]="00"
dfdpt["quality_flag"]=df["dew_point_temperature_QC_flag"]
dfdpt["conversion_flag"]="0"
dfdpt["conversion_method"]="1"
dfdpt["numerical_precision"]="0.01"
dfdpt["original_precision"]="0.1"
dfdpt["original_units"]="60"
dfdpt["original_value"]=df["dew_point_temperature"]
dfdpt["observation_height_above_station_surface"]="2"
dfdpt["units"]="5"
dfdpt["observed_variable"]="36"
##set quality flag from df master for variable: fill all nan with Null, then change all non-nan to 1 and Null to 0
dfdpt.loc[dfdpt['quality_flag'].notnull(), "quality_flag"] = 1
dfdpt= dfdpt.fillna("Null")
dfdpt.quality_flag[dfdpt.quality_flag == "Null"] = 0
##remove unwanted missing data rows
dfdpt= dfdpt.fillna("null")
dfdpt= dfdpt.replace({"null":"-99999"})
dfdpt= dfdpt[dfdpt.observation_value != -99999]
#df = df.astype(str)
dfdpt["source_id"] = pd.to_numeric(dfdpt["source_id"],errors='coerce')
#df = df.astype(str)
#concatenate columns for joining df for next step
dfdpt['source_id'] = dfdpt['source_id'].astype(str).apply(lambda x: x.replace('.0',''))
dfdpt['primary_station_id_2']=dfdpt['primary_station_id'].astype(str)+'-'+dfdpt['source_id'].astype(str)
dfdpt["observation_value"] = pd.to_numeric(dfdpt["observation_value"],errors='coerce')
#dfdpt.to_csv("ttest.csv", index=False, sep=",")
###add data policy and record number to df
df2=pd.read_csv("D:/Python_CDM_conversion/new recipe tables/record_id.csv")
dfdpt= dfdpt.astype(str)
df2 = df2.astype(str)
dfdpt= df2.merge(dfdpt, on=['primary_station_id_2'])
dfdpt['data_policy_licence'] = dfdpt['data_policy_licence_x']
dfdpt['data_policy_licence'] = dfdpt['data_policy_licence'].astype(str).apply(lambda x: x.replace('.0',''))
dfdpt['observation_id']=dfdpt['primary_station_id'].astype(str)+'-'+dfdpt['record_number'].astype(str)+'-'+dfdpt['date_time'].astype(str)
dfdpt['observation_id'] = dfdpt['observation_id'].str.replace(r' ', '-')
##remove unwanted last two characters
dfdpt['observation_id'] = dfdpt['observation_id'].str[:-6]
dfdpt["observation_id"]=dfdpt["observation_id"]+'-'+dfdpt['observed_variable'].astype(str)+'-'+dfdpt['value_significance'].astype(str)
dfdpt["report_id"]=dfdpt["observation_id"].str[:-6]
##set up qc table
dfdpt= dfdpt[["observation_id","report_id","data_policy_licence","date_time",
"date_time_meaning","observation_duration","longitude","latitude",
"crs","z_coordinate","z_coordinate_type","observation_height_above_station_surface",
"observed_variable","secondary_variable","observation_value",
"value_significance","secondary_value","units","code_table",
"conversion_flag","location_method","location_precision",
"z_coordinate_method","bbox_min_longitude","bbox_max_longitude",
"bbox_min_latitude","bbox_max_latitude","spatial_representativeness",
"quality_flag","numerical_precision","sensor_id","sensor_automation_status",
"exposure_of_sensor","original_precision","original_units",
"original_code_table","original_value","conversion_method",
"processing_code","processing_level","adjustment_id","traceability",
"advanced_qc","advanced_uncertainty","advanced_homogenisation",
"advanced_assimilation_feedback","source_id"]]
dfdpt.dropna(subset = ["observation_value"], inplace=True)
dfdpt['source_id'] = dfdpt['source_id'].astype(str).apply(lambda x: x.replace('.0',''))
dfdpt['data_policy_licence'] = dfdpt['data_policy_licence'].astype(str).apply(lambda x: x.replace('.0',''))
dfdpt["source_id"] = pd.to_numeric(dfdpt["source_id"],errors='coerce')
dfdpt["observation_value"] = pd.to_numeric(dfdpt["observation_value"],errors='coerce')
dfdpt["observation_value"]= dfdpt["observation_value"].round(2)
#====================================================================================
#convert station level to cdmlite
dfslp = df[["observation_id","report_id","data_policy_licence","date_time",
"date_time_meaning","observation_duration","longitude","latitude",
"crs","z_coordinate","z_coordinate_type","observation_height_above_station_surface",
"observed_variable","secondary_variable","observation_value",
"value_significance","secondary_value","units","code_table",
"conversion_flag","location_method","location_precision",
"z_coordinate_method","bbox_min_longitude","bbox_max_longitude",
"bbox_min_latitude","bbox_max_latitude","spatial_representativeness",
"quality_flag","numerical_precision","sensor_id","sensor_automation_status",
"exposure_of_sensor","original_precision","original_units",
"original_code_table","original_value","conversion_method",
"processing_code","processing_level","adjustment_id","traceability",
"advanced_qc","advanced_uncertainty","advanced_homogenisation",
"advanced_assimilation_feedback","source_id","primary_station_id"]]
##change for each variable to convert to cdm compliant values
dfslp["observation_value"]=df["station_level_pressure"]
dfslp["source_id"]=df["station_level_pressure_Source_Code"]
dfslp["Seconds"]="00"
dfslp["quality_flag"]=df["station_level_pressure_QC_flag"]
dfslp["conversion_flag"]="0"
dfslp["conversion_method"]="7"
dfslp["numerical_precision"]="10"
dfslp["original_precision"]="0.1"
dfslp["original_units"]="530"
dfslp["original_value"]=df["station_level_pressure"]
dfslp["observation_height_above_station_surface"]="2"
dfslp["units"]="32"
dfslp["observed_variable"]="57"
##set quality flag from df master for variable: fill all nan with Null, then change all non-nan to 1 and Null to 0
dfslp.loc[dfslp['quality_flag'].notnull(), "quality_flag"] = 1
dfslp = dfslp.fillna("Null")
dfslp.quality_flag[dfslp.quality_flag == "Null"] = 0
#change for each variable if required
##remove unwanted missing data rows
dfslp = dfslp.fillna("null")
dfslp = dfslp.replace({"null":"-99999"})
dfslp = dfslp[dfslp.observation_value != -99999]
#df = df.astype(str)
dfslp["source_id"] = | pd.to_numeric(dfslp["source_id"],errors='coerce') | pandas.to_numeric |
'''Assignment 2
In this assignment you'll explore the relationship between model complexity and generalization performance,
by adjusting key parameters of various supervised learning models. Part 1 of this assignment will look at
regression and Part 2 will look at classification.'''
'''Part 1 - Regression
First, run the following block to set up the variables needed for later sections.'''
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
np.random.seed(0)
n = 15
x = np.linspace(0,10,n) + np.random.randn(n)/5
y = np.sin(x)+x/6 + np.random.randn(n)/10
X_train, X_test, y_train, y_test = train_test_split(x, y, random_state=0)
# You can use this function to help you visualize the dataset by
# plotting a scatterplot of the data points
# in the training and test sets.
#---------- ANSWER CODE ----------
def part1_scatter():
#%matplotlib notebook
plt.figure()
plt.scatter(X_train, y_train, label='training data')
plt.scatter(X_test, y_test, label='test data')
plt.legend(loc=4);
#plt.show()
plt.savefig('/mnt/d/SebasUbuntu/Documentos/Graficas/part1_scatter.png')
return X_test, y_test, X_train, y_train
part1_scatter()
#---------- ANSWER ----------
(array([0.79431716, 4.47573197, 5.69364194, 6.51069113]),
array([ 0.99517935, -0.16081 , 0.3187423 , 1.53763897]),
array([10.08877265, 3.23065446, 1.62431903, 9.31004929, 7.17166586,
4.96972856, 8.14799756, 2.59103578, 0.35281047, 3.375973 ,
8.72363612]),
array([ 1.21213026, 0.36408873, 1.24877201, 1.81942995, 1.82595557,
-0.05233879, 2.31966323, 0.98630796, 0.43770571, 0.07512287,
2.08031157]))
#-----------------------------------------------------------------------
'''Question 1
Write a function that fits a polynomial LinearRegression model on the training data X_train for degrees
1, 3, 6, and 9. (Use PolynomialFeatures in sklearn.preprocessing to create the polynomial features and
then fit a linear regression model) For each model, find 100 predicted values over the interval x = 0 to 10
(e.g. np.linspace(0,10,100)) and store this in a numpy array. The first row of this array should correspond
to the output from the model trained on degree 1, the second row degree 3, the third row degree 6, and the
fourth row degree 9.
The figure above shows the fitted models plotted on top of the original data (using plot_one()).
This function should return a numpy array with shape (4, 100)'''
#---------- ANSWER CODE ----------
def answer_one():
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
predictions= np.empty((4,100))
data = np.linspace(0,10,100).reshape(-1, 1)
for i,degree in enumerate([1,3,6,9]):
cls = PolynomialFeatures(degree=degree)
poly = cls.fit_transform(X_train.reshape(-1, 1))
linreg = LinearRegression().fit(poly, y_train)
transformed_data = cls.transform(data)
predictions[i,:] = linreg.predict(transformed_data)
return predictions
answer_one()
# feel free to use the function plot_one() to replicate the figure
# from the prompt once you have completed question one
def plot_one(degree_predictions):
import matplotlib.pyplot as plt
#%matplotlib notebook
plt.figure(figsize=(10,5))
plt.plot(X_train, y_train, 'o', label='training data', markersize=10)
plt.plot(X_test, y_test, 'o', label='test data', markersize=10)
for i,degree in enumerate([1,3,6,9]):
plt.plot(np.linspace(0,10,100), degree_predictions[i], alpha=0.8, lw=2, label='degree={}'.format(degree))
plt.ylim(-1,2.5)
plt.legend(loc=4)
plt.savefig('/mnt/d/SebasUbuntu/Documentos/Graficas/plot_one.png')
plot_one(answer_one())
#-----------------------------------------------------------------------
'''Question 2
Write a function that fits a polynomial LinearRegression model on the training data X_train for degrees 0
through 9. For each model compute the $R^2$ (coefficient of determination) regression score on the training
data as well as the test data, and return both of these arrays in a tuple.
This function should return one tuple of numpy arrays (r2_train, r2_test). Both arrays should have shape
(10,)'''
#---------- ANSWER CODE ----------
def answer_two():
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
#from sklearn.metrics.regression import r2_score
r2_train = np.empty(10)
r2_test = np.empty(10)
for i,degree in enumerate(range(10)):
cls = PolynomialFeatures(degree=degree)
poly = cls.fit_transform(X_train.reshape(-1, 1))
linreg = LinearRegression().fit(poly, y_train)
r2_train[i] = linreg.score(poly, y_train)
r2_test[i] = linreg.score(
cls.transform(X_test.reshape(-1, 1)),
y_test
)
return r2_train, r2_test
answer_two()
#---------- ANSWER ----------
(array([0. , 0.42924578, 0.4510998 , 0.58719954, 0.91941945,
0.97578641, 0.99018233, 0.99352509, 0.99637545, 0.99803706]),
array([-0.47808642, -0.45237104, -0.06856984, 0.00533105, 0.73004943,
0.87708301, 0.9214094 , 0.92021504, 0.63247953, -0.64525322]))
#-----------------------------------------------------------------------
'''Question 3
Based on the $R^2$ scores from question 2 (degree levels 0 through 9), what degree level corresponds to a
model that is underfitting? What degree level corresponds to a model that is overfitting? What choice of
degree level would provide a model with good generalization performance on this dataset?
Hint: Try plotting the $R^2$ scores from question 2 to visualize the relationship between degree level and
$R^2$. Remember to comment out the import matplotlib line before submission.
This function should return one tuple with the degree values in this order: (Underfitting, Overfitting,
Good_Generalization). There might be multiple correct solutions, however, you only need to return one
possible solution, for example, (1,2,3).'''
#---------- ANSWER CODE ----------
def answer_three():
# Read in the results of answer_two
r2_train, r2_test = answer_two()
# Sort the scores
r2_train_sorted = np.sort(r2_train)
r2_test_sorted = np.sort(r2_test)
# Initialize the values
Underfitting = 0
Overfitting = 0
Good_Generalization = 0
min_r2_train = np.min(r2_train)
max_r2_train = np.max(r2_train)
    min_r2_test = np.min(r2_test)
max_r2_test = np.max(r2_test)
for deg, data in enumerate(zip(r2_train, r2_test)):
if data[0] < r2_train_sorted[5] and data[1] < r2_test_sorted[5]:
Underfitting = deg
if data[0] >= r2_train_sorted[5] and data[1] < r2_test_sorted[5]:
Overfitting = deg
if data[0] >= r2_train_sorted[5] and data[1] >= r2_test_sorted[7]:
Good_Generalization = deg
return Underfitting, Overfitting, Good_Generalization
answer_three()
#---------- ANSWER ----------
(3, 9, 7)
def plot_three():
import matplotlib.pyplot as plt
#%matplotlib notebook
r2_train, r2_test = answer_two()
plt.figure(figsize=(10,5))
plt.plot(range(10), r2_train, alpha=0.8, lw=2, label='R2_Train')
plt.plot(range(10), r2_test, alpha=0.8, lw=2, label='R2_Test')
plt.legend()
plt.xlabel('Degree')
plt.ylabel('R2')
plt.xticks(range(10))
plt.grid()
plt.savefig('/mnt/d/SebasUbuntu/Documentos/Graficas/plot_three.png')
plot_three()
#-----------------------------------------------------------------------
'''Question 4
Training models on high degree polynomial features can result in overly complex models that overfit, so we
often use regularized versions of the model to constrain model complexity, as we saw with Ridge and Lasso
linear regression.
For this question, train two models: a non-regularized LinearRegression model (default parameters) and a
regularized Lasso Regression model (with parameters alpha=0.01, max_iter=10000) both on polynomial features
of degree 12. Return the $R^2$ score for both the LinearRegression and Lasso model's test sets.
This function should return one tuple (LinearRegression_R2_test_score, Lasso_R2_test_score)'''
#---------- ANSWER CODE ----------
def answer_four():
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import Lasso, LinearRegression
#from sklearn.metrics.regression import r2_score
poly = PolynomialFeatures(degree=12)
X_train_poly = poly.fit_transform(X_train.reshape(-1, 1))
X_test_poly = poly.transform(X_test.reshape(-1, 1))
linreg = LinearRegression().fit(X_train_poly, y_train)
linreg_test_score = linreg.score(X_test_poly, y_test )
#print(linreg.score(X_train_poly, y_train))
#print(linreg.score(X_test_poly, y_test ))
linlasso = Lasso(alpha=0.01, max_iter = 10000).fit(X_train_poly, y_train)
lasso_test_score = linlasso.score(X_test_poly, y_test )
#print(linlasso.score(X_train_poly, y_train))
#print(linlasso.score(X_test_poly, y_test ))
return linreg_test_score, lasso_test_score
answer_four()
#---------- ANSWER ----------
(-4.311955012614453, 0.8406625614750236)
#-----------------------------------------------------------------------
'''Part 2 - Classification
Here's an application of machine learning that could save your life! For this section of the assignment we will be working with the UCI Mushroom Data Set stored in readonly/mushrooms.csv. The data will be used to train a model to predict whether or not a mushroom is poisonous. The following attributes are provided:
Attribute Information:
1.cap-shape: bell=b, conical=c, convex=x, flat=f, knobbed=k, sunken=s
2.cap-surface: fibrous=f, grooves=g, scaly=y, smooth=s
3.cap-color: brown=n, buff=b, cinnamon=c, gray=g, green=r, pink=p, purple=u, red=e, white=w, yellow=y
4.bruises?: bruises=t, no=f
5.odor: almond=a, anise=l, creosote=c, fishy=y, foul=f, musty=m, none=n, pungent=p, spicy=s
6.gill-attachment: attached=a, descending=d, free=f, notched=n
7.gill-spacing: close=c, crowded=w, distant=d
8.gill-size: broad=b, narrow=n
9.gill-color: black=k, brown=n, buff=b, chocolate=h, gray=g, green=r, orange=o, pink=p, purple=u, red=e, white=w, yellow=y
10.stalk-shape: enlarging=e, tapering=t
11.stalk-root: bulbous=b, club=c, cup=u, equal=e, rhizomorphs=z, rooted=r, missing=?
12.stalk-surface-above-ring: fibrous=f, scaly=y, silky=k, smooth=s
13.stalk-surface-below-ring: fibrous=f, scaly=y, silky=k, smooth=s
14.stalk-color-above-ring: brown=n, buff=b, cinnamon=c, gray=g, orange=o, pink=p, red=e, white=w, yellow=y
15.stalk-color-below-ring: brown=n, buff=b, cinnamon=c, gray=g, orange=o, pink=p, red=e, white=w, yellow=y
16.veil-type: partial=p, universal=u
17.veil-color: brown=n, orange=o, white=w, yellow=y
18.ring-number: none=n, one=o, two=t
19.ring-type: cobwebby=c, evanescent=e, flaring=f, large=l, none=n, pendant=p, sheathing=s, zone=z
20.spore-print-color: black=k, brown=n, buff=b, chocolate=h, green=r, orange=o, purple=u, white=w, yellow=y
21.population: abundant=a, clustered=c, numerous=n, scattered=s, several=v, solitary=y
22.habitat: grasses=g, leaves=l, meadows=m, paths=p, urban=u, waste=w, woods=d
The data in the mushrooms dataset is currently encoded with strings. These values will need to be encoded to
numeric to work with sklearn. We'll use pd.get_dummies to convert the categorical variables into indicator
variables.'''
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
mush_df = | pd.read_csv('mushrooms.csv') | pandas.read_csv |
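# A small sketch of the pd.get_dummies step described above, on a toy frame; the
# mushroom columns are assumed to look like this, with 'class' as the target
# (p = poisonous, e = edible).
import pandas as pd

toy = pd.DataFrame({'class': ['p', 'e'], 'odor': ['a', 'n'], 'cap-shape': ['x', 'b']})
X_toy = pd.get_dummies(toy.drop(columns=['class']))  # one indicator column per category level
y_toy = (toy['class'] == 'p').astype(int)            # poisonous = 1, edible = 0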
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 5 00:37:32 2019
@author: tungutokyo
"""
import pandas as pd
import numpy as np
pd.options.mode.chained_assignment = None
pd.set_option("display.max_columns", 60)
import matplotlib.pyplot as plt
import seaborn as sns
import itertools
from scipy import interp
from itertools import cycle
from sklearn.preprocessing import label_binarize
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn import metrics
from sklearn.metrics import roc_curve, auc
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import plot_confusion_matrix
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_validate
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import KFold
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.model_selection import TimeSeriesSplit, GridSearchCV, RandomizedSearchCV
from xgboost import XGBClassifier
def get_RandSearchCV(X_train, y_train, X_test, y_test, scoring, type_search, output_file):
from sklearn.model_selection import TimeSeriesSplit
from datetime import datetime as dt
st_t = dt.now()
    # Number of trees to use
n_estimators = [5, 10, 50, 100, 150, 200, 250, 300]
#n_estimators = list(np.arange(100,1000,50))
#n_estimators = [1000]
# Maximum depth of each tree
max_depth = [5, 10, 25, 50, 75, 100]
# Minimum number of samples per leaf
min_samples_leaf = [1, 2, 4, 8, 10]
# Minimum number of samples to split a node
min_samples_split = [2, 4, 6, 8, 10]
    # Maximum number of features to consider for making splits
max_features = ["auto", "sqrt", "log2", None]
hyperparameter = {'n_estimators': n_estimators,
'max_depth': max_depth,
'min_samples_leaf': min_samples_leaf,
'min_samples_split': min_samples_split,
'max_features': max_features}
cv_timeSeries = TimeSeriesSplit(n_splits=5).split(X_train)
base_model_rf = RandomForestClassifier(criterion="gini", random_state=42)
base_model_gb = GradientBoostingClassifier(criterion="friedman_mse", random_state=42)
    # Run randomized search
n_iter_search = 30
if type_search == "RandomSearchCV-RandomForest":
rsearch_cv = RandomizedSearchCV(estimator=base_model_rf,
random_state=42,
param_distributions=hyperparameter,
n_iter=n_iter_search,
cv=cv_timeSeries,
scoring=scoring,
n_jobs=-1)
else:
rsearch_cv = RandomizedSearchCV(estimator=base_model_gb,
random_state=42,
param_distributions=hyperparameter,
n_iter=n_iter_search,
cv=cv_timeSeries,
scoring=scoring,
n_jobs=-1)
rsearch_cv.fit(X_train, y_train)
#f = open("output.txt", "a")
print("Best estimator obtained from CV data: \n", rsearch_cv.best_estimator_, file=output_file)
print("Best Score: ", rsearch_cv.best_score_, file=output_file)
return rsearch_cv
def performance_rand(best_clf, X_train, y_train, X_test, y_test, type_search, num_class, output_file, class_name):
#f = open("output.txt", "a")
print("-"*100)
print("~~~~~~~~~~~~~~~~~~ PERFORMANCE EVALUATION ~~~~~~~~~~~~~~~~~~~~~~~~", file=output_file)
print("Detailed report for the {} algorithm".format(type_search), file=output_file)
best_clf.fit(X_train, y_train)
y_pred = best_clf.predict(X_test)
y_pred_prob = best_clf.predict_proba(X_test)
test_accuracy = accuracy_score(y_test, y_pred, normalize=True) * 100
points = accuracy_score(y_test, y_pred, normalize=False)
print("The number of accurate predictions out of {} data points on unseen data is {}".format(
X_test.shape[0], points), file=output_file)
print("Accuracy of the {} model on unseen data is {}".format(
type_search, np.round(test_accuracy, 2)), file=output_file)
print("Precision of the {} model on unseen data is {}".format(
type_search, np.round(metrics.precision_score(y_test, y_pred, average="macro"), 4)), file=output_file)
print("Recall of the {} model on unseen data is {}".format(
type_search, np.round(metrics.recall_score(y_test, y_pred, average="macro"), 4)), file=output_file)
print("F1 score of the {} model on unseen data is {}".format(
type_search, np.round(metrics.f1_score(y_test, y_pred, average="macro"), 4)), file=output_file)
print("\nClassification report for {} model: \n".format(type_search), file=output_file)
print(metrics.classification_report(y_test, y_pred), file=output_file)
plt.figure(figsize=(12,12))
cnf_matrix = metrics.confusion_matrix(y_test, y_pred)
cnf_matrix_norm = cnf_matrix.astype('float') / cnf_matrix.sum(axis=1)[:, np.newaxis]
print("\nThe Confusion Matrix: \n", file=output_file)
print(cnf_matrix, file=output_file)
#class_name = ["CDI", "ignore-nonCDI", "Health"]
#class_name = ["CRC", "Adenomas", "Health"]
# class_name = ["OB", "OW", "Health"]
class_name = class_name
cmap = plt.cm.Blues
plt.imshow(cnf_matrix_norm, interpolation="nearest", cmap=cmap)
plt.colorbar()
fmt = ".2g"
thresh = cnf_matrix_norm.max()/2
for i, j in itertools.product(range(cnf_matrix_norm.shape[0]), range(cnf_matrix_norm.shape[1])):
plt.text(j,i,format(cnf_matrix_norm[i,j], fmt), ha="center", va="center",
color="white" if cnf_matrix_norm[i,j] > thresh else "black", fontsize=35)
plt.xticks(np.arange(num_class), labels = class_name, fontsize=30)
plt.yticks(np.arange(num_class), labels = class_name, fontsize=30)
plt.ylabel("True label", fontsize=30)
plt.xlabel("Predicted label", fontsize=30)
plt.ylim((num_class - 0.5, -0.5))
plt.show()
#plt.setp(ax.get_xticklabels(), rotation=xticks_rotation)
"""
cmap = plt.cm.Blues
sns.heatmap(cnf_matrix_norm, annot=True, cmap=cmap, fmt=".2f", annot_kws={"size":15}, linewidths=.05)
if type_search == "RandomSearchCV-RandomForest":
plt.title("The Normalized Confusion Matrix - {}".format("RandomForest"), fontsize=20)
else:
plt.title("The Normalized Confusion Matrix - {}".format("GradientBoosting"), fontsize=20)
plt.ylabel("True label", fontsize=15)
plt.xlabel("Predicted label", fontsize=15)
plt.show()
"""
print("\nROC curve and AUC")
y_pred = best_clf.predict(X_test)
y_pred_prob = best_clf.predict_proba(X_test)
y_test_cat = np.array( | pd.get_dummies(y_test) | pandas.get_dummies |
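# A hedged sketch of the multi-class ROC/AUC computation that the truncated block above
# is building towards: binarize the labels and compute one ROC curve per class. It
# assumes three or more classes and a fitted classifier's predict_proba output.
import numpy as np
from sklearn.preprocessing import label_binarize
from sklearn.metrics import roc_curve, auc

def per_class_roc(y_test, y_pred_prob):
    classes = np.unique(y_test)
    y_bin = label_binarize(y_test, classes=classes)
    fpr, tpr, roc_auc = {}, {}, {}
    for i in range(len(classes)):
        fpr[i], tpr[i], _ = roc_curve(y_bin[:, i], y_pred_prob[:, i])
        roc_auc[i] = auc(fpr[i], tpr[i])
    return fpr, tpr, roc_auc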
import os
import pandas as pd
import tensorflow as tf
import tensorflow_io as tfio
TRAIN_RATIO = 0.8
VALIDATION_RATIO = 0.1
TEST_RATIO = 0.1
@tf.function
def load_wav_16k_mono(wav_path: str):
""" Load a WAV file, convert it to a float tensor, resample to 16 kHz single-channel audio. """
file_contents = tf.io.read_file(wav_path)
wav, sample_rate = tf.audio.decode_wav(
file_contents,
desired_channels=1
)
wav = tf.squeeze(wav, axis=-1)
sample_rate = tf.cast(sample_rate, dtype=tf.int64)
wav = tfio.audio.resample(wav, rate_in=sample_rate, rate_out=16000)
return wav
def load_wav_for_map(filename: str, label, split):
return (load_wav_16k_mono(filename), label, split)
def load(path: str, dataset: str = 'sleep_scoring'):
if dataset == 'sleep_scoring':
classes = ['NONE', 'SLEEP-REM', 'SLEEP-S0', 'SLEEP-S1', 'SLEEP-S2', 'SLEEP-S3']
map_class_to_id = {'NONE': 0, 'SLEEP-REM': 3, 'SLEEP-S0': 0, 'SLEEP-S1': 1, 'SLEEP-S2': 2, 'SLEEP-S3': 2}
new_classes = ['wake', 'light', 'deep', 'REM']
print(f'Sleep Scoring Classes: {new_classes}')
else:
classes = ['NONE', 'SLEEP-REM', 'SLEEP-S0', 'SLEEP-S1', 'SLEEP-S2', 'SLEEP-S3']
map_class_to_id = {'NONE': 0, 'SLEEP-REM': 3, 'SLEEP-S0': 0, 'SLEEP-S1': 1, 'SLEEP-S2': 2, 'SLEEP-S3': 2}
new_classes = ['wake', 'light', 'deep', 'REM']
print(f'data.load else')
#map_split_to_id = {'train': 0, 'validation': 1, 'eval': 2}
metadata = pd.DataFrame(columns=['filename', 'class', 'split'])
for class_name in classes:
tempdata = | pd.DataFrame(columns=['filename', 'class', 'split']) | pandas.DataFrame |
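# A hedged sketch of how the helpers above are typically wired into a tf.data pipeline:
# build a dataset of (filename, label, split) rows and map load_wav_for_map over it.
# The metadata columns follow the code above; the split id mapping is an assumption.
import tensorflow as tf

def build_dataset(metadata, map_class_to_id, map_split_to_id):
    filenames = metadata['filename'].tolist()
    targets = metadata['class'].map(map_class_to_id).tolist()
    splits = metadata['split'].map(map_split_to_id).tolist()
    ds = tf.data.Dataset.from_tensor_slices((filenames, targets, splits))
    return ds.map(load_wav_for_map)  # yields (16 kHz mono waveform, label, split)

# e.g. ds = build_dataset(metadata, map_class_to_id, {'train': 0, 'validation': 1, 'eval': 2})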
import numpy as np
# For loading the data into Dataframes
import pandas as pd
# For string literal
import ast
# import packages related to gdal
from osgeo import gdal
import pyproj
# For time Zone Conversions
import pytz
# for plots
import matplotlib.pyplot as plt
with open('../data_collection/track_data/r2_Queen_of_Nanaimo.json') as f:
for line in f:
data = line
# Imported data now is in string format
# Convert it into a list of lists matching the known structure of the imported file.
data = ast.literal_eval(data)
# Convert into a structured numpy array, assigning explicit datatypes
mt_data = np.array([tuple(x) for x in data], dtype = [('lon', 'f8'), ('lat', 'f8'), ('speed', 'i4'), ('course', 'i4'), ('heading', 'i4'), ('timestamp', 'M8[s]'), ('ut', 'i8'), ('station', 'i8'), ('gap','i4')])
mt_data = | pd.DataFrame(mt_data) | pandas.DataFrame |
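# A small sketch of the time-zone conversion the pytz import above is there for,
# assuming the decoded timestamps are UTC; 'America/Vancouver' is an assumption for
# this BC Ferries route, not something stated in the data.
mt_data['timestamp'] = pd.to_datetime(mt_data['timestamp'])
mt_data['local_time'] = (
    mt_data['timestamp']
    .dt.tz_localize('UTC')
    .dt.tz_convert('America/Vancouver')
)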
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Lambda
from keras.layers import Input
from keras.layers import Conv1D
from keras.layers import Bidirectional
from keras.models import Model
from keras.optimizers import Adam
from keras.constraints import Constraint
from keras.constraints import NonNeg
from keras import backend as K
import os
import csv
import ast
import pandas as pd
import numpy as np
import datetime
# Suppress noisy Tensorflow debug logging
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
INCLUDE_CV_PREDICTION = False
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_PATH = os.path.join(ROOT_DIR, 'data')
DATA_FILE_PATH = os.path.join(DATA_PATH, 'OxCGRT_latest.csv')
DATA_FILE_CV_PATH = os.path.join(DATA_PATH, 'OxfordComunitatValenciana.csv')
ADDITIONAL_CONTEXT_FILE = os.path.join(DATA_PATH, "Additional_Context_Data_Global.csv")
ADDITIONAL_US_STATES_CONTEXT = os.path.join(DATA_PATH, "US_states_populations.csv")
ADDITIONAL_UK_CONTEXT = os.path.join(DATA_PATH, "uk_populations.csv")
ADDITIONAL_BRAZIL_CONTEXT = os.path.join(DATA_PATH, "brazil_populations.csv")
MODEL_PATH = os.path.join(ROOT_DIR, 'models')
MODEL_WEIGHTS_CLUSTER_FILE = os.path.join(MODEL_PATH, "weightscluster{}_280traineddays.h5")
MODEL_WEIGHTS_SCO_V0_FILE = os.path.join(MODEL_PATH, "sco_v0_trained_model_weights.h5")
MODEL_WEIGHTS_SCO_V1_FILE = os.path.join(MODEL_PATH, "sco_v1_trained_model_weights.h5")
MODEL_WEIGHTS_SCO_V2_FILE = os.path.join(MODEL_PATH, "sco_v2_trained_model_weights.h5")
ID_COLS = ['CountryName',
'RegionName',
'GeoID',
'Date']
CASES_COL = ['NewCases']
CONTEXT_COLUMNS = ['CountryName',
'RegionName',
'GeoID',
'Date',
'ConfirmedCases',
'ConfirmedDeaths',
'Population']
NPI_COLUMNS = ["C1_School closing",
"C2_Workplace closing",
"C3_Cancel public events",
"C4_Restrictions on gatherings",
"C5_Close public transport",
"C6_Stay at home requirements",
"C7_Restrictions on internal movement",
"C8_International travel controls"]
NB_LOOKBACK_DAYS = 21
WINDOW_SIZE = 7
LSTM_SIZE = 32
US_PREFIX = "United States / "
Cluster_1 = [('Central African Republic', ''),('Chile', ''),('China', ''),('Lithuania', ''),('Niger', ''),('Panama', ''),
('Sweden', ''),('Switzerland', ''),('United States', 'Arizona'),('United States', 'Hawaii'),
('United States', 'Maine'),('United States', 'Rhode Island')]
Cluster_2 = [('Bahrain', ''),('Bangladesh', ''),('El Salvador', ''),('Estonia', ''),('Japan', ''),('Kosovo', ''),
('Luxembourg', ''),('Moldova', ''),('Peru', ''),('Vietnam', '')]
Cluster_3 = [('Andorra', ''),('Aruba', ''),('Australia', ''),('Belarus', ''),('Belgium', ''),('Bolivia', ''),
('Bulgaria', ''),('Burkina Faso', ''),('Croatia', ''),("Cote d'Ivoire", ''),('Czech Republic', ''),
('Dominican Republic', ''),('Finland', ''),('France', ''),('Greece', ''),('Guatemala', ''),('Iceland', ''),
('India', ''),('Ireland', ''),('Israel', ''),('Kosovos', ''),('Latvia', ''),('Mongolia', ''),('Myanmar', ''),
('Nepal', ''),('Norway', ''),('Oman', ''),('Puerto Rico', ''),('Romania', ''),('Russia', ''),('Saudi Arabia', ''),
('Slovenia', ''),('Tajikistan', ''),('Trinidad and Tobago', ''),('Uganda', ''),('Ukraine', ''),
('United Arab Emirates', ''),('United States', 'California'),('United States', 'Georgia'),
('United States', 'Idaho'),('United States', 'New Hampshire'),('United States', 'North Carolina'),('Uruguay', ''),
('Venezuela', ''),('Zambia', '')]
Cluster_4 = [('United States', 'South Carolina')]
Cluster_6 = [('Cameroon', ''),('Ethiopia', ''),('Jordan', ''),('Uzbekistan', ''),('Zimbabwe', '')]
Cluster_7 = [('Eswatini', ''),('Kenya', ''),('Libya', ''),('Singapore', ''),('Suriname', ''),('United States', 'Illinois')]
Cluster_10 = [('Algeria', ''), ('Iran', ''), ('Morocco', ''), ('United States', 'Texas')]
Cluster_11 = [('United States', 'Florida')]
Cluster_v0 = [ ('Afghanistan', ''), ('Bahamas', ''), ('Azerbaijan', ''), ('Burundi', ''), ('Comoros', ''),
('Democratic Republic of Congo', ''), ('Hong Kong', ''), ('Indonesia', ''), ('Kazakhstan', ''),
('Kyrgyz Republic', ''), ('Mauritius', ''), ('New Zealand', ''), ('Nicaragua', ''), ('Sudan', ''),
('Taiwan', '')]
class Positive(Constraint):
def __call__(self, w):
return K.abs(w)
class ValenciaPredictor(object):
"""
    Predicts daily new COVID-19 cases per country/region from intervention plans, using pre-trained per-cluster LSTM models.
"""
def __init__(self):
        # Load the models and their weights
# self.model = self._create_model_default(MODEL_WEIGHTS_DEFAULT_FILE)
nb_context = 1 # Only time series of new cases rate is used as context
nb_action = len(NPI_COLUMNS)
self.model_v0 = self._create_model_sco_v0(nb_context=nb_context, nb_action=nb_action, lstm_size=LSTM_SIZE, nb_lookback_days=NB_LOOKBACK_DAYS)
self.model_v0.load_weights(MODEL_WEIGHTS_SCO_V0_FILE)
self.model_v1 = self._create_model_sco_v1(nb_context=nb_context, nb_action=nb_action, lstm_size=LSTM_SIZE, nb_lookback_days=NB_LOOKBACK_DAYS)
self.model_v1.load_weights(MODEL_WEIGHTS_SCO_V1_FILE)
self.model_v2 = self._create_model_sco_v2(nb_context=nb_context, nb_action=nb_action, lstm_size=LSTM_SIZE, nb_lookback_days=NB_LOOKBACK_DAYS)
self.model_v2.load_weights(MODEL_WEIGHTS_SCO_V2_FILE)
self.cluster_dict = self._load_clusters()
self.df = self._prepare_dataframe()
def predict_df(self, start_date_str: str, end_date_str: str, path_to_ips_file: str, verbose=False):
# Load historical intervention plans, since inception
hist_ips_df = self._load_original_data(path_to_ips_file)
return self.predict_from_df(start_date_str, end_date_str, hist_ips_df, verbose=verbose)
def predict_from_df(self,
start_date_str: str,
end_date_str: str,
npis_df: pd.DataFrame,
verbose=False) -> pd.DataFrame:
"""
        Generates daily new cases predictions for the given countries, regions and npis, between
        start_date and end_date, included.
        :param start_date_str: day from which to start making predictions, as a string, format YYYY-MM-DD
        :param end_date_str: day on which to stop making predictions, as a string, format YYYY-MM-DD
        :param npis_df: a DataFrame containing the intervention plans between inception date and end_date
:param verbose: True to print debug logs
:return: a Pandas DataFrame containing the predictions
"""
start_date = pd.to_datetime(start_date_str, format='%Y-%m-%d')
end_date = pd.to_datetime(end_date_str, format='%Y-%m-%d')
nb_days = (end_date - start_date).days + 1
# Load historical intervention plans, since inception
hist_ips_df = npis_df
# Fill any missing NPIs by assuming they are the same as previous day
for npi_col in NPI_COLUMNS:
hist_ips_df.update(hist_ips_df.groupby(['CountryName', 'RegionName'])[npi_col].ffill().fillna(0))
# Intervention plans to forecast for: those between start_date and end_date
ips_df = hist_ips_df[(hist_ips_df.Date >= start_date) & (hist_ips_df.Date <= end_date)]
# Make predictions for each country,region pair
geo_pred_dfs = []
for g in ips_df.GeoID.unique():
if verbose:
print('\nPredicting for', g)
# Pull out all relevant data for country c
ips_gdf = ips_df[ips_df.GeoID == g]
hist_ips_gdf = hist_ips_df[hist_ips_df.GeoID == g]
hist_cases_gdf = self.df[self.df.GeoID == g]
last_known_date = hist_cases_gdf.Date.max()
# Start predicting from start_date, unless there's a gap since last known date
current_date = min(last_known_date + np.timedelta64(1, 'D'), start_date)
past_cases_gdf = hist_cases_gdf[hist_cases_gdf.Date < current_date]
past_ips_gdf = hist_ips_gdf[hist_ips_gdf.Date < current_date]
future_ips_gdf = hist_ips_gdf[(hist_ips_gdf.Date >= current_date) & (hist_ips_gdf.Date <= end_date)]
past_cases = np.array(past_cases_gdf[CASES_COL]).flatten()
past_npis = np.array(past_ips_gdf[NPI_COLUMNS])
future_npis = np.array(future_ips_gdf[NPI_COLUMNS])
pop_size = hist_cases_gdf.Population.max()
past_cum_cases = np.cumsum(past_cases)
zn = np.array(compute_7days_mean(past_cases))
rn = np.array(compute_rns(past_cum_cases, zn, pop_size))
# Loads custom model
cluster_id = self.cluster_dict.get(g)
if cluster_id is None:
current_model = self.model_v1
elif cluster_id == -1:
current_model = self.model_v0
else:
file_name = MODEL_WEIGHTS_CLUSTER_FILE.format(cluster_id)
current_model = self._create_model(file_name)
# Make prediction for each day
geo_preds = []
geo_ratios = []
days_ahead = 0
while current_date <= end_date:
# Prepare data
X_rns = rn[-NB_LOOKBACK_DAYS:].reshape(1, 21, 1)
X_npis = past_npis[-NB_LOOKBACK_DAYS:].reshape(1, 21, 8)
# Make the prediction (reshape so that sklearn is happy)
pred_rn = current_model.predict([X_rns, X_npis])[0][0]
pred_cases = int(((((pred_rn * ((pop_size - past_cum_cases[-1]) / pop_size)) - 1.0) * 7.0 * zn[-1])) + past_cases[-7])
pred = max(0, pred_cases) # Do not allow predicting negative cases
# Add if it's a requested date
if current_date >= start_date:
geo_preds.append(pred)
geo_ratios.append(pred_rn)
if verbose:
print(f"{current_date.strftime('%Y-%m-%d')}: {pred}")
else:
if verbose:
print(f"{current_date.strftime('%Y-%m-%d')}: {pred} - Skipped (intermediate missing daily cases)")
# Append the prediction and npi's for next day
# in order to rollout predictions for further days.
past_cases = np.append(past_cases, pred)
past_npis = np.append(past_npis, future_npis[days_ahead:days_ahead + 1], axis=0)
past_cum_cases = np.append(past_cum_cases, past_cum_cases[-1] + pred)
zn = np.append(zn, compute_last_7days_mean(past_cases))
rn = np.append(rn, pred_rn) # compute_last_rn(past_cum_cases, zn, pop_size)
# Move to next day
current_date = current_date + np.timedelta64(1, 'D')
days_ahead += 1
# we don't have historical data for this geo: return zeroes
if len(geo_preds) != nb_days:
geo_preds = [0] * nb_days
geo_ratios = [0] * nb_days
if g=='Mauritania':
geo_preds = [140] * nb_days
geo_ratios = [0] * nb_days
if g=='Yemen':
geo_preds = [5] * nb_days
geo_ratios = [0] * nb_days
# Create geo_pred_df with pred column
geo_pred_df = ips_gdf[ID_COLS].copy()
geo_pred_df['PredictedDailyNewCases'] = geo_preds
geo_pred_df['PredictedDailyNewRatios'] = geo_ratios
geo_pred_dfs.append(geo_pred_df)
# Combine all predictions into a single dataframe
pred_df = | pd.concat(geo_pred_dfs) | pandas.concat |
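# The rollout above relies on helpers that are not defined in this excerpt
# (compute_7days_mean, compute_rns, compute_last_7days_mean). The sketch below is an
# inference from how pred_cases is rebuilt from pred_rn, not the authors' code:
# zn is a trailing 7-day mean of new cases, and rn is the day-over-day growth of that
# mean divided by the susceptible fraction of the population.
import numpy as np
import pandas as pd

def compute_7days_mean_sketch(new_cases):
    return pd.Series(new_cases).rolling(7, min_periods=1).mean().to_numpy()

def compute_rns_sketch(cum_cases, zn, pop_size):
    prev_zn = np.concatenate(([zn[0]], zn[:-1]))
    growth = np.divide(zn, prev_zn, out=np.ones_like(zn, dtype=float), where=prev_zn > 0)
    susceptible_frac = (pop_size - np.asarray(cum_cases, dtype=float)) / pop_size
    return growth / np.clip(susceptible_frac, 1e-12, None)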
from datetime import date, datetime, timedelta
from dateutil import tz
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, Series, Timestamp, date_range
import pandas._testing as tm
class TestDatetimeIndex:
def test_setitem_with_datetime_tz(self):
# 16889
# support .loc with alignment and tz-aware DatetimeIndex
mask = np.array([True, False, True, False])
idx = date_range("20010101", periods=4, tz="UTC")
df = DataFrame({"a": np.arange(4)}, index=idx).astype("float64")
result = df.copy()
result.loc[mask, :] = df.loc[mask, :]
tm.assert_frame_equal(result, df)
result = df.copy()
result.loc[mask] = df.loc[mask]
tm.assert_frame_equal(result, df)
idx = date_range("20010101", periods=4)
df = DataFrame({"a": np.arange(4)}, index=idx).astype("float64")
result = df.copy()
result.loc[mask, :] = df.loc[mask, :]
tm.assert_frame_equal(result, df)
result = df.copy()
result.loc[mask] = df.loc[mask]
tm.assert_frame_equal(result, df)
def test_indexing_with_datetime_tz(self):
# GH#8260
# support datetime64 with tz
idx = Index(date_range("20130101", periods=3, tz="US/Eastern"), name="foo")
dr = date_range("20130110", periods=3)
df = DataFrame({"A": idx, "B": dr})
df["C"] = idx
df.iloc[1, 1] = pd.NaT
df.iloc[1, 2] = pd.NaT
# indexing
result = df.iloc[1]
expected = Series(
[Timestamp("2013-01-02 00:00:00-0500", tz="US/Eastern"), pd.NaT, pd.NaT],
index=list("ABC"),
dtype="object",
name=1,
)
tm.assert_series_equal(result, expected)
result = df.loc[1]
expected = Series(
[Timestamp("2013-01-02 00:00:00-0500", tz="US/Eastern"), pd.NaT, pd.NaT],
index=list("ABC"),
dtype="object",
name=1,
)
tm.assert_series_equal(result, expected)
# indexing - fast_xs
df = DataFrame({"a": date_range("2014-01-01", periods=10, tz="UTC")})
result = df.iloc[5]
expected = Series(
[Timestamp("2014-01-06 00:00:00+0000", tz="UTC")], index=["a"], name=5
)
tm.assert_series_equal(result, expected)
result = df.loc[5]
tm.assert_series_equal(result, expected)
# indexing - boolean
result = df[df.a > df.a[3]]
expected = df.iloc[4:]
tm.assert_frame_equal(result, expected)
# indexing - setting an element
df = DataFrame(
data=pd.to_datetime(["2015-03-30 20:12:32", "2015-03-12 00:11:11"]),
columns=["time"],
)
df["new_col"] = ["new", "old"]
df.time = df.set_index("time").index.tz_localize("UTC")
v = df[df.new_col == "new"].set_index("time").index.tz_convert("US/Pacific")
# trying to set a single element on a part of a different timezone
# this converts to object
df2 = df.copy()
df2.loc[df2.new_col == "new", "time"] = v
expected = Series([v[0], df.loc[1, "time"]], name="time")
tm.assert_series_equal(df2.time, expected)
v = df.loc[df.new_col == "new", "time"] + pd.Timedelta("1s")
df.loc[df.new_col == "new", "time"] = v
tm.assert_series_equal(df.loc[df.new_col == "new", "time"], v)
def test_consistency_with_tz_aware_scalar(self):
# xref gh-12938
# various ways of indexing the same tz-aware scalar
df = Series([Timestamp("2016-03-30 14:35:25", tz="Europe/Brussels")]).to_frame()
df = pd.concat([df, df]).reset_index(drop=True)
expected = Timestamp("2016-03-30 14:35:25+0200", tz="Europe/Brussels")
result = df[0][0]
assert result == expected
result = df.iloc[0, 0]
assert result == expected
result = df.loc[0, 0]
assert result == expected
result = df.iat[0, 0]
assert result == expected
result = df.at[0, 0]
assert result == expected
result = df[0].loc[0]
assert result == expected
result = df[0].at[0]
assert result == expected
def test_indexing_with_datetimeindex_tz(self):
# GH 12050
# indexing on a series with a datetimeindex with tz
index = date_range("2015-01-01", periods=2, tz="utc")
ser = Series(range(2), index=index, dtype="int64")
# list-like indexing
for sel in (index, list(index)):
# getitem
tm.assert_series_equal(ser[sel], ser)
# setitem
result = ser.copy()
result[sel] = 1
expected = Series(1, index=index)
tm.assert_series_equal(result, expected)
# .loc getitem
tm.assert_series_equal(ser.loc[sel], ser)
# .loc setitem
result = ser.copy()
result.loc[sel] = 1
expected = Series(1, index=index)
tm.assert_series_equal(result, expected)
# single element indexing
# getitem
assert ser[index[1]] == 1
# setitem
result = ser.copy()
result[index[1]] = 5
expected = Series([0, 5], index=index)
tm.assert_series_equal(result, expected)
# .loc getitem
assert ser.loc[index[1]] == 1
# .loc setitem
result = ser.copy()
result.loc[index[1]] = 5
expected = Series([0, 5], index=index)
tm.assert_series_equal(result, expected)
def test_partial_setting_with_datetimelike_dtype(self):
# GH9478
# a datetimeindex alignment issue with partial setting
df = DataFrame(
np.arange(6.0).reshape(3, 2),
columns=list("AB"),
index=date_range("1/1/2000", periods=3, freq="1H"),
)
expected = df.copy()
expected["C"] = [expected.index[0]] + [pd.NaT, pd.NaT]
mask = df.A < 1
df.loc[mask, "C"] = df.loc[mask].index
tm.assert_frame_equal(df, expected)
def test_loc_setitem_datetime(self):
# GH 9516
dt1 = Timestamp("20130101 09:00:00")
dt2 = Timestamp("20130101 10:00:00")
for conv in [
lambda x: x,
lambda x: x.to_datetime64(),
lambda x: x.to_pydatetime(),
lambda x: np.datetime64(x),
]:
df = DataFrame()
df.loc[conv(dt1), "one"] = 100
df.loc[conv(dt2), "one"] = 200
expected = DataFrame({"one": [100.0, 200.0]}, index=[dt1, dt2])
tm.assert_frame_equal(df, expected)
def test_series_partial_set_datetime(self):
# GH 11497
idx = date_range("2011-01-01", "2011-01-02", freq="D", name="idx")
ser = Series([0.1, 0.2], index=idx, name="s")
result = ser.loc[[Timestamp("2011-01-01"), Timestamp("2011-01-02")]]
exp = Series([0.1, 0.2], index=idx, name="s")
tm.assert_series_equal(result, exp, check_index_type=True)
keys = [
Timestamp("2011-01-02"),
Timestamp("2011-01-02"),
Timestamp("2011-01-01"),
]
exp = Series(
[0.2, 0.2, 0.1], index=pd.DatetimeIndex(keys, name="idx"), name="s"
)
tm.assert_series_equal(ser.loc[keys], exp, check_index_type=True)
keys = [
Timestamp("2011-01-03"),
Timestamp("2011-01-02"),
Timestamp("2011-01-03"),
]
with pytest.raises(KeyError, match="with any missing labels"):
ser.loc[keys]
def test_series_partial_set_period(self):
# GH 11497
idx = pd.period_range("2011-01-01", "2011-01-02", freq="D", name="idx")
ser = Series([0.1, 0.2], index=idx, name="s")
result = ser.loc[
[pd.Period("2011-01-01", freq="D"), pd.Period("2011-01-02", freq="D")]
]
exp = Series([0.1, 0.2], index=idx, name="s")
tm.assert_series_equal(result, exp, check_index_type=True)
keys = [
pd.Period("2011-01-02", freq="D"),
pd.Period("2011-01-02", freq="D"),
pd.Period("2011-01-01", freq="D"),
]
exp = Series([0.2, 0.2, 0.1], index=pd.PeriodIndex(keys, name="idx"), name="s")
tm.assert_series_equal(ser.loc[keys], exp, check_index_type=True)
keys = [
pd.Period("2011-01-03", freq="D"),
pd.Period("2011-01-02", freq="D"),
pd.Period("2011-01-03", freq="D"),
]
with pytest.raises(KeyError, match="with any missing labels"):
ser.loc[keys]
def test_nanosecond_getitem_setitem_with_tz(self):
# GH 11679
data = ["2016-06-28 08:30:00.123456789"]
index = pd.DatetimeIndex(data, dtype="datetime64[ns, America/Chicago]")
df = DataFrame({"a": [10]}, index=index)
result = df.loc[df.index[0]]
expected = Series(10, index=["a"], name=df.index[0])
tm.assert_series_equal(result, expected)
result = df.copy()
result.loc[df.index[0], "a"] = -1
expected = DataFrame(-1, index=index, columns=["a"])
tm.assert_frame_equal(result, expected)
def test_loc_getitem_across_dst(self):
# GH 21846
idx = pd.date_range(
"2017-10-29 01:30:00", tz="Europe/Berlin", periods=5, freq="30 min"
)
series2 = pd.Series([0, 1, 2, 3, 4], index=idx)
t_1 = pd.Timestamp(
"2017-10-29 02:30:00+02:00", tz="Europe/Berlin", freq="30min"
)
t_2 = pd.Timestamp(
"2017-10-29 02:00:00+01:00", tz="Europe/Berlin", freq="30min"
)
result = series2.loc[t_1:t_2]
expected = pd.Series([2, 3], index=idx[2:4])
tm.assert_series_equal(result, expected)
result = series2[t_1]
expected = 2
assert result == expected
def test_loc_incremental_setitem_with_dst(self):
# GH 20724
base = datetime(2015, 11, 1, tzinfo=tz.gettz("US/Pacific"))
idxs = [base + timedelta(seconds=i * 900) for i in range(16)]
result = pd.Series([0], index=[idxs[0]])
for ts in idxs:
result.loc[ts] = 1
expected = pd.Series(1, index=idxs)
tm.assert_series_equal(result, expected)
def test_loc_setitem_with_existing_dst(self):
# GH 18308
start = | pd.Timestamp("2017-10-29 00:00:00+0200", tz="Europe/Madrid") | pandas.Timestamp |
import pandas as pd
import seaborn as sns
from sklearn.decomposition import PCA
sns.set(style="white", color_codes=True)
import warnings
import matplotlib.pyplot as plt
warnings.filterwarnings("ignore")
dataset = | pd.read_csv('CC.csv') | pandas.read_csv |
import logging
import multiprocessing as mp
from multiprocessing.pool import Pool
import pandas as pd
from .. import util
_logger = logging.getLogger(__name__)
_rec_context = None
class MPRecContext:
def __init__(self, algo):
self.algo = algo
def __enter__(self):
global _rec_context
_logger.debug('installing context for %s', self.algo)
_rec_context = self
return self
def __exit__(self, *args, **kwargs):
global _rec_context
_logger.debug('uninstalling context for %s', self.algo)
_rec_context = None
def _predict_user(algo, user, udf):
watch = util.Stopwatch()
res = algo.predict_for_user(user, udf['item'])
res = | pd.DataFrame({'user': user, 'item': res.index, 'prediction': res.values}) | pandas.DataFrame |
import datetime
import pandas as pd
from dateutil.relativedelta import relativedelta
from timeseries import slice_by_timestamp
from yearly import replace_year
def set_to_begin(values):
"""Set the dates and times in the list to the begin of the month
:param values:
:type values:
:return:
:rtype:
"""
return [pd.Timestamp(v).replace(day=1, hour=0, minute=0, second=0, microsecond=0) for v in values]
def set_to_end(values):
"""Set the dates and times in the list to the end of the month
:param values:
:type values:
:return:
:rtype:
"""
try:
return [pd.Timestamp(v).replace(day=last_day(v), hour=23, minute=59, second=59, microsecond=999999) for v in values]
except TypeError:
return pd.Timestamp(values).replace(day=last_day(values), hour=23, minute=59, second=59, microsecond=999999)
def last_day(dt):
return (pd.Timestamp(dt) + pd.tseries.offsets.MonthEnd(n=0)).day
def is_last_day(dt):
"""Check whether day in ``dt`` is the last day of the month
:param dt: datetime
:type dt: datetime, pd.Timestamp, np.datetime64
:return: True/False
:rtype: bool
"""
return pd.Timestamp(dt).day == last_day(dt)
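# Illustrative usage sketch (not part of the original module) for the month-boundary
# helpers above, on a leap-year date:
# >>> set_to_begin(['2020-02-15 13:45'])
# [Timestamp('2020-02-01 00:00:00')]
# >>> set_to_end(['2020-02-15 13:45'])
# [Timestamp('2020-02-29 23:59:59.999999')]
# >>> last_day('2020-02-15'), is_last_day('2020-02-29')
# (29, True)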
def increment(dt, months=1, microseconds=0):
"""Increment ``ts`` by ``months``. Default is to increment one month. Return a ``pd.Timestamp``
:param dt: timestamp
:type dt: datetime, pd.Timestamp, np.datetime64
:param months: number of months to increment. Negative values are allowed. Default months = 1
:type months: int
:param microseconds: microseconds to add to the right interval: 0 for closed, -1 for right opened interval
:type microseconds: int
:return: ts incremented by ``months``
:rtype: pd.Timestamp
"""
# Don't use pd.Timedelta:
# pd.Timestamp('2000-12-30 07:30') + pd.Timedelta(1, unit='M') == Timestamp('2001-01-29 17:59:06')
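# For comparison (illustrative only): dateutil's relativedelta keeps the wall-clock time, e.g.
# pd.Timestamp('2000-12-30 07:30') + relativedelta(months=1) == Timestamp('2001-01-30 07:30:00')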
dt = pd.Timestamp(dt)
ts1 = pd.Timestamp( | pd.Timestamp(dt) | pandas.Timestamp |
from time import time
from typing import Tuple, Mapping, Optional, Sequence, TYPE_CHECKING
from itertools import product
import sys
import pytest
from scanpy import settings as s
from anndata import AnnData
from scanpy.datasets import blobs
import scanpy as sc
from pandas.testing import assert_frame_equal
import numpy as np
import pandas as pd
from squidpy.gr import ligrec
from squidpy.gr._ligrec import PermutationTest
from squidpy._constants._pkg_constants import Key
_CK = "leiden"
Interactions_t = Tuple[Sequence[str], Sequence[str]]
Complexes_t = Sequence[Tuple[str, str]]
class TestInvalidBehavior:
def test_not_adata(self):
with pytest.raises(TypeError, match=r"Expected `adata` to be of type `anndata.AnnData`"):
ligrec(None, _CK)
def test_adata_no_raw(self, adata: AnnData):
del adata.raw
with pytest.raises(AttributeError, match=r"No `.raw` attribute"):
ligrec(adata, _CK, use_raw=True)
def test_raw_has_different_n_obs(self, adata: AnnData):
adata.raw = blobs(n_observations=adata.n_obs + 1)
with pytest.raises(ValueError, match=rf"Expected `{adata.n_obs}` cells in `.raw`"):
ligrec(adata, _CK)
def test_invalid_cluster_key(self, adata: AnnData, interactions: Interactions_t):
with pytest.raises(KeyError, match=r"Cluster key `foobar` not found"):
ligrec(adata, cluster_key="foobar", interactions=interactions)
def test_cluster_key_is_not_categorical(self, adata: AnnData, interactions: Interactions_t):
adata.obs[_CK] = adata.obs[_CK].astype("string")
with pytest.raises(TypeError, match=rf"Expected `adata.obs\[{_CK!r}\]` to be `categorical`"):
ligrec(adata, _CK, interactions=interactions)
def test_only_1_cluster(self, adata: AnnData, interactions: Interactions_t):
adata.obs["foo"] = 1
adata.obs["foo"] = adata.obs["foo"].astype("category")
with pytest.raises(ValueError, match=r"Expected at least `2` clusters, found `1`."):
ligrec(adata, "foo", interactions=interactions)
def test_invalid_complex_policy(self, adata: AnnData, interactions: Interactions_t):
with pytest.raises(ValueError, match=r"Invalid option `foobar` for `ComplexPolicy`."):
ligrec(adata, _CK, interactions=interactions, complex_policy="foobar")
def test_invalid_fdr_axis(self, adata: AnnData, interactions: Interactions_t):
with pytest.raises(ValueError, match=r"Invalid option `foobar` for `CorrAxis`."):
ligrec(adata, _CK, interactions=interactions, corr_axis="foobar", corr_method="fdr_bh")
def test_too_few_permutations(self, adata: AnnData, interactions: Interactions_t):
with pytest.raises(ValueError, match=r"Expected `n_perms` to be positive"):
ligrec(adata, _CK, interactions=interactions, n_perms=0)
def test_invalid_interactions_type(self, adata: AnnData):
with pytest.raises(TypeError, match=r"Expected either a `pandas.DataFrame`"):
ligrec(adata, _CK, interactions=42)
def test_invalid_interactions_dict(self, adata: AnnData):
with pytest.raises(KeyError, match=r"Column .* is not in `interactions`."):
ligrec(adata, _CK, interactions={"foo": ["foo"], "target": ["bar"]})
with pytest.raises(KeyError, match=r"Column .* is not in `interactions`."):
ligrec(adata, _CK, interactions={"source": ["foo"], "bar": ["bar"]})
def test_invalid_interactions_dataframe(self, adata: AnnData, interactions: Interactions_t):
df = pd.DataFrame(interactions, columns=["foo", "target"])
with pytest.raises(KeyError, match=r"Column .* is not in `interactions`."):
ligrec(adata, _CK, interactions=df)
df = pd.DataFrame(interactions, columns=["source", "bar"])
with pytest.raises(KeyError, match=r"Column .* is not in `interactions`."):
ligrec(adata, _CK, interactions=df)
def test_interactions_invalid_sequence(self, adata: AnnData, interactions: Interactions_t):
interactions += ("foo", "bar", "bar") # type: ignore
with pytest.raises(ValueError, match=r"Not all interactions are of length `2`."):
ligrec(adata, _CK, interactions=interactions)
def test_interactions_only_invalid_names(self, adata: AnnData):
with pytest.raises(ValueError, match=r"After filtering by genes"):
ligrec(adata, _CK, interactions=["foo", "bar", "baz"])
def test_invalid_clusters(self, adata: AnnData, interactions: Interactions_t):
with pytest.raises(ValueError, match=r"Invalid cluster `'foo'`."):
ligrec(adata, _CK, interactions=interactions, clusters=["foo"])
def test_invalid_clusters_mix(self, adata: AnnData, interactions: Interactions_t):
with pytest.raises(ValueError, match=r"Expected a `tuple` of length `2`, found `3`."):
ligrec(adata, _CK, interactions=interactions, clusters=["foo", ("bar", "baz")])
class TestValidBehavior:
def test_do_not_use_raw(self, adata: AnnData, interactions: Interactions_t):
del adata.raw
_ = PermutationTest(adata, use_raw=False)
def test_all_genes_capitalized(self, adata: AnnData, interactions: Interactions_t):
pt = PermutationTest(adata).prepare(interactions=interactions)
genes = pd.Series([g for gs in pt.interactions[["source", "target"]].values for g in gs], dtype="string")
np.testing.assert_array_equal(genes.values, genes.str.upper().values)
np.testing.assert_array_equal(pt._data.columns, pt._data.columns.str.upper())
def test_complex_policy_min(self, adata: AnnData, complexes: Complexes_t):
g = adata.raw.var_names
pt = PermutationTest(adata).prepare(interactions=complexes, complex_policy="min")
assert pt.interactions.shape == (5, 2)
assert np.mean(adata.raw[:, g[2]].X) > np.mean(adata.raw[:, g[3]].X) # S
assert np.mean(adata.raw[:, g[6]].X) < np.mean(adata.raw[:, g[7]].X) # T
assert np.mean(adata.raw[:, g[8]].X) < np.mean(adata.raw[:, g[9]].X) # S
assert np.mean(adata.raw[:, g[10]].X) > np.mean(adata.raw[:, g[11]].X) # T
np.testing.assert_array_equal(pt.interactions["source"], list(map(str.upper, [g[0], g[3], g[5], g[8], g[12]])))
np.testing.assert_array_equal(pt.interactions["target"], list(map(str.upper, [g[1], g[4], g[6], g[11], g[13]])))
def test_complex_policy_all(self, adata: AnnData, complexes: Complexes_t):
g = adata.raw.var_names
pt = PermutationTest(adata).prepare(interactions=complexes, complex_policy="all")
assert pt.interactions.shape == (10, 2)
np.testing.assert_array_equal(
pt.interactions.values,
pd.DataFrame(
[
[g[0], g[1]],
[g[2], g[4]],
[g[3], g[4]],
[g[5], g[6]],
[g[5], g[7]],
[g[8], g[10]],
[g[8], g[11]],
[g[9], g[10]],
[g[9], g[11]],
[g[12], g[13]],
]
)
.applymap(str.upper)
.values,
)
def test_fdr_axis_works(self, adata: AnnData, interactions: Interactions_t):
rc = ligrec(
adata,
_CK,
interactions=interactions,
n_perms=5,
corr_axis="clusters",
seed=42,
n_jobs=1,
show_progress_bar=False,
copy=True,
)
ri = ligrec(
adata,
_CK,
interactions=interactions,
n_perms=5,
corr_axis="interactions",
n_jobs=1,
show_progress_bar=False,
seed=42,
copy=True,
)
np.testing.assert_array_equal(np.where(np.isnan(rc["pvalues"])), np.where(np.isnan(ri["pvalues"])))
mask = np.isnan(rc["pvalues"])
assert not np.allclose(rc["pvalues"].values[mask], ri["pvalues"].values[mask])
def test_inplace_default_key(self, adata: AnnData, interactions: Interactions_t):
key = Key.uns.ligrec(_CK)
assert key not in adata.uns
res = ligrec(adata, _CK, interactions=interactions, n_perms=5, copy=False, show_progress_bar=False)
assert res is None
assert isinstance(adata.uns[key], dict)
r = adata.uns[key]
assert len(r) == 3
assert isinstance(r["means"], pd.DataFrame)
assert isinstance(r["pvalues"], pd.DataFrame)
assert isinstance(r["metadata"], pd.DataFrame)
def test_inplace_key_added(self, adata: AnnData, interactions: Interactions_t):
assert "foobar" not in adata.uns
res = ligrec(
adata, _CK, interactions=interactions, n_perms=5, copy=False, key_added="foobar", show_progress_bar=False
)
assert res is None
assert isinstance(adata.uns["foobar"], dict)
r = adata.uns["foobar"]
assert len(r) == 3
assert isinstance(r["means"], pd.DataFrame)
assert isinstance(r["pvalues"], pd.DataFrame)
assert isinstance(r["metadata"], pd.DataFrame)
def test_return_no_write(self, adata: AnnData, interactions: Interactions_t):
assert "foobar" not in adata.uns
r = ligrec(
adata, _CK, interactions=interactions, n_perms=5, copy=True, key_added="foobar", show_progress_bar=False
)
assert "foobar" not in adata.uns
assert len(r) == 3
assert isinstance(r["means"], pd.DataFrame)
assert isinstance(r["pvalues"], pd.DataFrame)
assert isinstance(r["metadata"], pd.DataFrame)
@pytest.mark.parametrize("fdr_method", [None, "fdr_bh"])
def test_pvals_in_correct_range(self, adata: AnnData, interactions: Interactions_t, fdr_method: Optional[str]):
r = ligrec(
adata,
_CK,
interactions=interactions,
n_perms=5,
copy=True,
show_progress_bar=False,
corr_method=fdr_method,
threshold=0,
)
if np.sum(np.isnan(r["pvalues"].values)) == np.prod(r["pvalues"].shape):
assert fdr_method == "fdr_bh"
else:
assert np.nanmax(r["pvalues"].values) <= 1.0, np.nanmax(r["pvalues"].values)
assert np.nanmin(r["pvalues"].values) >= 0, np.nanmin(r["pvalues"].values)
def test_result_correct_index(self, adata: AnnData, interactions: Interactions_t):
r = ligrec(adata, _CK, interactions=interactions, n_perms=5, copy=True, show_progress_bar=False)
np.testing.assert_array_equal(r["means"].index, r["pvalues"].index)
np.testing.assert_array_equal(r["pvalues"].index, r["metadata"].index)
np.testing.assert_array_equal(r["means"].columns, r["pvalues"].columns)
assert not np.array_equal(r["means"].columns, r["metadata"].columns)
assert not np.array_equal(r["pvalues"].columns, r["metadata"].columns)
def test_result_is_sparse(self, adata: AnnData, interactions: Interactions_t):
interactions = pd.DataFrame(interactions, columns=["source", "target"])
if TYPE_CHECKING:
assert isinstance(interactions, pd.DataFrame)
interactions["metadata"] = "foo"
r = ligrec(adata, _CK, interactions=interactions, n_perms=5, seed=2, copy=True, show_progress_bar=False)
assert r["means"].sparse.density <= 0.15
assert r["pvalues"].sparse.density <= 0.95
with pytest.raises(AttributeError, match=r"Can only use the '.sparse' accessor with Sparse data."):
_ = r["metadata"].sparse
np.testing.assert_array_equal(r["metadata"].columns, ["metadata"])
np.testing.assert_array_equal(r["metadata"]["metadata"], interactions["metadata"])
@pytest.mark.parametrize("n_jobs", [1, 2])
def test_reproducibility_cores(self, adata: AnnData, interactions: Interactions_t, n_jobs: int):
r1 = ligrec(
adata,
_CK,
interactions=interactions,
n_perms=25,
copy=True,
show_progress_bar=False,
seed=42,
n_jobs=n_jobs,
)
r2 = ligrec(
adata,
_CK,
interactions=interactions,
n_perms=25,
copy=True,
show_progress_bar=False,
seed=42,
n_jobs=n_jobs,
)
r3 = ligrec(
adata,
_CK,
interactions=interactions,
n_perms=25,
copy=True,
show_progress_bar=False,
seed=43,
n_jobs=n_jobs,
)
assert r1 is not r2
np.testing.assert_allclose(r1["means"], r2["means"])
np.testing.assert_allclose(r2["means"], r3["means"])
np.testing.assert_allclose(r1["pvalues"], r2["pvalues"])
assert not np.allclose(r3["pvalues"], r1["pvalues"])
assert not np.allclose(r3["pvalues"], r2["pvalues"])
def test_reproducibility_numba_parallel_off(self, adata: AnnData, interactions: Interactions_t):
t1 = time()
r1 = ligrec(
adata,
_CK,
interactions=interactions,
n_perms=25,
copy=True,
show_progress_bar=False,
seed=42,
numba_parallel=False,
)
t1 = time() - t1
t2 = time()
r2 = ligrec(
adata,
_CK,
interactions=interactions,
n_perms=25,
copy=True,
show_progress_bar=False,
seed=42,
numba_parallel=True,
)
t2 = time() - t2
assert r1 is not r2
# for such a small data, overhead from parallelization is too high
assert t1 <= t2, (t1, t2)
np.testing.assert_allclose(r1["means"], r2["means"])
np.testing.assert_allclose(r1["pvalues"], r2["pvalues"])
def test_paul15_correct_means(self, paul15: AnnData, paul15_means: pd.DataFrame):
res = ligrec(
paul15,
"paul15_clusters",
interactions=list(paul15_means.index.to_list()),
corr_method=None,
copy=True,
show_progress_bar=False,
threshold=0.01,
seed=0,
n_perms=1,
n_jobs=1,
)
np.testing.assert_array_equal(res["means"].index, paul15_means.index)
np.testing.assert_array_equal(res["means"].columns, paul15_means.columns)
np.testing.assert_allclose(res["means"].values, paul15_means.values)
def test_reproducibility_numba_off(
self, adata: AnnData, interactions: Interactions_t, ligrec_no_numba: Mapping[str, pd.DataFrame]
):
r = ligrec(
adata, _CK, interactions=interactions, n_perms=5, copy=True, show_progress_bar=False, seed=42, n_jobs=1
)
np.testing.assert_array_equal(r["means"].index, ligrec_no_numba["means"].index)
np.testing.assert_array_equal(r["means"].columns, ligrec_no_numba["means"].columns)
np.testing.assert_array_equal(r["pvalues"].index, ligrec_no_numba["pvalues"].index)
np.testing.assert_array_equal(r["pvalues"].columns, ligrec_no_numba["pvalues"].columns)
np.testing.assert_allclose(r["means"], ligrec_no_numba["means"])
np.testing.assert_allclose(r["pvalues"], ligrec_no_numba["pvalues"])
np.testing.assert_array_equal(np.where(np.isnan(r["pvalues"])), np.where(np.isnan(ligrec_no_numba["pvalues"])))
def test_logging(self, adata: AnnData, interactions: Interactions_t, capsys):
s.logfile = sys.stderr
s.verbosity = 4
ligrec(
adata,
_CK,
interactions=interactions,
n_perms=5,
copy=False,
show_progress_bar=False,
complex_policy="all",
key_added="ligrec_test",
n_jobs=2,
)
err = capsys.readouterr().err
assert "DEBUG: Removing duplicate interactions" in err
assert "DEBUG: Removing duplicate genes in the data" in err
assert "DEBUG: Creating all gene combinations within complexes" in err
assert "DEBUG: Removing interactions with no genes in the data" in err
assert "DEBUG: Removing genes not in any interaction" in err
assert "Running `5` permutations on `25` interactions and `25` cluster combinations using `2` core(s)" in err
assert "Adding `adata.uns['ligrec_test']`" in err
def test_non_uniqueness(self, adata: AnnData, interactions: Interactions_t):
# add complexes
expected = {(r.upper(), l.upper()) for r, l in interactions}
interactions += ( # type: ignore
(f"{interactions[-1][0]}_{interactions[-1][1]}", f"{interactions[-2][0]}_{interactions[-2][1]}"),
) * 2
interactions += interactions[:3] # type: ignore
res = ligrec(
adata,
_CK,
interactions=interactions,
n_perms=1,
copy=True,
show_progress_bar=False,
seed=42,
numba_parallel=False,
)
assert len(res["pvalues"]) == len(expected)
assert set(res["pvalues"].index.to_list()) == expected
@pytest.mark.xfail(reason="AnnData cannot handle writing MultiIndex")
def test_writeable(self, adata: AnnData, interactions: Interactions_t, tmpdir):
ligrec(adata, _CK, interactions=interactions, n_perms=5, copy=False, show_progress_bar=False, key_added="foo")
res = adata.uns["foo"]
sc.write(tmpdir / "ligrec.h5ad", adata)
bdata = sc.read(tmpdir / "ligrec.h5ad")
for key in ["means", "pvalues", "metadata"]:
assert_frame_equal(res[key], bdata.uns["foo"][key])
@pytest.mark.parametrize("use_raw", [False, True])
def test_gene_symbols(self, adata: AnnData, use_raw: bool):
gene_ids = adata.var["gene_ids"]
interactions = tuple(product(gene_ids[:5], gene_ids[:5]))
res = ligrec(
adata,
_CK,
interactions=interactions,
n_perms=5,
use_raw=use_raw,
copy=True,
show_progress_bar=False,
gene_symbols="gene_ids",
)
np.testing.assert_array_equal(res["means"].index, | pd.MultiIndex.from_tuples(interactions) | pandas.MultiIndex.from_tuples |
import time
import warnings
from concurrent.futures.process import ProcessPoolExecutor
from typing import Tuple
import numpy as np
import pandas as pd
import torch
from sklearn.preprocessing import LabelBinarizer
from torch.utils.data import Dataset
from tqdm.auto import tqdm
from .base import BaseClassifier, ClassifierNotTrainedError, BaseXModel
from .ext_models.brl.RuleListClassifier import RuleListClassifier
from ..utils.base import NotAvailableError, brl_extracting_formula
from ..utils.metrics import Metric, Accuracy
class XBRLClassifier(BaseClassifier, BaseXModel):
"""
BRL class module. It provides explanations.
:param n_classes: int
number of classes to classify - dimension of the output layer of the network
:param n_features: int
number of features - dimension of the input space
:param discretize: bool
whether to discretize data or not
:param feature_names: list
name of the features, if not given substituted with f1, f2, ... fd
:param class_names: list
name of the classes, if not given substituted with class_1, class_2, ... class_n
"""
def __init__(self, n_classes: int, n_features: int, discretize: bool = True, feature_names: list = None,
class_names: list = None, n_processes : int = 1, device: torch.device = torch.device('cpu'),
name: str = "brl.pth", ):
super().__init__(name=name, device=device)
assert device == torch.device('cpu'), "Only cpu training is provided with BRL models."
self.n_classes = n_classes
self.n_features = n_features
self.discretize = discretize
self.n_processes = n_processes
self.model = []
self.class_names = []
for i in range(self.n_classes):
class_name = class_names[i] if class_names is not None else f"class_{i}"
model = RuleListClassifier(max_iter=10000, class1label=class_name, verbose=False)
self.model.append(model)
self.class_names.append(class_name)
self.features_names = feature_names if feature_names is not None else [f"f{i}" for i in range(n_features)]
if n_classes == 1:
n_classes = 2
self.explanations = ["" for _ in range(n_classes)]
def forward(self, x, **kwargs) -> torch.Tensor:
"""
forward method extended from Classifier. Input data is passed to the per-class BRL models
and the positive-class probability from each model is returned.
:param x: input tensor
:return: output classification
"""
x = x.detach().cpu().numpy()
if self.discretize:
x = self._discretize(x)
outputs = []
pbar = tqdm(range(self.n_classes), desc="BRL predicting classes")
futures = []
if self.n_processes > 1:
executor = ProcessPoolExecutor(self.n_processes)
for i in range(self.n_classes):
args = {
"self": self.model[i],
"X": x,
"use_only_d_star": True
}
futures.append(executor.submit(RuleListClassifier.predict_proba, **args))
for i in range(self.n_classes):
if self.n_processes > 1:
brl_outputs = futures[i].result()
else:
brl_outputs = self.model[i].predict_proba(x, use_only_d_star=True)
# BRL outputs both the negative prediction (output[0]) and the positive (output[1])
output = brl_outputs[:, 1]
outputs.append(torch.tensor(output))
pbar.update()
pbar.close()
outputs = torch.stack(outputs, dim=1)
return outputs
def _discretize(self, train_data: np.ndarray) -> np.ndarray:
train_data = [[self.features_names[i] if item > 0.5 else "~" + self.features_names[i]
for i, item in enumerate(array)]
for array in train_data]
return np.asarray(train_data)
def _binarize_labels(self, train_labels: torch.tensor):
if len(train_labels.shape) == 1:
train_labels = np.expand_dims(train_labels, axis=1)
if len(np.unique(train_labels)) > 2:
train_labels = LabelBinarizer().fit_transform(train_labels)
print(f"Binarized labels. Labels {train_labels.shape}")
elif len(np.unique(train_labels)) == 2 and self.n_classes == 2:
train_labels = np.hstack((1 - train_labels, train_labels))
else:
print(f"Labels {train_labels.shape}")
return train_labels
def get_loss(self, output: torch.Tensor, target: torch.Tensor, **kwargs) -> None:
"""
Loss is not used in BRL as it is not a gradient based algorithm. Therefore, if this function
is called an error is thrown.
:param output: output tensor from the forward function
:param target: label tensor
:param kwargs:
:raise: NotAvailableError
"""
raise NotAvailableError()
def get_device(self) -> torch.device:
"""
Return the device on which the classifier is actually loaded. For BRL is always cpu
:return: device in use
"""
return torch.device("cpu")
def fit(self, train_set: Dataset, val_set: Dataset = None, train_sample_rate: float = 0.1,
metric: Metric = Accuracy(), verbose: bool = True, save=True, eval=True, **kwargs) -> pd.DataFrame:
"""
fit function that executes the common operations generally performed during training.
Each class is fitted with its own BRL rule list (no gradient-based optimizer is employed).
:param train_set: training set on which to train
:param val_set: validation set used for early stopping
:param train_sample_rate: fraction of the training data randomly sampled before fitting (see _random_sample_data)
:param metric: metric to evaluate the predictions of the network
:param verbose: whether to output or not epoch metrics
:param save: whether to save the model or not
:param eval: whether to evaluate training and validation data (it may takes time)
:return: pandas dataframe collecting the metrics from each epoch
"""
# Loading dataset
train_loader = torch.utils.data.DataLoader(train_set, len(train_set))
train_data, train_labels = next(train_loader.__iter__())
train_data, train_labels = self._random_sample_data(train_sample_rate, train_data, train_labels)
train_data = train_data.numpy()
train_labels = train_labels.numpy()
train_labels = self._binarize_labels(train_labels)
if self.discretize:
print("Discretized features")
train_data = self._discretize(train_data)
features = self.features_names
else:
features = []
# Fitting a BRL classifier for each class in parallel
futures = []
pbar = tqdm(range(self.n_classes), desc="BRL training classes")
if self.n_processes > 1:
executor = ProcessPoolExecutor(self.n_processes)
for i in range(self.n_classes):
self.model[i].verbose = verbose
args = {
"self": self.model[i],
"X" : train_data,
"y" : train_labels[:, i],
"feature_labels" : self.features_names,
"undiscretized_features": features
}
futures.append(executor.submit(RuleListClassifier.fit, **args))
for i in range(self.n_classes):
if self.n_processes > 1:
self.model[i] = futures[i].result()
else:
self.model[i] = self.model[i].fit(X=train_data, y=train_labels[:, i],
feature_labels=self.features_names, undiscretized_features=features)
pbar.update()
pbar.close()
# Compute accuracy, f1 and constraint_loss on the whole train, validation dataset
if eval:
train_acc = self.evaluate(train_set, metric=metric)
if val_set is not None:
val_acc = self.evaluate(val_set, metric=metric)
else:
val_acc = 0
else:
train_acc, val_acc = 0., 0.
if verbose:
print(f"Train_acc: {train_acc:.1f}, Val_acc: {val_acc:.1f}")
if save:
self.save()
# Performance dictionary
performance_dict = {
"tot_loss": [0],
"train_accs": [train_acc],
"val_accs": [val_acc],
"best_epoch": [0],
}
performance_df = | pd.DataFrame(performance_dict) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# # Import Base Packages
# In[ ]:
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings('ignore')
# # Interface function for feature engineering
# In[ ]:
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import PolynomialFeatures
column_names = [ 'age', 'workclass', 'fnlwgt', 'education', 'educational-num', 'marital-status', 'occupation', 'relationship', 'race', 'gender', 'capital-gain', 'capital-loss', 'hours-per-week', 'native-country', 'income' ]
columns_to_encoding = [ 'workclass', 'marital-status', 'occupation', 'relationship', 'race', 'gender' ]
columns_to_normalize = [ 'age', 'educational-num', 'hours-per-week', 'capital-gain', 'capital-loss' ]
le = LabelEncoder()
scaler = StandardScaler()
pl = PolynomialFeatures(2, include_bias=False)
def feature_engineering(filename, train=True):
df = | pd.read_csv(filename, index_col=False) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Sun May 22 10:30:01 2016
SC process signups functions
@author: tkc
"""
#%%
import pandas as pd
import numpy as np
from datetime import datetime, date
import re, glob, math
from openpyxl import load_workbook # writing to Excel
from PIL import Image, ImageDraw, ImageFont
import tkinter as tk
import pkg.SC_config as cnf # _OUTPUT_DIR and _INPUT_DIR
#%%
def combinephrases(mylist):
''' Combine list of phrases using commas & and '''
if len(mylist)==1:
return str(mylist[0])
elif len(mylist)==2:
tempstr=str(mylist[0])+ ' and ' +str(mylist[1])
return tempstr
else:
rest=mylist[:-1]
rest=[str(i) for i in rest]
last=mylist[-1]
tempstr=', '.join(rest) +' and ' + str(last)
return tempstr
#%%
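# Usage sketch for combinephrases (illustrative, not in the original file):
# combinephrases(['2 1st boys']) -> '2 1st boys'
# combinephrases(['2 1st boys', '3 2nd girls']) -> '2 1st boys and 3 2nd girls'
# combinephrases(['a', 'b', 'c']) -> 'a, b and c'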
def writetoxls(df, sheetname, xlsfile):
''' Generic write of given df to specified tab of given xls file '''
book=load_workbook(xlsfile)
writer=pd.ExcelWriter(xlsfile, engine='openpyxl', datetime_format='mm/dd/yy', date_format='mm/dd/yy')
writer.book=book
writer.sheets = dict((ws.title, ws) for ws in book.worksheets)
df.to_excel(writer,sheet_name=sheetname,index=False) # this overwrites existing file
writer.save() # saves xls file with all modified data
return
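# Usage sketch (file and sheet names below are hypothetical, for illustration only):
# writetoxls(signups, 'Rawdata', cnf._OUTPUT_DIR + '\\SCsignups.xlsx')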
def loadtransfers(df, signups):
''' Load transferred players and add to signups (then run player ID);
transfers added as normal players but need fake billing entries
'''
df=df.rename(columns={'Fname':'First','Lname':'Last','Street':'Address','Parish':'Parish of Registration'})
df=df.rename(columns={'Phone':'Phone1','Birthdate':'DOB','Sex':'Gender','Open/Closed':'Ocstatus'})
# Replace Girl, Boy with m f
df.loc[:,'Gender']=df.Gender.replace('F','Girl')
df.loc[:,'Gender']=df.Gender.replace('M','Boy')
# Manually enter sport
print('Enter sport for transferred players')
sport=input()
df.loc[:,'Sport']=sport
df=df.dropna(subset=['First']) # remove blank rows if present
mycols=[col for col in df if col in signups]
df=df[mycols]
df=formatnamesnumbers(df)
# place date/transfer in timestamp
mystamp=datetime.strftime(datetime.now(),'%m/%d/%y')+' transfer'
df.loc[:,'Timestamp']=mystamp
mycols=signups.columns
signups=signups.append(df, ignore_index=True)
signups=signups[mycols]
return signups
def packagetransfers(teams, Mastersignups, famcontact, players, season, year, acronyms, messfile):
''' Package roster and contact info by sport- school and save as separate xls files
also generate customized e-mails in single log file (for cut and paste send to appropriate persons)
args:
teams - loaded team list
mastersignups - signups w/ team assignment
players -player DB
famcontact - family contact db
season - Fall, Winter or Spring
year - starting sports year (i.e. 2019 for 2019-20 school year)
acronyms - school/parish specific abbreviations
messfile - e-mail message template w/ blanks
returns:
'''
teams=teams[pd.notnull(teams['Team'])]
transferteams=np.ndarray.tolist(teams[teams['Team'].str.contains('#')].Team.unique())
transSU=Mastersignups[Mastersignups['Team'].isin(transferteams)]
# ensure that these are from correct season/year
sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Track','Softball','Baseball','T-ball']}
sportlist=sportsdict.get(season)
transSU=transSU.loc[(transSU['Sport'].isin(sportlist)) & (transSU['Year']==year)] # season is not in mastersignups... only individual sports
# get family contact info from famcontacts
transSU=pd.merge(transSU, famcontact, how='left', on=['Famkey'], suffixes=('','_r'))
# get school from players.csv
transSU=pd.merge(transSU, players, how='left', on=['Plakey'], suffixes=('','_r2'))
# get division from Teams xls (for roster)
transSU=pd.merge(transSU, teams, how='left', on=['Team'], suffixes=('','_r3')) # effectively adds other team info for roster toall players
transSU.loc[:,'Role']='Player' # add column for role
# transSU['Open/Closed']='Closed'
# Sort by grade pre-split
transSU.loc[:,'Grade']=transSU.Grade.replace('K',0)
transSU.loc[:,'Grade']=transSU.Grade.apply(int)
transSU=transSU.sort_values(['Grade'], ascending=True)
transSU.loc[:,'Grade']=transSU.Grade.replace(0,'K') # replace K with zero to allow sorting
# Column for sorting by transferred to school
transSU.loc[:,'Transchool']=transSU['Team'].str.split('#').str[0]
grouped=transSU.groupby(['Sport','Transchool'])
for [sport, school], group in grouped:
# prepare roster tab
xlsname=cnf._OUTPUT_DIR+'\\Cabrini_to_'+school+'_'+sport+'_'+str(year)+'.xlsx'
writer=pd.ExcelWriter(xlsname, engine='openpyxl')
Transferroster=organizeroster(group)
Transferroster=Transferroster.sort_values(['Team', 'Sex', 'Grade'], ascending=True)
Transferroster=replaceacro(Transferroster,acronyms)
Transferroster.to_excel(writer,sheet_name='roster',index=False)
# prep contacts tab
mycols=['First', 'Last', 'Grade', 'Gender', 'School', 'Phone1', 'Text1','Email1', 'Phone2', 'Text2',
'Email2', 'Pfirst1', 'Plast1', 'Pfirst2', 'Plast2', 'Team']
Transfercontacts=group[mycols]
Transfercontacts.to_excel(writer, sheet_name='contacts', index=False)
writer.save()
# Now generate list of e-mails for all schools/directors
logfile='transfers_director_emails_log.txt'
with open(logfile,'w+') as emaillog:
# Read generic file to sport director
with open(messfile, 'r') as file:
blankmessage=file.read()
for [sport, school], group in grouped:
plagroup=group.groupby(['Grade', 'Gender'])
platypes=[] # list of # of players by grade, gender
gradedict={'K':'K', 1:'1st', 2:'2nd',3:'3rd',4:'4th',5:'5th',6:'6th', 7:'7th',8:'8th'}
genderdict={'f':'girls', 'F':'girls','m':'boys','M':'boys'}
for [grade, gender], group in plagroup:
numplays=str(int(group['Grade'].count()))
grname=gradedict.get(grade)
genname=genderdict.get(gender)
platypes.append(numplays+' '+grname+' '+genname)
plalist=combinephrases(platypes)
thismess=blankmessage.replace('$SCHOOL', school)
thismess=thismess.replace('$SPORT', sport)
thismess=thismess.replace('$PLALIST', plalist)
emaillog.write(thismess)
emaillog.write('\n\n')
return
def findcards():
'''Search ID cards folder and return player # and file link
cards resized to 450x290 pix jpg in photoshop (scripts-image processor)
keys are either player number as string or coach CYC ID, vals are links to files'''
cardlist=glob.glob('%s\\IDcards\\*.jpg' %cnf._OUTPUT_DIR, recursive=True)
# construct list of [card #, filename]
nums=[i.split('\\')[-1] for i in cardlist]
nums=[i.split('_')[0] if '_' in i else i.split('--')[0] for i in nums ]
cards={} # dict for card numbers/filenames
for i,num in enumerate(nums):
cards.update({num: cardlist[i]})
return cards
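# Shape of the returned dict (example keys/paths are hypothetical):
# {'3472': '...\\IDcards\\3472_Jane_Doe.jpg', 'C0123': '...\\IDcards\\C0123--Smith.jpg'}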
def makethiscard(IDlist, team):
''' From a team's list of card file links (or "First Last" names when a card is missing),
open/resize the card images and paste them into a single sheet image '''
# make the master image and determine image array size
margin=10 # pix on all sides
if len(IDlist)<11: # use 2 x 5 array (horiz)
wide=2
high=5
elif len(IDlist)<13: # 4w x 3 h (vert)
wide=4
high=3
elif len(IDlist)<22: # 3x by 5-7 high (horiz); max 21
wide=3
high=math.ceil(len(IDlist)/3)
else: # more than 21 ... yikes
wide=3
high=math.ceil(len(IDlist)/3)
cardimage = Image.new('RGB', (450*wide+2*margin, 300*high+2*margin), "white") # blank image of correct size
draw=ImageDraw.Draw(cardimage) # single draw obj for adding missing card names
ttfont=ImageFont.truetype('arial.ttf', size=36)
for i,fname in enumerate(IDlist):
row=i//high # remainder is row
col=i%high # mod is correct column
xpos=margin+row*450
ypos=margin+col*300
try:
thiscard=Image.open(fname)
thiscard=thiscard.resize((450, 300), Image.ANTIALIAS)
cardimage.paste(im=thiscard, box=(xpos, ypos)) # paste w/ xpos,ypos as upper left
except: # occurs when "first last" present instead of file name/path
# blankcard=Image.new('RGB', (450, 300)) # make blank image as placeholder
draw.text((xpos+50,ypos+100),fname,font=ttfont, fill="red")
return cardimage
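# Layout sketch (illustrative): up to 10 cards -> 2 wide x 5 high grid -> a 920 x 1520 px
# sheet (450*2 + 2*10 margin wide, 300*5 + 2*10 margin high); 11-12 cards -> 4 x 3; 13-21 -> 3 columns.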
''' TESTING
i=0 team=teamlist[i]
'''
def makeCYCcards(df, players, teams, coaches, season, year, **kwargs):
''' From mastersignups and teams, output contact lists for all teams/all sports separately
team assignments must be finished
args:
df -- mastersignups dataframe
players - player info dataframe
teams - this year's teams csv
coaches - full coach CYC info list
season - Fall, Winter or Spring
kwargs:
showmissing - True (shows missing player's name); False- skip missing player
otherSchools - default False (also make card sheets for transferred teams/players)
kwargs={'showmissing':False}
missing = makeCYCcards(Mastersignups, players, teams, coaches, season, year, **{'showmissing':True} )
missing = makeCYCcards(Mastersignups, players, teams, coaches, season, year, **{'showmissing':False} )
'''
# Slice by sport: Basketball (null for winter?), Soccer, Volleyball, Baseball, T-ball, Softball, Track)
cards=findcards() # dictionary with number: filename combo for existing CYC cards
df=df[(df['Year']==year)]
df=df.reset_index(drop=True)
sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Track','Softball','Baseball','T-ball']}
sportlist=sportsdict.get(season)
df=df[df['Sport'].isin(sportlist)] # season is not in mastersignups... only individual sports
# Make list of teams that need cards (all track and others >1st grade)
def processGrade(val):
if val=='K':
return 0
else:
return int(val)
teams.loc[:,'Grade'] = teams['Grade'].apply(lambda x:processGrade(x))
if not kwargs.get('otherSchools', False):
# all transfer teams with contain # (e.g. SMOS#3G) so remove these
# dropped by default
teams = teams[~teams['Team'].str.contains('#')]
# need track teams or any team from grades 2+
cardTeamList= teams[ (teams['Grade']>1) | (teams['Sport']=='Track') ]['Team'].unique()
df=df[ df['Team'].isin(cardTeamList) ]
df=df.sort_values(['Last'])
# plakeys as string will be easiest for below matching
df.loc[:,'Plakey']=df['Plakey'].astype(int)
df.loc[:,'Plakey']=df['Plakey'].astype(str)
def getName(gr, pk):
# get name from plakey as string
match=gr[gr['Plakey']==pk]
name=match.iloc[0]['First'] + ' ' + match.iloc[0]['Last']
return name
teamgrouped = df.groupby(['Team'])
missinglist=[] # list of plakeys with missing card
for team, gr in teamgrouped:
# keys in card dict are strings
IDlist = [str(int(i)) for i in gr.Plakey.unique()]
missinglist.extend([i for i in gr.Plakey.unique() if i not in cards.keys() ])
if not kwargs.get('showmissing', False):
# Shows only valid cards, drops missing names
IDlist = [ cards.get(i) for i in IDlist if i in cards.keys() ]
filename='Cards_'+ team +'.jpg'
else: # show cards and missing name when card image not in IDcards folder
IDlist = [cards.get(i) if i in cards.keys() else getName(gr, i) for i in IDlist ]
filename='Cards_'+ team +'_all.jpg'
# get team's coaches
IDlist.extend(getcoachIDs(team, teams, coaches, cards)) # add coach ID image file or first/last if missing
cardimage =makethiscard(IDlist, team) # directly saved
# save the card file
cardimage.save(cnf._OUTPUT_DIR+'\\'+filename)
missingcards=players[players['Plakey'].isin(missinglist)]
missingcards=missingcards.sort_values(['Grade','Last'])
return missingcards
def getcoachIDs(team, teams, coaches, cards):
''' Returns CYC IDs for all team's coaches '''
thisteam=teams[teams['Team']==team]
IDlist=[]
if len(thisteam)!=1:
print(team, 'not found in current teams list')
return IDlist # blank list
thisteam=thisteam.dropna(subset=['Coach ID'])
if len(thisteam)!=1:
print('Coach ID not found for', team)
return IDlist # blank list
if thisteam.iloc[0]['Coach ID']!='': # possibly blank
thisID=thisteam.iloc[0]['Coach ID'].strip()
if thisID in cards:
IDlist.append(cards.get(thisID,'')) # file path to this coach's ID
else: # get first/last
thiscoach=coaches[coaches['Coach ID']==thisID]
if len(thiscoach)==1:
IDlist.append(thiscoach.iloc[0]['Fname']+' '+thiscoach.iloc[0]['Lname'])
else:
print("Couldn't find coach ", thisID)
thisteam=thisteam.dropna(subset=['AssistantIDs'])
if len(thisteam)==1: # grab asst IDs if they exist
asstIDs=thisteam.iloc[0]['AssistantIDs']
asstIDs=[str(s).strip() for s in asstIDs.split(",")]
for i, asstID in enumerate(asstIDs):
if asstID in cards:
IDlist.append(cards.get(asstID,'')) # found assistant coaches ID card image
else: # can't find ... get assistant first last
thisasst=coaches[coaches['Coach ID']==asstID] # matching asst coach row
if len(thisasst)==1:
IDlist.append(thisasst.iloc[0]['Fname']+' '+thisasst.iloc[0]['Lname'])
else:
print("Couldn't find coach ", asstID)
return IDlist
def autocsvbackup(df, filename, newback=True):
''' Pass a df (e.g. players) for backup and a basename (e.g. "family_contact") for the file;
finds the list of existing backups and keeps those of certain ages based on a targetdates list.
Unclear why newback=False was needed (always True here to make a new backup).
'''
# TODO fix this!
pass
return
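# Illustrative sketch only (not the original implementation): one possible minimal
# version of the timestamped CSV backup described in the docstring above.
# def _autocsvbackup_sketch(df, filename, backup_dir=cnf._OUTPUT_DIR):
#     stamp = datetime.strftime(datetime.now(), '%d%b%y')
#     df.to_csv('%s\\%s_%s.csv' % (backup_dir, filename, stamp), index=False)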
def parseDate(val):
'''
Conversion of date string to datetime.date (always header line 2 40:60)
Possible date formats: 20180316 (newer style) or 03/15/2018 (older style)
For NGA files Date format changed from 03/15/2018 to 20180316 (on jday 75 in 2018)
time format: 221100 or 22:11:00 (sometimes w/ UTC)
not terribly concerned w/ time
possible date formats: 0) 03/01/2018, 3/1/2018, 3/1/18 or 03/01/18
1) 2019-1-1 or 2019-01-01  2) 01-01-2019
'''
if not isinstance(val, str):
return val
else:
if ' ' in val: # Remove time substring (but will fail for 3 Oct 2019)
val=val.split(' ')[0] # strip time substring if present
patterns=['\d{1,2}/\d{1,2}/\d{2,4}', '\d{4}-\d{1,2}-\d{1,2}', '\d{1,2}-\d{1,2}-\d{4}']
for i, patt in enumerate(patterns):
match=re.search(r'%s' %patt, val)
if match:
if i==0: # Extract 03/16/2018 (or rarely 28/10/2019 style)
try:
(mo,dy,yr)=[int(i) for i in val.split('/')]
if yr<100 and len(str(yr))==2: # handle 2 digit year
yr=int('20'+str(yr))
if mo < 13: # normal US version (month first)
return datetime(yr, mo, dy).date()
# handle day month reversal
elif dy<13: # possible month day reverse
print('Month and day reverse for %s' %val)
return datetime(yr, dy, mo).date() # Assume month/day switch
except:
print('Problem extracting date from ', val)
return None
if i==1: # extract 2017-01-01 style (year first)
try:
(yr,mo,dy)=[int(i) for i in val.split('-')]
if mo < 13: # normal US version (month first)
return datetime(yr, mo, dy).date()
# handle day month reversal
elif dy<13: # possible month day reverse
print('Month and day reverse for %s' %val)
return datetime(yr, dy, mo).date() # Assume month/day switch
except:
print('Problem extracting date from ', val)
return val
if i==2: # extract 01-01-2019 style (year last)
try:
(mo,dy,yr)=[int(i) for i in val.split('-')]
if mo < 13: # normal US version (month first)
return datetime(yr, mo, dy).date()
# handle day month reversal
elif dy<13: # possible month day reverse
print('Month and day reverse for %s' %val)
return datetime(yr, dy, mo).date() # Assume month/day switch
except:
print('Problem extracting date from ', val)
return val
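# Doctest-style sketch (illustrative) of the conversions handled above:
# parseDate('03/16/2018') -> datetime.date(2018, 3, 16)
# parseDate('1/1/19') -> datetime.date(2019, 1, 1)  (two-digit year expanded to 2019)
# parseDate('2019-01-01 22:11:00') -> datetime.date(2019, 1, 1)  (time substring stripped)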
def loadProcessPlayerInfo():
'''Loads and processes players & family contacts (but not the signup file).
Takes no arguments; reads players.csv and family_contact.csv from cnf._INPUT_DIR
and returns (players, famcontact).
'''
players=pd.read_csv(cnf._INPUT_DIR + '\\players.csv', encoding='cp437') # load existing player data (need to find correct DOB format)
players.loc[:,'Grade']=players.Grade.replace('K',0)
players.loc[:,'Grade']=players.Grade.replace('pK',0) # just make them 0s for now
players.loc[:,'Grade']=players.Grade.astype(int)
# TODO use fixdates if players imports as string (various formats possible)
# players['DOB']=players['DOB'].apply(lambda x: fixDates(x))
if not isinstance(players.DOB[0], pd.Timestamp):# sometimes direct import to pd timestamp works, other times not
players.loc[:,'DOB']=players.DOB.apply(lambda x: parseDate(x))
famcontact=pd.read_csv(cnf._INPUT_DIR + '\\family_contact.csv', encoding='cp437') # load family contact info
famcontact=formatnamesnumbers(famcontact)
return players, famcontact
def loadProcessGfiles(gsignups, season, year):
'''Loads and processes players, family contacts and signup file, gets active
season and year
args:
gsignups -- google signups
season - 'Fall', 'Winter', or 'Spring'
year - 4 digit int (uses the fall value for the whole school year, i.e. the
2018-19 school year is always 2018)
'''
players=pd.read_csv(cnf._INPUT_DIR + '\\players.csv', encoding='cp437') # load existing player data (need to find correct DOB format)
players.loc[:,'Grade']=players.Grade.replace('K',0)
players.loc[:,'Grade']=players.Grade.replace('pK',0) # just make them 0s for now
players.loc[:,'Grade']=players.Grade.astype(int)
# TODO use fixdates if players imports as string (various formats possible)
# players['DOB']=players['DOB'].apply(lambda x: fixDates(x))
if not isinstance(players.DOB[0], pd.Timestamp):# sometimes direct import to pd timestamp works, other times not
players.loc[:,'DOB']=players.DOB.apply(lambda x: parseDate(x))
famcontact=pd.read_csv(cnf._INPUT_DIR + '\\family_contact.csv', encoding='cp437') # load family contact info
if season=='Winter':
gsignups['Sport']='Basketball'
# TODO determine where multiple sports converted to separate lines
duplicated=gsignups[gsignups.duplicated(subset=['First', 'Last','Grade','Sport'])]
if len(duplicated)>0:
print('Remove duplicate signups for %s' %", ".join(duplicated.Last.unique().tolist()))
gsignups=gsignups.drop_duplicates(subset=['First', 'Last','Grade','Sport'])
gsignups.loc[:,'Sport']=gsignups['Sport'].str.replace('Volleyball','VB')
#gsignups.loc[:,'Sport']=gsignups.loc[:,'Sport'].str.replace('Volleyball','VB').copy()
#gsignups.loc[:,'Sport']=gsignups['Sport'].replace({'Volleyball':'VB'}, regex=True).copy()
missing=[i for i in ['Famkey','Plakey'] if i not in gsignups.columns]
for col in missing: # add blank vals
gsignups.loc[gsignups.index, col]=np.nan
# convert assorted DOB strings to datetime.date
if not isinstance(gsignups.DOB[0], pd.Timestamp):# sometimes direct import to pd timestamp works, other times not
gsignups.loc[:,'DOB']=gsignups.DOB.apply(lambda x: parseDate(x))
# Get year from signup file name
outputduplicates(gsignups) # quick check of duplicates output in console window (already removed from signups)
gsignups=formatnamesnumbers(gsignups) # format phone numbers, names to title case, standardize schools, etc.
famcontact=formatnamesnumbers(famcontact)
def processGkey(val):
''' Some plakey/famkey copied to drive... must convert nan(float), whitespace or
number as string to either nan or int
'''
if isinstance(val, str):
val=''.join(val.split(' '))
if val=='':
return np.nan
else:
try:
return int(val)
except:
return np.nan
else:
return np.nan
# ensure gsignups has only int or nan (no whitespace)
gsignups.loc[:,'Plakey']=gsignups['Plakey'].apply(lambda x: processGkey(x))
gsignups.loc[:,'Famkey']=gsignups['Famkey'].apply(lambda x: processGkey(x))
return players, famcontact, gsignups
def loadprocessfiles(signupfile):
'''Loads and processes players, family contacts and signup file, gets active
season and year '''
players=pd.read_csv(cnf._INPUT_DIR + '\\players.csv', encoding='cp437') # load existing player data (need to find correct DOB format)
players.loc[:,'Grade']=players.Grade.replace('K',0)
players.loc[:,'Grade']=players.Grade.replace('pK',0) # just make them 0s for now
players.loc[:,'Grade']=players.Grade.astype(int)
# TODO use fixdates if players imports as string (various formats possible)
# players['DOB']=players['DOB'].apply(lambda x: fixDates(x))
if type(players.DOB[0])!=pd.Timestamp: # sometimes direct import to pd timestamp works, other times not
try:
players.loc[:,'DOB']=players.DOB.apply(lambda x: parseDate(x)) # return properly converted date column series
except:
print('Failure converting player DOB to datetime/timestamp')
famcontact=pd.read_csv(cnf._INPUT_DIR + '\\family_contact.csv', encoding='cp437') # load family contact info
# read this season's sports signup file and rename columns
if signupfile.endswith('.csv'):
SUraw=pd.read_csv(signupfile)
elif 'xls' in signupfile:
try:
SUraw=pd.read_excel(signupfile, sheetname='Raw') # may or may not have plakey/famkey
except:
SUraw=pd.read_excel(signupfile)
if SUraw.shape[1]==30 and 'Plakey' in SUraw.columns:
SUraw.columns=['Timestamp','First','Last','DOB','Gender','School','Grade',
'Address','Zip','Parish','Sport','AltPlacement','Ocstatus','Pfirst1',
'Plast1','Phone1','Text1','Email','Othercontact','Coach','Pfirst2','Plast2',
'Phone2','Text2','Email2','Coach2','Unisize','Unineed','Plakey','Famkey']
elif SUraw.shape[1]==28 and 'Plakey' in SUraw.columns:
SUraw.columns=['Timestamp','First','Last','DOB','Gender','School','Grade',
'Address','Zip','Parish','Sport','AltPlacement','Ocstatus','Pfirst1',
'Plast1','Phone1','Text1','Email','Othercontact','Coach','Pfirst2','Plast2',
'Phone2','Text2','Email2','Coach2','Plakey','Famkey']
elif SUraw.shape[1]==26 and 'Plakey' not in SUraw.columns: # Raw value without plakey and famkey
SUraw.columns=['Timestamp','First','Last','DOB','Gender','School',
'Grade','Address','Zip','Parish','Sport','AltPlacement','Ocstatus',
'Pfirst1','Plast1','Phone1','Text1','Email','Othercontact','Coach',
'Pfirst2','Plast2','Phone2','Text2','Email2','Coach2']
elif SUraw.shape[1]==28 and 'Plakey' not in SUraw.columns: # Raw value without plakey and famkey
SUraw.columns=['Timestamp','First','Last','DOB','Gender','School',
'Grade','Address','Zip','Parish','Sport','AltPlacement','Ocstatus',
'Pfirst1','Plast1','Phone1','Text1','Email','Othercontact','Coach',
'Pfirst2','Plast2','Phone2','Text2','Email2','Coach2','Unisize','Unineed']
SUraw.loc[SUraw.index,'Plakey']=np.nan # add if absent
SUraw.loc[SUraw.index,'Famkey']=np.nan
signups=SUraw.drop_duplicates(subset=['First', 'Last','Grade','Sport'])
signups['Sport'].replace({'Volleyball':'VB'},inplace=True, regex=True)
# Get year from signup file name
season=re.match(r'(\D+)', signupfile).group(0) # season at string beginning followed by year (non-digit)
if '\\' in season: # remove file path problem
season=season.split('\\')[-1]
year=int(re.search(r'(\d{4})', signupfile).group(0)) # full year should be only number string in signups file
outputduplicates(SUraw) # quick check of duplicates output in console window (already removed from signups)
signups=formatnamesnumbers(signups) # format phone numbers, names to title case, standardize schools, etc.
famcontact=formatnamesnumbers(famcontact)
return players, famcontact, signups, season, year
def findavailablekeys(df, colname, numkeys):
'''Pass df and colname, return a defined number of available keys list
used for players, families, signups, etc.
'''
# list comprehension
allnums=[i for i in range(1,len(df))]
usedkeys=df[colname].unique()
usedkeys=np.ndarray.tolist(usedkeys)
availkeys=[i for i in allnums if i not in usedkeys]
if len(availkeys)<numkeys: # get more keys starting at max+1
needed=numkeys-len(availkeys)
for i in range(0,needed):
nextval=int(max(usedkeys)+1) # if no interior vals are available find next one
availkeys.append(nextval+i)
availkeys=availkeys[:numkeys] # truncate and only return the requested number of needed keys
return availkeys
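# Usage sketch (illustrative): for a five-row df whose 'Plakey' column holds [1, 2, 4, 6, 7],
# findavailablekeys(df, 'Plakey', 2) -> [3, 8]
# (gaps below len(df) are reused first, then new keys are appended past the current max)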
def organizeroster(df):
''' Renaming, reorg, delete unnecessary columns for CYC roster output
already split by sport and year'''
df=df.rename(columns={'First':'Fname','Last':'Lname','Address':'Street','Parish_registration':'Parish of Registration'})
df=df.rename(columns={'Parish_residence':'Parish of Residence','Phone1':'Phone','DOB':'Birthdate','Gender':'Sex'})
df=df.rename(columns={'Email1':'Email'})
    # replace Girl, Boy with F, M
    df.loc[:,'Sex']=df.Sex.replace('Girl','F').replace('Boy','M')
    df.loc[:,'Sex']=df.Sex.str.upper() # ensure uppercase
    # Birthdate conversion to an 8/25/2010-style string is handled later (formatDOB in createrosters)
mycols=['Fname', 'Lname', 'Street', 'City', 'State', 'Zip', 'Phone', 'Email', 'Birthdate', 'Sex', 'Role', 'Division', 'Grade', 'Team', 'School', 'Parish of Registration', 'Parish of Residence', 'Coach ID']
df=df[mycols] # put back in desired order
df=df.sort_values(['Team'])
return df
'''TESTING row=tempplay.iloc[7]
signups=signups[signups['Last']=='Elston']
'''
def processdatachanges(signups, players, famcontact, year):
    '''Pass SC signups subset from google drive; update players and family contacts with
    more up-to-date info (new address, phone, email, etc.)
must start here if troubleshooting
args:
signups -- online signups file (normally google drive)
players - player DOB, grade, etc
famcontact- family contact info
year - sports year (int); e.g. 2019 for 2019-20 school year
'''
# Using all entries from signups (manual and gdrive)
# Updates from paper signups should be done directly to famcontact and players csv files (skip entirely)
'''
signups.Timestamp=pd.to_datetime(signups.Timestamp, errors='coerce') # converts to naT or timestamp
gdsignups=signups.dropna(subset=['Timestamp']) # drops manual entries (no google drive timestamp)
'''
    # merge w/ players and update grade, recalc grade adjustment, and school
    # inner merge is fine here; player rows are re-located below by Plakey, so tempplay indices need not match players
players=players.reset_index(drop=True)
tempplay=pd.merge(players, signups, how='inner', on=['Plakey'], suffixes=('','_n'))
tempplay=tempplay.dropna(subset=['Gender_n']) # this drops all without a google drive entry
for index, row in tempplay.iterrows():
upkwargs={}
# Skip approval for grade updates
if row.Grade!=row.Grade_n: # grade discrepancy between players.csv and current signup
match=players[players['Plakey']==row.Plakey]
if len(match)==1:
thisind=match.index[0]
# update player grade (no approval)
players.loc[thisind,'Grade']=row.Grade_n # set to new value from current signup file
print (row.First," ",row.Last," grade changed to ", row.Grade_n)
if row.School!=row.School_n and str(row.School_n)!='nan':
upkwargs.update({'school':True})
# Check for DOB inconsistency between google drive and players.csv
if row.DOB!=row.DOB_n: # don't change grade adjustment if DOB discrepancy
            if row.DOB_n.year!=year: # flag a real DOB discrepancy; a new 'DOB' equal to this sports year is treated as an entry slip and skipped
upkwargs.update({'DOB':True})
else: # recalculate grade adjustment
# Direct adjustment to gradeadj in players (if indicated)
players=updategradeadjust(row, players, year)
if 'school' in upkwargs or 'DOB' in upkwargs:
# Interactively approve school or DOB changes
players=updateplayer_tk(row, players, **upkwargs)
autocsvbackup(players,'players', newback=True) # run autobackup script
outname=cnf._OUTPUT_DIR+'\\players.csv'
players.to_csv(outname,index=False) # direct save of changes from google drive info
# now update new info into family contacts
# faminfo=gdsignups.drop_duplicates(subset=['Famkey']) # only process first kid from family
faminfo=signups.drop_duplicates(subset=['Famkey'])
famcontact=prepcontacts(famcontact)
faminfo=prepcontacts(faminfo)
tempfam=pd.merge(famcontact, faminfo, how='inner', on=['Famkey'], suffixes=('','_n')) # same indices as famcontact
tempfam=tempfam.dropna(subset=['Zip_n']) # drops those without timestamped google drive entry
for index,row in tempfam.iterrows():
# Update/reshuffle phone, email, parent list, parish of registration (direct to famcontact)
famcontact=update_contact(row, famcontact) # update/reshuffle phone,text (list of lists)
autocsvbackup(famcontact,'family_contact', newback=True) # run autobackup script
outname=cnf._INPUT_DIR+'\\family_contact.csv'
famcontact.to_csv(outname, index=False)
return players, famcontact
def updatefamcon_tk(row, famcontact, **upkwargs):
''' Interactive approval of family contact changes
changes directly made to famcontacts (but not yet autosaved)
upkwargs: phone, email, address
'''
root = tk.Tk()
root.title('Update family contact info')
    choice=tk.StringVar() # must be defined outside of the event-bound callback functions
rownum=0
mytxt='Family: '+row.Family+' # '+str(row.Plakey)
tk.Label(root, text=mytxt).grid(row=rownum, column=0)
tk.Label(root, text='Deselect to remove').grid(row=rownum, column=1)
rownum+=1
# Use listbox of common schools?
if 'parlist' in upkwargs: # indicates new parent found
colnum=0
parlist=upkwargs.get('parlist',[])
# Checkboxes to add new parent
if 'newpar1' in upkwargs:
addpar1=tk.BooleanVar()
addpar1.set(True)
try:
mytext='Add parent: '+ (' '.join(upkwargs.get('newpar1',[]))+'?')
except:
print('Error adding parent 1', )
mytext=''
tk.Checkbutton(root, variable=addpar1, text=mytext).grid(row=rownum, column=colnum)
colnum+=1
if 'newpar2' in upkwargs:
addpar2=tk.BooleanVar()
addpar2.set(True)
try:
mytext='Add parent: '+ (' '.join(upkwargs.get('newpar2',[]))+'?')
except:
mytext=''
tk.Checkbutton(root, variable=addpar2, text=mytext).grid(row=rownum, column=colnum)
colnum+=1
# Checkbutton and boolvar for each parent (default true)
pbools=[] # List of bools for parent inclusion
for i in range(0,len(parlist)):
pbools.append(tk.BooleanVar())
pbools[i].set(True)
tempstr=parlist[i]
tk.Checkbutton(root, variable=pbools[i], text=tempstr).grid(row=rownum, column=colnum)
rownum+=1
rownum+=1
    if 'emails' in upkwargs: # indicates new/changed email found
emaillist=upkwargs.get('emails',[])
# Checkboxes to add new parent
colnum=0
if 'email1' in upkwargs:
addemail1=tk.BooleanVar()
addemail1.set(True)
email1=tk.StringVar()
email1.set(upkwargs.get('email1',''))
tk.Checkbutton(root, variable=addemail1, text='Add new email1').grid(row=rownum, column=colnum)
rownum+=1
tk.Entry(root, textvariable=email1).grid(row=rownum, column=colnum)
rownum+=1
if 'email2' in upkwargs:
addemail2=tk.BooleanVar()
addemail2.set(True)
email2=tk.StringVar()
email2.set(upkwargs.get('email2',''))
tk.Checkbutton(root, variable=addemail2, text='Add new email2').grid(row=rownum, column=colnum)
rownum+=1
tk.Entry(root, textvariable=email2).grid(row=rownum, column=colnum)
colnum+=1
# Checkbutton and boolvar for each email (default true)
        ebools=[] # List of bools for email inclusion
for i in range(0,len(emaillist)):
ebools.append(tk.BooleanVar())
tempstr=emaillist[i]
ebools[i].set(True)
tk.Checkbutton(root, variable=ebools[i], text=tempstr).grid(row=rownum, column=colnum)
rownum+=1
rownum+=1
    if 'phones' in upkwargs: # indicates new/changed phone found
phlist=upkwargs.get('phones',[])
# Checkboxes to add new parent
colnum=0
if 'phone1' in upkwargs:
addphone1=tk.BooleanVar()
addphone1.set(True)
try:
mytext='Add phone/text: '+ upkwargs.get('phone1','')
except:
mytext=''
tk.Checkbutton(root, variable=addphone1, text=mytext).grid(row=rownum, column=colnum)
colnum+=1
if 'phone2' in upkwargs:
addphone2=tk.BooleanVar()
addphone2.set(True)
try:
mytext='Add phone/text: '+ ', '.join(upkwargs.get('phone2',[]))
except:
mytext=''
tk.Checkbutton(root, variable=addphone2, text=mytext).grid(row=rownum, column=colnum)
colnum+=1
        # Checkbutton and boolvar for each phone (default true)
        phbools=[] # List of bools for phone inclusion
for i in range(0,len(phlist)):
phbools.append(tk.BooleanVar())
tempstr=phlist[i]
phbools[i].set(True)
tk.Checkbutton(root, variable=phbools[i], text=tempstr).grid(row=rownum, column=colnum)
rownum+=1
if 'address' in upkwargs:
colnum=0
tk.Label(root, text='Possible change of address').grid(row=rownum, column=colnum)
rownum+=1
newaddrbool=tk.BooleanVar()
newaddr=tk.StringVar()
newaddrbool.set(False)
newaddr.set(row.Address_n)
newzip=tk.StringVar()
try:
newzip.set(int(row.Zip_n))
except:
print('Non-standard zip value',str(row.Zip_n))
tk.Checkbutton(root, variable=newaddrbool, text='Change address?').grid(row=rownum, column=colnum)
colnum+=1
tk.Label(root, text='Current address').grid(row=rownum, column=colnum)
colnum=0
rownum+=1
tk.Entry(root, textvariable=newaddr).grid(row=rownum, column=colnum)
rownum+=1
tk.Entry(root, textvariable=newzip).grid(row=rownum, column=colnum)
colnum+=1
tempstr=str(row.Address)+' '+str(row.Zip)
tk.Label(root, text=tempstr).grid(row=rownum, column=colnum)
rownum+=1
# Now set up select/close buttons
def skip(event):
choice.set('skip')
root.destroy()
def change(event):
choice.set('change')
root.destroy()
f=tk.Button(root, text='Skip')
f.bind('<Button-1>', skip)
f.grid(row=rownum, column=0)
g=tk.Button(root, text='Change')
g.bind('<Button-1>', change)
g.grid(row=rownum, column=1)
root.mainloop()
mychoice=choice.get()
if mychoice=='change':
# Find matching row for family (needed for all changes below)
famkey=row.Famkey
match=famcontact[famcontact['Famkey']==famkey]
if len(match)==1:
thisind=match.index[0]
else:
print('Problem finding unique entry for famkey', str(famkey))
return famcontact # return unaltered
# Reconstruct parent list
if 'parlist' in upkwargs:
newparlist=[] # constructing entirely new parent list from checkbox choices
if 'newpar1' in upkwargs:
if addpar1.get():
newparlist.append(upkwargs.get('newpar1',[np.nan,np.nan]))
#TODO fix nan error
print('Added parent',' '.join(upkwargs.get('newpar1')),' to ',str(row.Family))
for i, val in enumerate(pbools):
if pbools[i].get():
newparlist.append(parlist[i]) # [first, last] format
if 'newpar2' in upkwargs:
if addpar2.get():
newparlist.append(upkwargs.get('newpar2',[np.nan,np.nan]))
print('Added parent 2',' '.join(upkwargs.get('newpar2')),' to ',str(row.Family))
# Now direct update of parents in this family's famcontact entry
newparlist=newparlist[0:3] # limit to 3 entries
while len(newparlist)<3:
newparlist.append([np.nan,np.nan]) # pad with nan entries if necessary
# now reset parent name entries
for i in range(1,4): # reset 3 existing parents entries
fname='Pfirst'+str(i)
lname='Plast'+str(i)
famcontact.loc[thisind, fname] = newparlist[i-1][0]
famcontact.loc[thisind, lname] = newparlist[i-1][1]
# Reconstruct email list
if 'emails' in upkwargs:
newemaillist=[]
if 'email1' in upkwargs:
if addemail1.get():
newemaillist.append(email1.get())
print('Added email1', email1.get(), ' to ', str(row.Family))
for i, val in enumerate(ebools):
if ebools[i].get():
newemaillist.append(emaillist[i])
if 'email2' in upkwargs:
if addemail2.get():
# insert in 2nd position
newemaillist.insert(1, email2.get())
print('Added email2', email2.get(), ' to ', str(row.Family))
# Now update emails in famcontact entry
# Direct update of parent list
newemaillist=newemaillist[0:3] # limit to 3 entries
while len(newemaillist)<3:
newemaillist.append(np.nan) # pad with nan entries if necessary
            # now reset email entries
            for i in range(1,4): # reset 3 existing email columns
colname='Email'+str(i)
famcontact.loc[thisind, colname]= newemaillist[i-1]
# Reconstruct phone list
if 'phones' in upkwargs:
newphlist=[]
if 'phone1' in upkwargs:
if addphone1.get():
newphlist.append(upkwargs.get('phone1', [np.nan,np.nan]))
print('Added phone1', ','.join(upkwargs.get('phone1',[])), ' to ', str(row.Family))
for i, val in enumerate(phbools):
if phbools[i].get():
newphlist.append(phlist[i])
# added at end... probably should go
if 'phone2' in upkwargs:
if addphone2.get():
# insert in 2nd position
newphlist.insert(1, upkwargs.get('phone2',[np.nan,np.nan]))
print('Added phone2', ','.join(upkwargs.get('phone2',[])), ' to ', str(row.Family))
# Now update phone, text in famcontact entry
newphlist=newphlist[0:4] # limit to 4 entries
while len(newphlist)<4:
newphlist.append([np.nan, np.nan]) # pad with nan entries if necessary
            # now reset phone/text entries
for i in range(1,5): # reset max 4 phone entries
phname='Phone'+str(i)
textname='Text'+str(i)
famcontact.loc[thisind, phname] = newphlist[i-1][0]
famcontact.loc[thisind, textname] = newphlist[i-1][1]
# Handle change of address (direct change if approved)
# Also change associated zip code and reset parish of residence
if 'address' in upkwargs:
            if newaddrbool.get(): # BooleanVar itself is always truthy; use .get() to read the checkbox
print('Address changed for ', str(row.Family))
famcontact.loc[thisind, 'Address'] = newaddr.get()
# Reset parish of residence to nan (manually find and replace)
famcontact.loc[thisind, 'Parish_residence'] = np.nan
try:
famcontact.loc[thisind,'Zip']=int(newzip.get())
except:
print('Problem converting zip code ', newzip.get())
# TODO ... handle parish of registration
return famcontact
def update_contact(row, famcontact):
'''Update phone and textable list from google drive entries;
google drive raw entries first processed in process_data_changes (then update
contacts is called)
row is a merge of existing famcontact info and new signup info
existing entries from fam_contact listed first;
pass/modify/return series for family; reorder/replace numbers
has fairly long list of changes made w/o interactive approval:
1) changing order of email or phone numbers (e.g. swap phone1 and phone2)
2) add phone2 (or email2) if current phone2(email2) is nan
3) change order of parents (new parent1)
All other changes done w/ interactive approval using update_famcon_tk
'''
# [phone, text, order]
thisfam=row.Family
match=famcontact[famcontact['Famkey']==row.Famkey]
if len(match)==1:
thisind=match.index[0] # correct index for updating this family in famcontacts
else:
print(str(row.Family), " not found in famcontacts.. shouldn't happen")
return famcontact
upkwargs={} # empty dict for monitoring all changes
# check for possible change in address (housenum as trigger)
match1=re.search(r'\d+', row.Address)
match2=re.search(r'\d+', row.Address_n)
if match1 and match2:
num1=match1.group(0)
num2=match2.group(0)
if num1!=num2: # change in address number strongly suggestive of actual change
upkwargs.update({'address':True})
else:
print('No address # found for', str(thisfam))
phonelist=[] # list of lists with number and textable Y/N
for i in range(1,5): # get 4 existing phone entries (phone1, phone2, etc.)
phname='Phone'+str(i)
txtname='Text'+str(i)
if str(row[phname])!='nan':
phonelist.append([row[phname],row[txtname]]) # as phone and text y/N
# New google drive entries will be Phone1_n.. look for phone/text pair in existing list
if str(row.Phone1_n)!='nan' and [row.Phone1_n,row.Text1_n] in phonelist: # new ones phone is required entry
# default move of phone1, text1 to top of list - no confirmation
if [row.Phone1_n,row.Text1_n]!=phonelist[0]: # move if not in first position
phonelist.insert(0,phonelist.pop(phonelist.index([row.Phone1_n,row.Text1_n])))
print('Phone 1 changed for ', str(thisfam))
upkwargs.update({'phchange':True})
if str(row.Phone1_n)!='nan' and [row.Phone1_n,row.Text1_n] not in phonelist: # new ones phone is required entry
if [row.Phone1_n, np.nan] in phonelist: # remove if # present but w/o text indication (no confirm)
phonelist.remove([row.Phone1_n,np.nan])
phonelist.insert(0,[row.Phone1_n,row.Text1_n]) # insert in first position
print('Updated phone 1 to', row.Phone1_n,' for ',str(thisfam))
upkwargs.update({'phchange':True})
else:
# phone1 change to be confirmed
upkwargs.update({'phone1':[row.Phone1_n,row.Text1_n]})
upkwargs.update({'phones': phonelist})
if str(row.Phone2_n)!='nan': # check for phone2 entry (with _n suffix)
if [row.Phone2_n,row.Text2_n] not in phonelist: # add second phone to 2nd position if not present
if [row.Phone2_n,np.nan] in phonelist: # remove if # present but w/o text indication
phonelist.remove([row.Phone2_n,np.nan])
phonelist.insert(1,[row.Phone2_n,row.Text2_n])
print ('Updated phone 2 to ', str(row.Phone2_n), 'for ', str(thisfam))
upkwargs.update({'phchange':True})
else: # get approval for phone 2 addition
upkwargs.update({'phone2':[row.Phone2_n,row.Text2_n]})
upkwargs.update({'phones': phonelist})
# Construct existing list of known email addresses
emaillist=[]
for i in range(1,4): # get 3 existing email entries
emailname='Email'+str(i)
if str(row[emailname])!='nan':
emaillist.append(row[emailname].lower())
# Find new email1 entry in google drive data
    if str(row.Email)!='nan' and '@' in row.Email: # real primary gd named email
        if row.Email.lower() in emaillist: # already known: move to first position (no confirmation)
            if row.Email.lower()!=emaillist[0]: # check if in first position already
                emaillist.insert(0,emaillist.pop(emaillist.index(row.Email.lower())))
                upkwargs.update({'emchange':True})
                print ('Updated email 1 ', str(row.Email.lower()), 'for family', str(thisfam))
else: # confirm email1 if not present
upkwargs.update({'email1':row.Email})
upkwargs.update({'emails':emaillist})
# look for new email in email2 position and add
if str(row.Email2_n)!='nan' and '@' in row.Email2_n:
if row.Email2_n.lower() not in emaillist: # add second email to 2nd position if not present
upkwargs.update({'email2':row.Email2_n})
upkwargs.update({'emails':emaillist})
# Update list of parent names (max 3 entries)
parlist=[] # construct existing list from family contacts
# skip if all nan for entered parents (non-gd entry)
for i in range(1,4): # construct existing parents list
fname='Pfirst'+str(i)
lname='Plast'+str(i)
if str(row[fname])!='nan':
parlist.append([row[fname],row[lname]]) # list of lists [first, last]
if str(row.Pfirst1_n)!='nan': # skip if parent name is nan
        if [row.Pfirst1_n,row.Plast1_n] in parlist: # reorder in list
            if [row.Pfirst1_n,row.Plast1_n]!=parlist[0]: # check if already in first
                # move to first position while removing the old entry (everything else requires approval)
                parlist.insert(0,parlist.pop(parlist.index([row.Pfirst1_n,row.Plast1_n])))
                upkwargs.update({'parchange':True})
        else: # parent not in list (confirm)
            upkwargs.update({'newpar1':[row.Pfirst1_n,row.Plast1_n]})
            upkwargs.update({'parlist':parlist})
if str(row.Pfirst2_n)!='nan': # Check for parent 2 entry
if [row.Pfirst2_n,row.Plast2_n] not in parlist: # add second phone to 2nd position if not present
upkwargs.update({'newpar2':[row.Pfirst2_n,row.Plast2_n]})
upkwargs.update({'parlist':parlist})
# Save auto-changes in phone to family contacts
if 'phchange' in upkwargs: # Record altered phonelist in famcontacts
if 'phones' in upkwargs: # if present in upkwargs, update list
upkwargs.update({'phones': phonelist}) # ensure most current copy
        phonelist=phonelist[0:4] # limit to 4 entries (4 phone/text columns)
while len(phonelist)<4:
phonelist.append([np.nan,np.nan]) # pad with nan entries if necessary
for i in range(1,5): # reset 4 existing phone entries
phname='Phone'+str(i)
txtname='Text'+str(i)
famcontact.loc[thisind, phname] = phonelist[i-1][0] # first of tuple is phone
famcontact.loc[thisind, txtname] = phonelist[i-1][1] # 2nd of tuple is text y/n
del upkwargs['phchange']
print('automatic phone changes for', thisfam)
# Save auto-changes in emails to family contacts
if 'emchange' in upkwargs: # Record altered phonelist in famcontacts
if 'emails' in upkwargs: # if present in upkwargs, update list
upkwargs.update({'emails': emaillist}) # ensure most current copy
        emaillist=emaillist[0:3] # limit to 3 entries (3 email columns)
        while len(emaillist)<3:
            emaillist.append(np.nan) # pad with nan entries if necessary
        for i in range(1,4): # reset 3 existing email entries
emname='Email'+str(i)
famcontact.loc[thisind, emname] =emaillist[i-1]
del upkwargs['emchange']
print('automatic email changes for', thisfam)
if 'parchange' in upkwargs: # Record altered parents list in famcontacts
if 'parlist' in upkwargs: # if present in upkwargs, update list
upkwargs.update({'parlist': parlist}) # ensure most current copy
        parlist=parlist[0:3] # limit to 3 entries (3 parent columns)
        while len(parlist)<3:
            parlist.append([np.nan,np.nan]) # pad with [nan,nan] pairs if necessary (3 total)
        for i in range(1,4): # reset 3 existing parent entries
fname='Pfirst'+str(i)
lname='Plast'+str(i)
try:
famcontact.loc[thisind, fname] =parlist[i-1][0]
famcontact.loc[thisind, lname] =parlist[i-1][1]
except:
print('Error updating parents for', thisfam)
del upkwargs['parchange']
print('automatic parent changes for', thisfam)
# now check for any changes needing interactive approval
if len(upkwargs)>0: # something needs interactive approval
famcontact=updatefamcon_tk(row, famcontact, **upkwargs)
return famcontact
def updateplayer_tk(row, players, **upkwargs):
''' Interactive approval of player info updates (except date)
changes directly made to players (but not yet autosaved)
called by processdatachanges
'''
commonschools=['Cabrini','Soulard','SLPS','Charter','Private']
root = tk.Tk()
root.title('Update player info')
    choice=tk.StringVar() # must be defined outside of the event-bound callback functions
rownum=0
mytxt='Player:'+row.First+' '+row.Last+' # '+str(row.Plakey)
tk.Label(root, text=mytxt).grid(row=rownum, column=0)
rownum+=1
# Use listbox of common schools?
if 'DOB' in upkwargs: # indicates discrepancy
DOB1=date(row.DOB)
DOB2=date(row.DOB_n)
# create and display DOB variables
def ChooseDOB1(event):
DOB.set(datetime.strftime(DOB1,'%m/%d/%y'))
def ChooseDOB2(event):
DOB.set(datetime.strftime(DOB2,'%m/%d/%y'))
DOB=tk.StringVar()
DOB.set(datetime.strftime(DOB1,'%m/%d/%y')) # defaults to original
tk.Label(root, text='Update date of birth?').grid(row=rownum, column=0)
mytxt='current DOB:'+datetime.strftime(DOB1,'%m/%d/%y')
b=tk.Button(master=root, text=mytxt)
b.bind('<Button-1>', ChooseDOB1)
b.grid(row=rownum, column=1)
mytxt='New DOB:'+datetime.strftime(DOB2,'%m/%d/%y')
b=tk.Button(master=root, text=mytxt)
b.bind('<Button-1>', ChooseDOB2)
b.grid(row=rownum, column=2)
tk.Entry(master=root, textvariable=DOB).grid(row=rownum, column=3)
rownum+=1
if 'school' in upkwargs:
school=tk.StringVar()
school.set(row.School) # default to existing value
tk.Label(root, text='Update school?').grid(row=rownum, column=0)
rownum+=1
def newschool(event):
school.set(row.School_n)
def oldschool(event):
school.set(row.School)
def pickschool(event):
# double-click to pick standard school choice
items=lb.curselection()[0] # gets selected position in list
school.set(commonschools[items])
tk.Entry(root, textvariable=school).grid(row=rownum, column=2)
mytxt='new school:'+str(row.School_n)
b=tk.Button(master=root, text=mytxt)
b.bind('<Button-1>', newschool)
b.grid(row=rownum, column=1)
mytxt='existing school:'+str(row.School)
b=tk.Button(master=root, text=mytxt)
b.bind('<Button-1>', oldschool)
b.grid(row=rownum, column=0)
# also include selectable listbox of common school choices
lb=tk.Listbox(master=root, selectmode=tk.SINGLE)
lb.bind("<Double-Button-1>", pickschool)
lb.grid(row=rownum, column=3)
for i,sch in enumerate(commonschools):
lb.insert(tk.END, sch)
rownum+=1
# Now set up select/close buttons
def skip(event):
choice.set('skip')
root.destroy()
def change(event):
choice.set('change')
root.destroy()
f=tk.Button(root, text='Skip')
f.bind('<Button-1>', skip)
f.grid(row=rownum, column=0)
g=tk.Button(root, text='Change')
g.bind('<Button-1>', change)
g.grid(row=rownum, column=1)
root.mainloop()
mychoice=choice.get()
if mychoice=='change':
try:
# make changes directly to players after finding correct index using plakey
plakey=row.Plakey
match=players[players['Plakey']==plakey]
thisind=match.index[0]
if 'school' in upkwargs:
players.loc[thisind,'School']= school.get()
if 'DOB' in upkwargs:
newDOB=datetime.strptime(DOB.get(),'%m/%d/%y')
players.loc[thisind,'DOB']= newDOB
except:
print('Error updating info for', row.Plakey, row.First, row.Last)
return players
def prepcontacts(df):
''' Prepare for update contacts/ matching with google drive info
avoids possible problems/spaces in manually entered info '''
mycols=['Pfirst1', 'Plast1','Pfirst2', 'Plast2', 'Pfirst3', 'Plast3',
'Phone1', 'Text1','Phone2', 'Text2', 'Phone3', 'Text3', 'Phone4',
'Text4', 'Email1','Email2', 'Email3']
for i, col in enumerate(mycols):
try:
df.loc[:,col]=df[col].str.strip()
except: # maybe only nan or not present (i.e. in signups)
pass
mycols=['Text1','Text2','Text3']
for i, col in enumerate(mycols):
try:
df.loc[:,col]=df[col].str.replace('No','N', case=False)
df.loc[:,col]=df[col].str.replace('Yes','Y', case=False)
except:
pass
return df
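# Hypothetical illustration (comment only; column values are made up): prepcontacts strips stray
# whitespace from the name/phone/email columns and shortens Yes/No text flags, e.g.
#   demo = pd.DataFrame({'Pfirst1': [' Jane '], 'Text1': ['Yes'], 'Email1': ['a@b.com ']})
#   prepcontacts(demo)   # Pfirst1 -> 'Jane', Email1 -> 'a@b.com', Text1 -> 'Y'
# (the Yes->Y shortening relies on str.replace(case=False), which the older pandas this script
#  targets accepts; missing columns are silently skipped by the try/except above)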
def findyearseason(df):
''' Pass raw signups and determine year and sports season '''
# get year from system clock and from google drive timestamp
now=datetime.now()
val=df.Timestamp[0] # grab first timestamp
    if not isinstance(val, datetime): # not a timestamp (i.e. manual string entry); find a row that has one
        for index, row in df.iterrows():
            if isinstance(df.Timestamp[index], datetime):
                val=df.Timestamp[index]
                break
year=val.year # use year value from signup timestamps
if now.year!=val.year:
print ('Possible year discrepancy: Signups are from ',str(val.year))
# now find sports season
mask = np.column_stack([df['Sport'].str.contains("occer", na=False)])
if len(df.loc[mask.any(axis=1)])>0:
season='Fall'
mask = np.column_stack([df['Sport'].str.contains("rack", na=False)])
if len(df.loc[mask.any(axis=1)])>0:
season='Spring'
mask = np.column_stack([df['Sport'].str.contains("asket", na=False)])
if len(df.loc[mask.any(axis=1)])>0:
season='Winter'
return season, year
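# Hypothetical illustration (comment only; timestamp is made up): the season is inferred purely
# from the Sport strings, so a frame of fall soccer signups resolves like this:
#   demo = pd.DataFrame({'Timestamp': [datetime(2019, 8, 2, 10, 30)], 'Sport': ['Soccer']})
#   findyearseason(demo)   # -> ('Fall', 2019), with a console warning if the clock year differs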
def outputduplicates(df):
'''Prints out names of players with duplicated entries into console... can then delete from google drive signups '''
    tempdf=df[df.duplicated(['First','Last','Sport'])] # keeps 2nd and later occurrences of duplicated signups
firsts=tempdf.First.tolist()
lasts=tempdf.Last.tolist()
for f,l in zip(firsts, lasts):
print('Duplicated signup for player: {} {}'.format(f,l))
return
def formatphone(df):
''' Convert all entered phone numbers in dfs phone columns to 314-xxx-xxxx string and standardize text field '''
def phoneFormat(val):
        # helper applied to each phone value for reformatting
if not isinstance(val, str):
return val
# replace/remove any white space
val="".join(val.split(' '))
if val=='': # blank phone causes problems
return np.nan
        if not re.search(r'(\d+-\d+-\d+)', val):
            val=re.sub("[^0-9]", "", val) # strip all non-digit characters
if len(val)==7:
return '314'+val
elif len(val)==11 and val.startswith('1'): # remove starting 1 if present
return val[1:11]
elif len(val)!=10: # sometimes has ---
# print('Bad number: ',val)
return val
else:
return val[0:3]+'-'+val[3:6]+'-'+val[6:10]
else:
return val # already good
# find phone columns (named phone, phone2, etc.)
phlist=[str(s) for s in df.columns if 'Phone' in s]
for col in phlist:
df.loc[:,col]=df[col].apply(lambda x: phoneFormat(x))
# now change yes in any text field to Y
txtlist=[str(s) for s in df.columns if 'Text' in s]
for col in txtlist:
df.loc[:,col]=df[col].replace('yes','Y')
df.loc[:,col]=df[col].replace('Yes','Y')
return df
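# Hypothetical before/after values (comment only) following the phoneFormat branches above:
#   '314 555 1234'  -> '314-555-1234'   # ten digits are reformatted with dashes
#   '5551234'       -> '3145551234'     # seven digits get the default 314 area code prepended
#   '13145551234'   -> '3145551234'     # a leading 1 on an 11-digit number is dropped
#   '314-555-1234'  -> '314-555-1234'   # already-dashed numbers pass through untouched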
def standardizeschool(df):
''' can pass any frame with school column and standardize name as Cabrini and Soulard'''
schstr='frances' + '|' + 'cabrini' + '|' + 'sfca' # multiple school matching string
tempdf=df[df['School'].str.contains(schstr, na=False, case=False)]
df.loc[tempdf.index,'School']='Cabrini'
tempdf = df[df['School'].str.contains('soulard', na=False, case=False)]
df.loc[tempdf.index,'School']='Soulard'
tempdf = df[df['School'].str.contains('public', na=False, case=False)]
df.loc[tempdf.index,'School']='Public'
schstr='city garden' + '|' + 'citygarden' # multiple school matching string
tempdf = df[df['School'].str.contains(schstr, na=False, case=False)]
df.loc[tempdf.index,'School']='City Garden'
return df
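# Hypothetical illustration (comment only; school strings are made up): any spelling matching the
# patterns above collapses onto the canonical name, e.g.
#   demo = pd.DataFrame({'School': ['St Frances Cabrini', 'SOULARD school', 'City Garden Montessori']})
#   standardizeschool(demo)['School'].tolist()   # -> ['Cabrini', 'Soulard', 'City Garden']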
def formatnamesnumbers(df):
'''Switch names to title case, standardize gender, call phone/text reformat and standardize school name'''
def titleStrip(val):
try:
return val.title().strip()
except:
return val
processCols=['First','Last','Family','Pfirst1','Plast1','Pfirst2','Plast2','Email','Email2']
processCols=[i for i in processCols if i in df.columns]
for col in processCols:
df.loc[:, col]=df[col].apply(lambda x: titleStrip(x))
if 'Gender' in df:
df.loc[:,'Gender']=df.Gender.replace('Girl','f')
df.loc[:,'Gender']=df.Gender.replace('Boy','m')
if 'Grade' in df:
df.loc[:,'Grade']=df.Grade.replace('K',0)
df.loc[:,'Grade']=df.Grade.replace('pK',0)
try:
df.loc[:,'Grade']=df.Grade.astype(int)
except:
print('Player grade likely missing from raw signup file... enter manually')
df=formatphone(df) # call phone reformatting string
if 'School' in df:
df=standardizeschool(df) # use "Cabrini" and "Soulard" as school names
return df
def graduate_players(players, year):
''' Recalc grade based on grade adjustment, school year (run once per year in fall) and age.
some player grades will already have been updated (generally google drive entries)... however recalc shouldn't
change grade '''
players.loc[:,'Grade']=players.Grade.replace('K',0)
for index,row in players.iterrows():
        # ('K' was already replaced with 0 above, so int() conversion is safe)
        grade=int(players.iloc[index]['Grade']) # get currently listed grade
gradeadj=players.iloc[index]['Gradeadj']
dob=players.iloc[index]['DOB']
if str(gradeadj)=='nan' or str(dob)=='NaT': # skip grade update if info is missing
continue
dob=date(dob)
# calculate current age at beginning of this school on 8/1
age=date(year,8,1)-dob
age = (age.days + age.seconds/86400)/365.2425
# assign grade based on age (and grade adjustment)
newgrade=int(age)+int(gradeadj)-5
if grade!=newgrade:
first=players.iloc[index]['First']
last=players.iloc[index]['Last']
print('Grade changed from',grade,'to',newgrade,'for', first, last)
players.loc[index, 'Grade'] = newgrade
players.loc[:,'Grade']=players.Grade.replace(0,'K')
return players
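# Worked example of the grade formula above (comment only; player is made up): a child born
# 3/10/2011 with Gradeadj 0 is about 8.39 years old on 8/1/2019, so int(8.39) + 0 - 5 = 3,
# i.e. they are placed in 3rd grade for the 2019-20 school year.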
def removeEmptyFams(players, famcontact):
'''
Remove empty families (with no remaining players)
'''
# Remove families with no active players
plaset=[int(i) for i in list(players.Famkey.unique())]
famset=[int(i) for i in list(famcontact.Famkey.unique())]
# Empty families
emptykey=[i for i in famset if i not in plaset]
empty=famcontact[famcontact['Famkey'].isin(emptykey)]
print('Remove empty families:')
for ind, row in empty.iterrows():
print(row.Family, ':',row.Pfirst1, row.Plast1)
choice=input("Remove empty families (Y,N)?\n")
if choice.upper()=='Y':
famcontact=famcontact[~famcontact['Famkey'].isin(emptykey)]
outname=cnf._INPUT_DIR+'\\family_contact.csv'
famcontact.to_csv(outname, index=False)
return famcontact
def removeHSkids(players):
''' Drop graduated players (9th graders) from list '''
grlist=[i for i in range(0,9)]
grlist.append('K')
Hs=players.loc[~(players.Grade.isin(grlist))]
for ind, row in Hs.iterrows():
print(row.First, row.Last)
choice=input('Remove above HS players (Y/N)?\n')
if choice.upper()=='Y':
players=players.loc[(players.Grade.isin(grlist))]
print('HS Players removed but not autosaved')
return players
def estimategrade(df, year):
'''Estimate grade for this sports season based on DOB.. not commonly used '''
for index, row in df.iterrows():
grade=df.loc[index]['Grade']
if str(grade)=='nan': # skips any players who already have assigned grade
            dob=df.loc[index]['DOB']
            first=df.loc[index]['First']
            last=df.loc[index]['Last']
            if str(dob)=='nan':
                print ('DOB missing for ', first,' ', last)
                continue # skip to next if dob entry is missing
            dob=date(dob) # convert to datetime date from timestamp (only after confirming it exists)
currage=date(year,8,1) - dob
currage = (currage.days + currage.seconds/86400)/365.2425 # age on first day of school/ sports season
gradeest=int(currage-5)
if gradeest==0:
gradeest='K'
print(first, last, 'probably in grade', gradeest)
df.loc[index,'Grade']=gradeest
return df
def updateoldteams(teams, year):
''' Load old teams after copy to teams tab in teams_coaches, then auto-update year-grade
must be manually saved with saveteams... then any adjustments made manually in Excel'''
# check to ensure teams are not already updated
if teams.iloc[0]['Year']==year:
print('Teams already updated for ', year,' school year')
return teams # pass back unaltered
# temporarily make the K to 0 replacements
teams.Grade=teams.Grade.replace('K',0)
    teams.loc[:,'Graderange']=teams['Graderange'].astype(str) # convert all to string
teams.loc[:,'Year']=year
teams.loc[:,'Grade']+=1
for index, row in teams.iterrows():
grade=teams.loc[index]['Grade']
div=teams.loc[index]['Division'] # division must match grade
div=div.replace('K','0') # replace any Ks in string
newdiv=''.join([s if not s.isdigit() else str(grade) for s in div]) # find replace for unknown # w/ new grade
teams.loc[index,'Division'] = newdiv
cycname=teams.loc[index]['Team'] # update grade portion of team name
if cycname.startswith('K'):
newcycname='1'+ cycname[1:]
teams.loc[index,'Team'] = newcycname
elif cycname[0].isdigit(): # now update teams beginning w/ numbers
newcycname=str(grade)+ cycname[1:]
teams.loc[index,'Team']= newcycname
# update grade ranges
grrange=teams.loc[index]['Graderange'] # should be all numbers
grrange=grrange.replace('K','0')
newrange=''.join([str(int(i)+1) for i in grrange])
        teams.loc[index,'Graderange'] = newrange # grade range is stored as a string
# no auto-save... save with saveteams after checking for proper changes
return teams
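# Hypothetical illustration (comment only; division/team strings are made up) of the per-row
# updates above: a prior-year row with Grade 3, Division 'SOCG3', Team '3G-White', Graderange '34'
# becomes Grade 4, Division 'SOCG4', Team '4G-White', Graderange '45' after the year rolls over
# (division digits and the leading digit of the team name track the new grade).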
def splitcoaches(df):
    ''' Pass CYC teams list; split and duplicate rows with comma-separated values in the AssistantIDs column (one extra row per assistant coach)'''
df.loc[:,'Role']='Coach' # add col for head or asst (first entry for head coach)
# df['Open/Closed']='Closed'
assistants=df.dropna(subset=['AssistantIDs']) # drop teams w/ no asst coaches
for index, rows in assistants.iterrows():
val=assistants.loc[index,'AssistantIDs']
asstcoaches=[str(s) for s in val.split(',')] #list of assistants for single team
for i,asst in enumerate(asstcoaches):
newrow=assistants.loc[index] # duplicate entry as series
asst=asst.strip() # strip leading, trailing blanks
newrow.loc['Coach ID'] = asst # set this asst coaches ID
newrow.loc['Role'] = 'Assistant Coach'
df=df.append(newrow)
df=df.sort_values(['Team'],ascending=True)
return df
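# Hypothetical illustration (comment only; IDs are made up): a team row with Coach ID 'C100' and
# AssistantIDs 'C200, C300' yields three roster rows after splitcoaches -- the original row with
# Role 'Coach' plus one duplicated row per assistant with Role 'Assistant Coach' and
# Coach ID 'C200' / 'C300'.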
def addcoachestoroster(teams, coaches):
'''Creates roster entries for coaches for each CYC team
pass teams and coaches (with coach roster info)
needed roster cols are all below (except sport used in output parsing)
args: teams -- team table w/ head and asst coach CYC ids
coaches - coaches table with CYC Id (key) and associated info
returns: coachroster --separate df to be appended to main player roster
'''
# Add team coaches (match by CYC-IDs)
thismask = teams['Team'].str.contains('-', case=False, na=False) # finds this season's CYC level teams
CYCcoach=teams.loc[thismask] # also has associated sport
CYCcoach=splitcoaches(CYCcoach) # makes new row for all assistant coaches on CYC teams
CYCcoach=pd.merge(CYCcoach, coaches, how='left', on=['Coach ID'], suffixes=('','_r'))
mycols=['Sport','Fname', 'Lname', 'Street', 'City', 'State', 'Zip', 'Phone', 'Email', 'Birthdate', 'Sex', 'Role', 'Division', 'Grade', 'Team', 'School', 'Parish of Registration', 'Parish of Residence', 'Coach ID']
for col in [col for col in mycols if col not in CYCcoach.columns]:
CYCcoach[col]='' # birthdate generally missing
CYCcoach=CYCcoach[mycols] # put back in desired order
# drop duplicates on CYC ID, team (sometimes occurs during merge)
CYCcoach=CYCcoach.drop_duplicates(['Coach ID','Team'])
return CYCcoach
def countteamplayers(df, teams, season, year):
''' For each team, summarize number of players (subset those that are younger or older) and list of names
passing mastersignups'''
    df=df[df['Year']==year] # restrict to this year's signups (same team names recur across years)
sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Track','Softball','Baseball','T-ball']}
sportlist=sportsdict.get(season,[])
df=df[df['Sport'].isin(sportlist)] # only this sports season
df.Grade=df.Grade.replace('K',0)
df.Grade=df.Grade.astype('int')
teams.loc[:,'Grade']=teams.Grade.replace('K',0)
teams.loc[:,'Grade']=teams.Grade.astype('int')
teams.loc[:,'Playerlist']=teams.Playerlist.astype('str')
for index, row in teams.iterrows():
teamname=teams.loc[index]['Team']
match=df[df['Team']==teamname] # all players on this team from master_signups
teams.loc[index,'Number'] = len(match) # total number of players
# compose player list (First L.) and add to teams
playerlist=[]
for ind, ro in match.iterrows():
first=match.loc[ind]['First']
last=match.loc[ind]['Last']
strname=first+' ' +last[0]
playerlist.append(strname)
players=", ".join(playerlist)
teams.loc[index,'Playerlist'] = players
# count players above or below grade level
thisgrade=int(teams.loc[index]['Grade'])
teams.loc[index,'Upper'] = (match.Grade > thisgrade).sum()
teams.loc[index,'Lower'] = (match.Grade < thisgrade).sum()
writetoxls(teams, 'Teams', 'Teams_coaches.xlsx')
return teams
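# Hypothetical illustration (comment only; roster is made up): for a grade-4 team whose master
# signups hold players in grades 3, 4, 4 and 5, the loop above records Number=4, Upper=1 (the
# 5th grader), Lower=1 (the 3rd grader), and a Playerlist string like 'Ann B, Carl D, ...'.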
def writecontacts(df, famcontact, players, season, year):
''' From mastersignups and teams, output contact lists for all teams/all sports separately '''
# Slice by sport: Basketball (null for winter?), Soccer, Volleyball, Baseball, T-ball, Softball, Track)
sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Track','Softball','Baseball','T-ball']}
sportlist=sportsdict.get(season)
df=df.loc[(df['Sport'].isin(sportlist)) & (df['Year']==year)] # season is not in mastersignups... only individual sports
'''# for transfers to same school (but different grades), combine all into single list for given school
for index,row in df.iterrows():
if str(df.loc[index]['Team'])!='nan': # avoids nan team screwups
if '#' in df.loc[index]['Team']: # this combines Ambrose#2B, Ambrose#3G to single tab
df=df.set_value(index,'Team',df.loc[index]['Team'].split('#')[0])
'''
# get family contact info from famcontacts
df=pd.merge(df, famcontact, how='left', on=['Famkey'], suffixes=('','_r'))
# get school from players.csv
df=pd.merge(df, players, how='left', on=['Plakey'], suffixes=('','_r3'))
# Sort by grade pre-split
df.loc[:,'Grade']=df.Grade.replace('K',0)
df.loc[:,'Grade']=df.Grade.apply(int)
df=df.sort_values(['Grade'], ascending=True)
    df.loc[:,'Grade']=df.Grade.replace(0,'K') # restore 'K' after the numeric sort
df.loc[:,'Team']=df.Team.replace(np.nan,'None') # still give contacts if team not yet assigned
df.loc[:,'Team']=df.Team.replace('','None')
# Standard sport contacts output for soccer, VB, basketball
if season!='Spring':
for i, sport in enumerate(sportlist):
fname=cnf._OUTPUT_DIR+'\\'+sport+'_'+str(year)+'_contacts.xlsx'
writer=pd.ExcelWriter(fname, engine='openpyxl')
Thissport=df[df['Sport']==sport]
teamlist= Thissport.Team.unique()
teamlist=np.ndarray.tolist(teamlist)
# Combine transfers to same school
transchools=[s.split('#')[0] for s in teamlist if '#' in s]
teamlist=[s for s in teamlist if '#' not in s]
teamlist.extend(transchools) # all to same school as single "team"
# now can organize contacts (and drop sport)
mycols=['First', 'Last', 'Grade', 'Gender', 'School', 'Phone1', 'Text1','Email1', 'Phone2', 'Text2',
'Email2', 'Team', 'Pfirst1', 'Plast1', 'Pfirst2', 'Plast2', 'Plakey', 'Famkey', 'Family']
Thissport=Thissport[mycols] # drop columns and rearrange
for i, team in enumerate(teamlist):
thisteam=Thissport[Thissport['Team'].str.contains(team)]
thisteam.to_excel(writer,sheet_name=team,index=False) # this overwrites existing file
writer.save()
else: # handle spring special case
Balls=df[df['Sport']!='Track'] # all ball-bat sports together
mycols=['First', 'Last', 'Grade', 'Gender', 'School', 'Phone1', 'Text1','Email1', 'Phone2', 'Text2',
'Email2', 'Team', 'Pfirst1', 'Plast1', 'Pfirst2', 'Plast2', 'Plakey', 'Famkey', 'Family']
Balls=Balls[mycols]
teamlist= Balls.Team.unique()
teamlist=np.ndarray.tolist(teamlist)
# Combine transfers
transchools=[s.split('#')[0] for s in teamlist if '#' in s]
teamlist=[s for s in teamlist if '#' not in s]
teamlist.extend(transchools) # all to same school as single "team"
fname=cnf._OUTPUT_DIR+'\\'+'Batball'+'_'+str(year)+'_contacts.xlsx'
writer=pd.ExcelWriter(fname, engine='openpyxl')
# create a separate tab for each team and write the contacts
for i, team in enumerate(teamlist):
thisteam=Balls[Balls['Team'].str.contains(team)]
thisteam.to_excel(writer,sheet_name=team,index=False) # this overwrites existing file
writer.save() # overwrites existing
# Entire track team as single file
Track=df[df['Sport']=='Track']
Track=Track[mycols] # drop columns and rearrange
fname=cnf._OUTPUT_DIR+'\\'+'Track'+'_'+str(year)+'_contacts.xlsx'
writer=pd.ExcelWriter(fname, engine='openpyxl')
Track.to_excel(writer,sheet_name='Track',index=False)
writer.save()
return
def makegoogcont(df, famcontact, players, season, year):
'''Create and save a google contacts file for all Cabrini teams
save to csv '''
sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Track','Softball','Baseball','T-ball']}
sportlist=sportsdict.get(season)
df=df.loc[(df['Sport'].isin(sportlist)) & (df['Year']==year)] # season is not in mastersignups... only individual sports
'''# for transfers to same school (but different grades), combine all into single list for given school
for index,row in df.iterrows():
if str(df.loc[index]['Team'])!='nan': # avoids nan team screwups
if '#' in df.loc[index]['Team']: # this combines Ambrose#2B, Ambrose#3G to single tab
df=df.set_value(index,'Team',df.loc[index]['Team'].split('#')[0])
'''
# get family contact info from famcontacts
df=pd.merge(df, famcontact, how='left', on=['Famkey'], suffixes=('','_r'))
# get school from players.csv
df=pd.merge(df, players, how='left', on=['Plakey'], suffixes=('','_r3'))
# Drop any players not yet assigned
df=df.dropna(subset=['Team'])
# Full contacts list format for android/google
for i, sport in enumerate(sportlist):
Thissport=df[df['Sport']==sport]
teamlist= Thissport.Team.unique()
teamlist=np.ndarray.tolist(teamlist)
# drop if team is not yet assigned
teamlist=[s for s in teamlist if str(s) != 'nan']
# drop if team is 'drop'
teamlist=[s for s in teamlist if str(s) != 'drop']
# Drop all non-Cabrini transferred teams (which must contain #)
teamlist=[s for s in teamlist if '#' not in s]
# Combine track subteams to single team
teamlist=[s[0:5] if 'Track' in s else s for s in teamlist]
teamlist=set(teamlist)
teamlist=list(teamlist)
# now create google contacts list for each Cabrini team and save
for j, team in enumerate(teamlist):
thisteam=Thissport[Thissport['Team'].str.contains(team)]
# Drop duplicate from same family
thisteam=thisteam.drop_duplicates('Phone1')
thisteam.loc[:,'Name']=thisteam['First']+' '+thisteam['Last']
thisteam.loc[:,'Group']=sport+str(year)
mycols=['Name','Pfirst1','Last','Phone1','Phone2','Email1','Email2','Group']
newcols=['Name','Additional Name','Family Name','Phone 1 - Value','Phone 2 - Value',
'E-mail 1 - Value','E-mail 2 - Value','Group Membership']
thisteam=thisteam[mycols]
thisteam.columns=newcols
thisteam=thisteam.replace(np.nan,'')
fname=cnf._OUTPUT_DIR+'\\google'+team+'.csv'
thisteam.to_csv(fname, index=False)
return
def createsignups(df, Mastersignups, season, year):
''' pass signups and add signups to master list, also returns list of current
player keys by sport; typically use writesignupstoExcel instead
args:
df - signup (dataframe)
Mastersignups - existing all signups db-like file
season - ['Fall','Winter','Spring']
year- 4 digit year as int
returns:
Mastersignups - same with new unique entries
'''
sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Track','Softball','Baseball','T-ball']}
sportlist=sportsdict.get(season)
# Use comma sep on multiple sport entries??
now=datetime.now()
thisdate=date.strftime(now,'%m/%d/%Y') # for signup date
df.loc[:,'SUdate']=thisdate # can do this globally although might also add to signups
startlen=len(Mastersignups) # starting number of signups
intcols=['SUkey','Year']
for i, col in enumerate(intcols):
if col not in df:
df.loc[df.index, col]=np.nan
mycols=Mastersignups.columns.tolist() # desired column order
for i, col in enumerate(mycols):
if col not in df:
df.loc[df.index,col]=np.nan
# TODO one option here would be to clone comma-separated sport entries (i.e. track and softball)
for i, sport in enumerate(sportlist):
# Use caution here due to Tball in Softball string problem (currently set to T-ball)
thissport=df.loc[df['Sport'].str.contains(sport, na=False, case=False)] # also handles multi-sports
# Prepare necessary columns
for index, row in thissport.iterrows():
thissport.loc[index,'Sport'] = sport # set individually to formal sport name
thissport.loc[index,'Year'] = int(year)
thissport.loc[index,'SUkey'] = 0 # assigned actual key below
# Now organize signups and add year
Mastersignups=pd.concat([thissport,Mastersignups], ignore_index=True)
Mastersignups=Mastersignups[mycols] # put back in original order
# drop duplicates and save master signups file (keep older signup if present... already assigned SUkey)
Mastersignups=Mastersignups.sort_values(['Plakey', 'Sport','Year','SUkey'], ascending=False) # keeps oldest signup
Mastersignups=Mastersignups.drop_duplicates(subset=['Plakey', 'Sport','Year']) # drop duplicates (for rerun with updated signups)
newsignups=len(Mastersignups)-startlen # number of new signups added this pass
print('Added ', str(newsignups),' new ', season, ' signups to master list.')
# add unique SUkey (if not already assigned)
    neededkeys = Mastersignups[(Mastersignups['SUkey']==0)] # newly added signups that still need a unique SUkey
availSUkeys=findavailablekeys(Mastersignups, 'SUkey', len(neededkeys)) # get necessary # of unique SU keys
keycounter=0
for index, row in neededkeys.iterrows():
Mastersignups.loc[index,'SUkey'] = availSUkeys[keycounter] # reassign SU key in source master list
keycounter+=1 # move to next available key
Mastersignups.loc[:,'Grade']=Mastersignups.Grade.replace('K',0)
Mastersignups=Mastersignups.sort_values(['Year', 'Sport', 'Gender','Grade'], ascending=False)
Mastersignups.loc[:,'Grade']=Mastersignups.Grade.replace(0,'K')
# autocsvbackup(Mastersignups,'master_signups', newback=True)
    Mastersignups.to_csv(cnf._INPUT_DIR + '\\master_signups.csv', index=False, date_format='%m/%d/%y') # automatically saved
return Mastersignups
def replaceacro(df, acronyms):
    ''' Pass df and return it with acronyms replaced by their full translations (parishes and schools);
    currently used only for CYC rosters '''
for index, row in acronyms.iterrows():
acro=acronyms.loc[index]['acronym']
transl=acronyms.loc[index]['translation']
# TODO only for parish columns
df.loc[:,'Parish of Registration']=df['Parish of Registration'].replace(acro, transl)
df.loc[:,'Parish of Residence']=df['Parish of Residence'].replace(acro, transl)
df.loc[:,'School']=df['School'].replace(acro, transl)
return df
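# Hypothetical illustration (comment only; the acronym row is made up): with an acronyms table
# containing acronym='STM', translation='St. Thomas More', replaceacro() rewrites every 'STM'
# value in the Parish of Registration, Parish of Residence and School columns to the full name.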
def createrosters(df, season, year, players, teams, coaches, famcontact, acronyms):
''' From Mastersignups of this season creates Cabrini CYC roster and transfers (for separate sports)
and all junior sports (calculates ages for Judge Dowd); pulls info merged from famcontact, players, teams, and coaches
teams should already be assigned using teams xls and assigntoteams function
returns: None ... direct save to OUTPUT_DIR
'''
sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Track','Softball','Baseball','T-ball']}
specials=['Chess','Track']
sports=sportsdict.get(season)
sportlist=[sport for sport in sports if sport not in specials]
speciallist=[sport for sport in sports if sport in specials] # for track, chess, other oddballs
Specials=df[(df['Year']==year) & (df['Sport'].isin(speciallist))] # deal with these at bottom
# Proceed with all normal South Central sports
df = df[(df['Year']==year) & (df['Sport'].isin(sportlist))] # filter by year
# make duplicate entry row for double-rostered players (multiple team assignments)
thismask = df['Team'].str.contains(',', na=False) # multiple teams are comma separated
doubles=df.loc[thismask]
for index, rows in doubles.iterrows():
team=doubles.loc[index,'Team']
team=team.split(',')[1] # grab 2nd of duplicate teams
doubles.loc[index, 'Team'] = team
df=pd.concat([df,doubles], ignore_index=True) # adds duplicate entry for double-rostered players with 2nd team
thismask = df['Team'].str.contains(',', na=False) # multiple teams are comma separated
for index, val in thismask.iteritems():
if val:
team=df.loc[index]['Team']
team=team.split(',')[0] # grab 1st of duplicate teams
df.loc[index, 'Team'] = team # removes 2nd team from first entry
# now grab all extra info needed for CYC rosters
# Street, City, State, Zip, Phone, email, Parishreg, parishres from fam-contact
df=pd.merge(df, famcontact, how='left', on=['Famkey'], suffixes=('','_r'))
# Get division from Teams xls
df=pd.merge(df, teams, how='left', on=['Team'], suffixes=('','_r2')) # effectively adds other team info for roster toall players
# DOB, School from players.csv
df=pd.merge(df, players, how='left', on=['Plakey'], suffixes=('','_r3'))
df.loc[:,'Role']='Player' # add column for role
# df['Open/Closed']=np.nan
df.loc[:,'Coach ID']=''
def formatDOB(val):
# Pat moore date format is 4/4/19.. reformat as string for csv output
try:
return datetime.strftime(val, "%m/%d/%y")
except:
# print('Problem converting %s of type %s to date string format' %(val, type(val)) )
return ''
# Find Cabrini CYC names (containing hyphen)
thismask = df['Team'].str.contains('-', case=False, na=False)
CabriniCYC=df.loc[thismask] # all players on Cabrini CYC teams all sports this season
# Finds info for CYC coaches (all sports) and generate roster entries
coachroster=addcoachestoroster(teams, coaches) # coaches roster already in correct format + sport column
if len(CabriniCYC)>1: # skip if all transfers or junior (i.e. in spring)
# Split by sport
for i, sport in enumerate(sportlist):
Sportroster=CabriniCYC[CabriniCYC['Sport']==sport]
# reformat this mess as single CYC roster
Sportroster=organizeroster(Sportroster)
# Add coaches from this sport to roster
Rostercoaches=coachroster[coachroster['Sport']==sport]
Rostercoaches=organizeroster(Rostercoaches)
Sportroster=pd.concat([Sportroster,Rostercoaches], ignore_index=True) # adds coaches and players together
Sportroster=Sportroster.sort_values(['Team','Role','Grade','Lname'])
fname=cnf._OUTPUT_DIR+'\\Cabrini_'+sport+'roster'+str(year)+'.csv'
Sportroster=replaceacro(Sportroster, acronyms) # replace abbreviations
Sportroster.loc[:,'Birthdate']=Sportroster['Birthdate'].apply(lambda x: formatDOB(x))
Sportroster.to_csv(fname, index=False)
# done with Cabrini CYC rosters
# Break out all other types of teams (transfers, junior teams, Chess/Track)
thismask = df['Team'].str.contains('-', case=False, na=False)
Others=df.loc[~thismask] # no hyphen for all non Cabrini CYC level (Cabrini junior and transfers)
# Cabrini transferred players to CYC teams with # (i.e. Ambrose#8B, OLS#3G)
# Non-CYC cabrini junior teams start with number
thismask = Others['Team'].str.contains('#', na=True) # flag nans and set to true (usually jr teams w/o assignment)
# Transferred teams contain # such as OLS#3G
Transfers=Others.loc[thismask] # transferred teams have # but no hyphen
for i, sport in enumerate(sportlist): # output roster for all transfers (all grades in case of CYC)
Transferroster=Transfers[Transfers['Sport']==sport]
Transferroster=organizeroster(Transferroster)
Transferroster=Transferroster.sort_values(['Team', 'Sex', 'Grade'], ascending=True)
fname=cnf._OUTPUT_DIR+'\\CYC'+sport+'transfers.csv'
Transferroster=replaceacro(Transferroster,acronyms)
Transferroster.loc[:,'Birthdate']=Transferroster['Birthdate'].apply(lambda x: formatDOB(x))
Transferroster.to_csv(fname, index=False)
    # Now deal with junior Cabrini (should be the only thing left after Cabrini CYC,
    # transfers, and special sports are removed)
Juniorteams=Others.loc[~thismask] # remove transfers
Juniorteams=Juniorteams[Juniorteams['Team']!='drop'] # remove dropped players
# now output all junior teams in same format (sometimes needed by <NAME>)
# also calculate current age
if len(Juniorteams)>0:
Juniorteams=organizeroster(Juniorteams) # put in standard South Central roster format
# Calculate current age from DOBs (renamed to Birthdate for roster only)
Juniorteams.loc[:,'Age']=calcage(Juniorteams['Birthdate'])
fname=cnf._OUTPUT_DIR+'\\Cabrini_junior_teams_'+str(year)+'.csv'
Juniorteams=replaceacro(Juniorteams, acronyms)
Juniorteams.loc[:,'Birthdate']=Juniorteams['Birthdate'].apply(lambda x: formatDOB(x))
Juniorteams.to_csv(fname, index=False)
# Deal with special cases -Track and Chess
# Get DOB/school from players.. anything else needed by <NAME>?
Specials=pd.merge(Specials, players, how='left', on='Plakey', suffixes=('','_r'))
# needs address
Specials=pd.merge(Specials, famcontact, how='left', on='Famkey', suffixes=('','_r2'))
    for i, sport in enumerate(speciallist): # output a roster for each special sport (e.g. Track, Chess)
        Sportspecial=Specials[Specials['Sport']==sport] # filter into a per-sport frame so Specials isn't clobbered between iterations
        Sportspecial=Sportspecial.rename(columns={'DOB':'Birthdate'})
        mycols=['First', 'Last','Gender','Team','Grade','Birthdate','School','Address','Zip']
        Sportspecial=Sportspecial[mycols]
        Sportspecial=Sportspecial.sort_values(['Gender', 'Birthdate', 'Grade'], ascending=True)
        Sportspecial.loc[:,'Birthdate']=Sportspecial['Birthdate'].apply(lambda x: formatDOB(x))
        fname= cnf._OUTPUT_DIR+'\\'+ sport+'_'+str(year)+'_rosters.csv'
        Sportspecial.to_csv(fname, index=False)
return
def makemultiteam(df):
    '''Small utility called by assigntoteams: makes a temp teams df with a separate entry for each
    grade of any mixed-grade team (e.g. K1, 45), so the subsequent merge that assigns teams is straightforward
    '''
# TODO annoying problem with combining teams due to K1 (string but not int)
mycols=df.dtypes.index
# Deal with K1, K2 and such teams
kteams=[str(s) for s in np.ndarray.tolist(df.Graderange.unique()) if 'K' in str(s)]
kteams=[s for s in kteams if len(s)>1] # combo teams only
kteams=df[df['Graderange'].isin(kteams)]
xtrateams=pd.DataFrame(index=np.arange(0,0),columns=mycols) # empty df
# clones rows to match lower grades in range
for index, row in kteams.iterrows():
tempstr= kteams.loc[index]['Graderange']
gr1=0 # 0 for grade K
gr2=int(tempstr[1])
for gr in range(gr1,gr2):
newrow=kteams.loc[index] # grabs row as series
newrow.loc['Grade'] = gr # set to correct grade
xtrateams=xtrateams.append(newrow) # add single row to temp df
df.loc[:,'Grade']=df.Grade.replace('K','0', regex=True)
# get rid of K string problem
df.loc[:,'Graderange']=df.Graderange.replace('K','0', regex=True)
df.loc[:,'Graderange']=df.Graderange.astype('int')
# now handle numbered multiteams (e.g. 45 78 two digit ints)
multiteams=df.loc[df['Graderange']>9] # subset of teams comprised of multiple grades
for index, row in multiteams.iterrows(): # check for 3 or more grades
# TODO make sure it's not 3 grades (i.e. K-2)
tempstr= str(multiteams.loc[index]['Graderange'])
gr1=int(tempstr[0])
gr2=int(tempstr[1])
for gr in range(gr1,gr2):
newrow=multiteams.loc[index] # grabs row as series
newrow.loc['Grade'] = gr # set to correct grade
xtrateams=xtrateams.append(newrow) # add single row to temp df
# Detect gender-grade-sport w/ two teams
# now combine with original df
    df=pd.concat([df,xtrateams], ignore_index=True)
import time
import numpy as np
import pandas as pd
import healpy as hp
from astropy.table import Table
from astropy.io import fits
from astropy.wcs import WCS
from scipy import interpolate
import os, socket, subprocess, shlex
import argparse
import logging, traceback
import paramiko
from helper_funcs import send_email, send_error_email, send_email_attach, send_email_wHTML
from sqlite_funcs import get_conn
from dbread_funcs import get_files_tab, get_info_tab, guess_dbfname
from coord_conv_funcs import convert_radec2imxy, convert_imxy2radec
def cli():
parser = argparse.ArgumentParser()
parser.add_argument('--evfname', type=str,\
help="Event data file",
default=None)
parser.add_argument('--fp_dir', type=str,\
help="Directory where the detector footprints are",
default='/storage/work/jjd330/local/bat_data/rtfp_dir_npy/')
parser.add_argument('--Nrate_jobs', type=int,\
help="Total number of jobs",
default=16)
parser.add_argument('--TSscan', type=float,\
help="Min TS needed to do a full FoV scan",
default=6.25)
parser.add_argument('--pix_fname', type=str,\
help="Name of the file with good imx/y coordinates",\
default='good_pix2scan.npy')
parser.add_argument('--bkg_fname', type=str,\
help="Name of the file with the bkg fits",\
default='bkg_estimation.csv')
parser.add_argument('--dbfname', type=str,\
help="Name to save the database to",\
default=None)
parser.add_argument('--GWname', type=str,\
help="Name of the event to submit jobs as",\
default='')
parser.add_argument('--queue', type=str,\
help="Name of the queue to submit jobs to",\
default='cyberlamp')
parser.add_argument('--qos', type=str,\
help="Name of the qos to submit jobs to",\
default=None)
parser.add_argument('--pcfname', type=str,\
help="Name of the partial coding image",\
default='pc_2.img')
parser.add_argument('--BKGpyscript', type=str,\
help="Name of python script for Bkg Estimation",\
default='do_bkg_estimation_wPSs_mp.py')
parser.add_argument('--RATEpyscript', type=str,\
help="Name of python script for Rates analysis",\
default='do_rates_mle_wPSs.py')
parser.add_argument('--LLHpyscript', type=str,\
help="Name of python script for LLH analysis",\
default='do_llh_wPSs_uncoded_realtime.py')
parser.add_argument('--SCANpyscript', type=str,\
help="Name of python script for FoV scan",\
default='do_llh_scan_uncoded.py')
parser.add_argument('--PEAKpyscript', type=str,\
help="Name of python script for FoV scan",\
default='do_intLLH_forPeaks.py')
parser.add_argument('--do_bkg',\
help="Submit the BKG estimation script",\
action='store_true')
parser.add_argument('--do_rates',\
help="Submit the Rate jobs",\
action='store_true')
parser.add_argument('--do_llh',\
help="Submit the llh jobs",\
action='store_true')
parser.add_argument('--do_scan',\
help="Submit the scan jobs",\
action='store_true')
parser.add_argument('--skip_waiting',\
help="Skip waiting for the stuff to finish and use what's there now",\
action='store_true')
parser.add_argument('--archive',\
help="Run in archive mode, not realtime mode",\
action='store_true')
parser.add_argument('--rhel7',\
help="Submit to a rhel7 node",\
action='store_true')
parser.add_argument('--pbs_fname', type=str,\
help="Name of pbs script",\
default='/storage/work/jjd330/local/bat_data/BatML/submission_scripts/pyscript_template.pbs')
parser.add_argument('--min_pc', type=float,\
            help="Min partial coding fraction to use",\
default=0.1)
args = parser.parse_args()
return args
def im_dist(imx0, imy0, imx1, imy1):
return np.hypot(imx0 - imx1, imy0 - imy1)
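# Illustrative sanity check with made-up imx/imy values: np.hypot gives the
# Euclidean separation in detector tangent-plane coordinates.
assert np.isclose(im_dist(0.0, 0.0, 0.3, 0.4), 0.5)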
def get_rate_res_fnames(direc='.'):
rate_fnames = [fname for fname in os.listdir(direc) if ('rates' in fname) and (fname[-4:]=='.csv')]
return rate_fnames
def get_res_fnames(direc='.'):
res_fnames = [fname for fname in os.listdir(direc) if (fname[-4:]=='.csv') and (fname[:4]=='res_')]
return res_fnames
def get_scan_res_fnames(direc='.'):
res_fnames = [fname for fname in os.listdir(direc) if (fname[-4:]=='.csv') and ('scan_res_' in fname)]
return res_fnames
def get_peak_res_fnames(direc='.'):
res_fnames = [fname for fname in os.listdir(direc) if (fname[-4:]=='.csv') and ('peak_scan_' in fname)]
return res_fnames
def get_merged_csv_df(csv_fnames):
dfs = []
for csv_fname in csv_fnames:
try:
dfs.append(pd.read_csv(csv_fname, dtype={'timeID':np.int}))
except Exception as E:
logging.error(E)
continue
df = pd.concat(dfs)
return df
def probm2perc(pmap):
bl = (pmap>0)
p_map = np.copy(pmap)
inds_sort = np.argsort(p_map)[::-1]
perc_map = np.zeros_like(p_map)
    perc_map[inds_sort] = np.cumsum(p_map[inds_sort])
perc_map[~bl] = 1.
return perc_map
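# Illustrative sketch on a tiny made-up probability array (not a real HEALPix
# map): pixels are ranked by probability and mapped to cumulative coverage.
_toy_perc = probm2perc(np.array([0.5, 0.3, 0.2, 0.0]))
# -> array([0.5, 0.8, 1. , 1. ])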
def get_merged_csv_df_wpos(csv_fnames, attfile, perc_map=None, direc=None):
dfs = []
for csv_fname in csv_fnames:
try:
if direc is None:
tab = pd.read_csv(csv_fname, dtype={'timeID':np.int})
else:
tab = pd.read_csv(os.path.join(direc,csv_fname), dtype={'timeID':np.int})
if len(tab) > 0:
# att_ind = np.argmin(np.abs(attfile['TIME'] - trigger_time))
# att_quat = attfile['QPARAM'][att_ind]
# ras = np.zeros(len(tab))
# decs = np.zeros(len(tab))
# for i in xrange(len(ras)):
# # print np.shape(res_tab['time'][i]), np.shape(attfile['TIME'])
# att_ind0 = np.argmin(np.abs(tab['time'][i] + tab['duration'][i]/2. - attfile['TIME']))
# att_quat0 = attfile['QPARAM'][att_ind0]
# ras[i], decs[i] = convert_imxy2radec(tab['imx'][i],\
# tab['imy'][i],\
# att_quat0)
t0_ = np.nanmean(tab['time'] + tab['duration']/2.)
att_ind0 = np.argmin(np.abs(t0_ - attfile['TIME']))
att_quat0 = attfile['QPARAM'][att_ind0]
ras, decs = convert_imxy2radec(tab['imx'], tab['imy'], att_quat0)
tab['ra'] = ras
tab['dec'] = decs
if not perc_map is None:
Nside = hp.npix2nside(len(perc_map))
hp_inds = hp.ang2pix(Nside, ras, decs, lonlat=True, nest=True)
cred_lvl = perc_map[hp_inds]
tab['cls'] = cred_lvl
dfs.append(tab)
except Exception as E:
logging.warning(E)
continue
df = pd.concat(dfs)
return df
def mk_seed_tab4scans(res_tab, pc_fname, rate_seed_tab, TS_min=6.5, im_steps=20, pc_min=0.1):
PC = fits.open(pc_fname)[0]
pc = PC.data
w_t = WCS(PC.header, key='T')
pcbl = (pc>=(pc_min*.99))
pc_inds = np.where(pcbl)
pc_imxs, pc_imys = w_t.all_pix2world(pc_inds[1], pc_inds[0], 0)
imxax = np.linspace(-2,2,im_steps*4+1)
imyax = np.linspace(-1,1,im_steps*2+1)
im_step = imxax[1] - imxax[0]
bins = [imxax, imyax]
h = np.histogram2d(pc_imxs, pc_imys, bins=bins)[0]
inds = np.where(h>=10)
squareIDs_all = np.ravel_multi_index(inds, h.shape)
df_twinds = res_tab.groupby('timeID')
seed_tabs = []
for twind, dft in df_twinds:
if np.max(dft['TS']) >= TS_min:
seed_dict = {}
seed_dict['timeID'] = twind
seed_dict['dur'] = dft['duration'].values[0]
seed_dict['time'] = dft['time'].values[0]
bl_rate_seed = (rate_seed_tab['timeID']==twind)
squareIDs_done = rate_seed_tab['squareID'][bl_rate_seed]
squareIDs = squareIDs_all[~np.isin(squareIDs_all, squareIDs_done)]
seed_dict['squareID'] = squareIDs
seed_tabs.append(pd.DataFrame(seed_dict))
    seed_tab = pd.concat(seed_tabs)
import numpy as np
import yaml
from dask.distributed import Client, LocalCluster, as_completed
import argparse
from os.path import exists, join
from os import makedirs
from mlmicrophysics.data import subset_data_files_by_date, assemble_data_files
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.preprocessing import StandardScaler, RobustScaler, MaxAbsScaler, MinMaxScaler
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error, accuracy_score
from mlmicrophysics.metrics import hellinger_distance, heidke_skill_score, peirce_skill_score
from sklearn.model_selection import ParameterSampler
from scipy.stats import randint, uniform, expon
import pandas as pd
import traceback
scalers = {"MinMaxScaler": MinMaxScaler,
"MaxAbsScaler": MaxAbsScaler,
"StandardScaler": StandardScaler,
"RobustScaler": RobustScaler}
def sampler_generator(ps):
for params in ps:
yield params
def parse_model_config_params(model_params, num_settings, random_state):
"""
    Build a generator of sampled hyperparameter settings from a config block.

    Args:
        model_params: dict mapping each hyperparameter name to either a fixed
            list of choices or a ["randint"|"expon"|"uniform", *args] spec that
            is converted to the matching scipy.stats distribution.
        num_settings: number of parameter settings to sample.
        random_state: seed or numpy RandomState used by ParameterSampler.
    Returns:
        Generator yielding one sampled parameter dict per iteration.
    """
param_distributions = dict()
dist_types = dict(randint=randint, expon=expon, uniform=uniform)
for param, param_value in model_params.items():
if param_value[0] in ["randint", "expon", "uniform"]:
param_distributions[param] = dist_types[param_value[0]](*param_value[1:])
else:
param_distributions[param] = param_value
return sampler_generator(ParameterSampler(param_distributions, n_iter=num_settings, random_state=random_state))
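# Illustrative sketch of the expected config block (parameter names here are
# assumptions, not taken from a real config file): a ["randint", lo, hi] spec
# becomes scipy.stats.randint(lo, hi); anything else is sampled from as-is.
_example_params = {"n_estimators": ["randint", 100, 500],
                   "max_features": ["sqrt", "log2"]}
_example_gen = parse_model_config_params(_example_params, 3, np.random.RandomState(0))
# next(_example_gen) -> e.g. {"max_features": "log2", "n_estimators": 215}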
def main():
parser = argparse.ArgumentParser()
parser.add_argument("config", help="Configuration yaml file")
parser.add_argument("-p", "--proc", type=int, default=1, help="Number of processors")
args = parser.parse_args()
if not exists(args.config):
raise FileNotFoundError(args.config + " not found.")
with open(args.config) as config_file:
        config = yaml.load(config_file, Loader=yaml.FullLoader)
train_files, val_files, test_files = subset_data_files_by_date(config["data_path"],
config["data_end"], **config["subset_data"])
input_scaler = scalers[config["input_scaler"]]()
train_input, \
train_output_labels, \
train_transformed_output, \
train_scaled_output, \
output_scalers = assemble_data_files(train_files,
config["input_cols"],
config["output_cols"],
config["input_transforms"],
config["output_transforms"],
input_scaler,
subsample=config["subsample"])
print("Train Input Size:", train_input.shape)
val_input, \
val_output_labels, \
val_transformed_output, \
val_scaled_output, \
output_scalers = assemble_data_files(val_files,
config["input_cols"],
config["output_cols"],
config["input_transforms"],
config["output_transforms"],
input_scaler,
output_scalers=output_scalers,
train=False,
subsample=config["subsample"])
print("Val Input Size:", val_input.shape)
cluster = LocalCluster(n_workers=args.proc, threads_per_worker=1)
client = Client(cluster)
print(client)
train_input_link = client.scatter(train_input)
train_labels_link = client.scatter(train_output_labels)
train_scaled_output_link = client.scatter(train_scaled_output)
val_input_link = client.scatter(val_input)
val_output_labels_link = client.scatter(val_output_labels)
val_scaled_output_link = client.scatter(val_scaled_output)
submissions = []
if not exists(config["out_path"]):
makedirs(config["out_path"])
for class_model_name, class_model_params in config["classifier_models"].items():
for reg_model_name, reg_model_params in config["regressor_models"].items():
rs = np.random.RandomState(config["random_seed"])
class_model_config_generator = parse_model_config_params(class_model_params,
config["num_param_samples"],
rs)
reg_model_config_generator = parse_model_config_params(reg_model_params,
config["num_param_samples"],
rs)
class_model_configs = []
reg_model_configs = []
for s in range(config["num_param_samples"]):
class_model_config = next(class_model_config_generator)
reg_model_config = next(reg_model_config_generator)
class_model_configs.append(class_model_config)
reg_model_configs.append(reg_model_config)
config_index = f"{class_model_name}_{reg_model_name}_{s:04}"
submissions.append(client.submit(validate_model_configuration,
class_model_name, class_model_config,
reg_model_name, reg_model_config, config_index,
train_input_link, train_labels_link,
train_scaled_output_link,
val_input_link, val_output_labels_link,
val_scaled_output_link,
config["classifier_metrics"],
config["regressor_metrics"]))
class_config_frame = pd.DataFrame(class_model_configs)
reg_config_frame = pd.DataFrame(reg_model_configs)
class_config_frame.to_csv(join(config["out_path"],
f"{class_model_name}_{reg_model_name}_classifier_params.csv"),
index_label="Config")
reg_config_frame.to_csv(join(config["out_path"],
f"{class_model_name}_{reg_model_name}_regressor_params.csv"))
result_count = 0
for out in as_completed(submissions):
if out.status == "finished":
result = out.result()
print(result)
if result_count == 0:
result.to_frame().T.to_csv(join(config["out_path"],
f"{class_model_name}_{reg_model_name}_metrics.csv"),
index_label="Config")
else:
result.to_frame().T.to_csv(join(config["out_path"],
f"{class_model_name}_{reg_model_name}_metrics.csv"),
header=False,
mode="a")
result_count += 1
else:
tb = out.traceback()
for line in traceback.format_tb(tb):
print(line)
del submissions[:]
client.close()
cluster.close()
return
def validate_model_configuration(classifier_model_name, classifier_model_config,
regressor_model_name, regressor_model_config, config_index,
train_scaled_input, train_labels, train_scaled_output,
val_scaled_input, val_labels, val_scaled_output,
classifier_metric_list, regressor_metric_list):
"""
    Train a single machine learning model configuration to predict each microphysical tendency.

    Args:
        classifier_model_name: name of the classifier class (a key of the model_classes dict).
        classifier_model_config: dict of keyword arguments passed to the classifier.
        regressor_model_name: name of the regressor class (a key of the model_classes dict).
        regressor_model_config: dict of keyword arguments passed to the regressor.
        config_index: string identifying this hyperparameter configuration.
        train_scaled_input: scaled training input table.
        train_labels: categorical output labels for the training set.
        train_scaled_output: scaled training output table.
        val_scaled_input: scaled validation input table.
        val_labels: categorical output labels for the validation set.
        val_scaled_output: scaled validation output table.
        classifier_metric_list: names of classification metrics to evaluate.
        regressor_metric_list: names of regression metrics to evaluate.
    Returns:
        pandas Series of metric values for this configuration.
    """
from mlmicrophysics.models import DenseNeuralNetwork, DenseGAN
import keras.backend as K
metrics = {"mse": mean_squared_error,
"mae": mean_absolute_error,
"r2": r2_score,
"hellinger": hellinger_distance,
"acc": accuracy_score,
"hss": heidke_skill_score,
"pss": peirce_skill_score}
sess = K.tf.Session(config=K.tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1))
K.set_session(sess)
with sess.as_default():
model_classes = {"RandomForestRegressor": RandomForestRegressor,
"RandomForestClassifier": RandomForestClassifier,
"DenseNeuralNetwork": DenseNeuralNetwork,
"DenseGAN": DenseGAN}
classifier_models = {}
regressor_models = {}
output_label_preds = pd.DataFrame(0, index=val_labels.index, columns=val_labels.columns,
dtype=np.int32)
output_preds = pd.DataFrame(0, index=val_scaled_output.index, columns=val_scaled_output.columns,
dtype=np.float32)
output_regressor_preds = pd.DataFrame(0, index=val_scaled_output.index, columns=val_scaled_output.columns,
dtype=np.float32)
output_metric_columns = []
for output_col in train_scaled_output.columns:
for metric in classifier_metric_list:
output_metric_columns.append(output_col + "_" + metric)
for metric in regressor_metric_list:
output_metric_columns.append(output_col + "_" + metric)
unique_labels = np.unique(train_labels[output_col])
for unique_label in unique_labels:
for metric in regressor_metric_list:
output_metric_columns.append(f"{output_col}_{unique_label}_{metric}")
        output_metrics = pd.Series(index=output_metric_columns, name=config_index, dtype=np.float32)
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 14 12:56:07 2022
@author: r.thorat
"""
#importing necessary modules
import pandas._libs.tslibs.base
import os
os.getcwd()
from tkinter import filedialog as fd
import pandas as pd
import re
from nameparser import HumanName
#define functions
#name of the round
def sub_round():
""" callback when the button is clicked
"""
msg = f'You entered: {round.get()}'
showinfo(
title='Information',
message=msg
)
return round.get()
#to get suggested reviewers, if any
def getsuggestedreviewers0():
global df_SR_0
import_file_path2 = fd.askopenfilename()
df_SR_0 = pd.read_excel (import_file_path2)
return df_SR_0
#to get country sheet
def country_sheet():
global df_CS
import_file_path7 = fd.askopenfilename()
df_CS = pd.read_excel(import_file_path7)
return df_CS
# Create Tkinter GUI to interact with the code
##https://www.geeksforgeeks.org/tkinter-application-to-switch-between-different-page-frames/
import tkinter as tk
from tkinter import ttk
from tkinter.messagebox import showinfo
def Close():
app.destroy()
LARGEFONT =("Verdana", 35)
class tkinterApp(tk.Tk):
# __init__ function for class tkinterApp
def __init__(self, *args, **kwargs):
# __init__ function for class Tk
tk.Tk.__init__(self, *args, **kwargs)
# creating a container
container = tk.Frame(self)
container.pack(side = "top", fill = "both", expand = True)
container.grid_rowconfigure(0, weight = 1)
container.grid_columnconfigure(0, weight = 1)
# initializing frames to an empty array
self.frames = {}
# iterating through a tuple consisting
# of the different page layouts
for F in (StartPage, Page1):
frame = F(container, self)
# initializing frame of that object from
# startpage, page1, page2 respectively with
# for loop
self.frames[F] = frame
frame.grid(row = 0, column = 0, sticky ="nsew")
self.show_frame(StartPage)
# to display the current frame passed as
# parameter
def show_frame(self, cont):
frame = self.frames[cont]
frame.tkraise()
# first window frame startpage
class StartPage(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent)
# label of frame Layout 2
label = ttk.Label(self, text ="Startpage")
# putting the grid in its place by using
# grid
label.grid(row = 0, column = 1, padx = 10, pady = 10)
button1 = ttk.Button(self, text ="Info for Expert Lookup",
command = lambda : controller.show_frame(Page1))
# putting the button in its place by
# using grid
button1.grid(row = 1, column = 1, padx = 10, pady = 10)
# button to show proposalinfo with text
# layout3
button2 = ttk.Button(self, text ="Close",
command =Close)
# putting the button in its place by
# using grid
button2.grid(row = 2, column = 1, padx = 10, pady = 10)
# fourth window frame page1
class Page1(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent)
label = ttk.Label(self, text ="Info for Expert Lookup", font = LARGEFONT)
label.grid(row = 0, column = 1, padx = 40, pady = 10)
# button to show frame 3 with text
# layout3
button3 = ttk.Button(self, text ="Back to Startpage",
command = lambda : controller.show_frame(StartPage))
# putting the button in its place by
# using grid
button3.grid(row = 1, column = 1, padx = 10, pady = 10)
my_label = ttk.Label(self, text ="Enter subsidy name (example: \'%XXX-Talentprogramma XXX XXX 2021%\'):")
# putting the button in its place by
# using grid
my_label.grid(row = 1, column = 2, padx =60, pady = 10)
# putting the button in its place by
# using grid
global round
round = tk.StringVar(self)
# button to show proposalinfo with text
# layout3
my_box = ttk.Entry(self, textvariable=round, width=60)
# putting the button in its place by
# using grid
my_box.grid(row = 2, column = 2, padx = 10, pady = 10)
# button to show proposalinfo with text
# layout3
button4 = ttk.Button(self, text ="Click me to confirm",
command =sub_round)
# putting the button in its place by
# using grid
button4.grid(row = 3, column = 2, padx = 10, pady = 10)
# button to show suggested reviewers with text
# layout3
button5 = ttk.Button(self, text ="Import sugg reviewers",
command =getsuggestedreviewers0)
# putting the button in its place by
# using grid
button5.grid(row = 4, column = 2, padx = 10, pady = 10)
# button to input export file name with text
# layout3
button9 = ttk.Button(self, text ="Import country sheet",
command =country_sheet)
# putting the button in its place by
# using grid
button9.grid(row = 6, column = 2, padx = 10, pady = 10)
# button to show proposalinfo with text
# layout3
button8 = ttk.Button(self, text ="Close",
command =Close)
# putting the button in its place by
# using grid
button8.grid(row = 7, column = 1, padx = 10, pady = 10)
# Driver Code
app = tkinterApp()
#https://newbedev.com/how-to-change-the-color-of-ttk-button
style = ttk.Style()
style.theme_use('alt')
style.configure('TButton', background = '#232323', foreground = 'white', width = 20, borderwidth=1, focusthickness=3, focuscolor='none')
style.map('TButton', background=[('active','#ff0000')])
app.title("This is my Interface")
#app.bind('<Return>', run_and_close)
#app.bind('<Escape>', close)
app.mainloop()
#Write query in SQL Server Management Studio to get required information such as
#File number,Main applicant,Main applicant_Last name,First name,Gender,Promotion date,C_Email,
#Main Organization,C_Organization,C_Postal code,C_City,Title,Summary,Main discipline,Subdiscipline,Word
query_code='YOUR CODE where sub.subsidieronde_naam like'
Sub_Naam=round.get()
Cond='and rol.Naam in' + ' (' + "'Hoofdaanvrager','Medeaanvrager'"+')'
query = query_code + ' ' + Sub_Naam + ' ' + Cond
#importing necessary modules
print(query)
import pyodbc
conn = pyodbc.connect('Driver={SQL Server};'
'Server=YOUR SERVER;'
'Database=YOUR DATABASE;'
'Trusted_Connection=yes;')
#creation of a dataframe
df_pv = pd.read_sql_query(query, conn)
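# A parameterized sketch of the same query (assumes the round name is entered
# without surrounding quotes; '?' is the pyodbc parameter marker):
# df_pv = pd.read_sql_query(
#     'YOUR CODE where sub.subsidieronde_naam like ? '
#     "and rol.Naam in ('Hoofdaanvrager','Medeaanvrager')",
#     conn, params=[Sub_Naam])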
import glob
import scipy.io as ff
import pandas as pd
import yt
import numpy as np
import os as os
props=['M','x','y','z','vx','vy','vz','jx_g','jy_g','jz_g','dMB','dME','dM','rho','cs','dv','Esave','jx_bh','jy_bh','jz_bh','spinmag','eps_sink', 'rho_stars', 'rho_dm', 'vx_stars', 'vy_stars', 'vz_stars', 'vx_dm', 'vy_dm', 'vz_dm', 'n_stars', 'n_dm', 'rho_lowspeed_stars', 'rho_lowspeed_dm', 'fact_fast_stars', 'fact_fast_dm']
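# Unit conventions applied in the loop below (derived from the scalings used):
# masses in Msun, velocities in km/s, accretion rates in Msun/yr,
# number densities in H/cm^3, and times in Gyr.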
os.system('mkdir sinks')
os.system('mv sink_* ./sinks')
files=glob.glob('output_*/info*')
ds=yt.load(files[-1])
df={tmpprop:[] for tmpprop in props}
df=pd.DataFrame(data=df)
if ds.cosmological_simulation==1:
df=pd.concat([pd.DataFrame(data={'a':[]}),df],axis=1)
else:
df=pd.concat([pd.DataFrame(data={'t':[]}),df],axis=1)
files=glob.glob('./sinks/sink*')
files.sort()
d=ds.all_data()
dx=float(d[('index','dx')].min().in_units('pc'))
dx_dm=float(ds.length_unit.in_units('pc')/2**ds.max_level*(1+ds.current_redshift))
for f in files:
p=ff.FortranFile(f)
p.read_ints(); p.read_ints()
a=list(p.read_reals('d'))
scale_l=p.read_reals('d')
scale_d=p.read_reals('d')
scale_t=p.read_reals('d')
bhid=p.read_ints()
d={tmpprop:p.read_reals('d') for tmpprop in props[:30]}
d=pd.DataFrame(data=d, index=bhid)
d = pd.concat([d, pd.DataFrame(data={tmpprop:p.read_ints() for tmpprop in props[30:32]}, index=bhid)], axis=1)
d = pd.concat([d, pd.DataFrame(data={tmpprop:p.read_reals('d') for tmpprop in props[32:]}, index=bhid)], axis=1)
t=list(p.read_reals('d'))
d['M']*=scale_d*scale_l**3/2e33
d['vx']*=scale_l/1e5/scale_t
d['vy']*=scale_l/1e5/scale_t
d['vz']*=scale_l/1e5/scale_t
d['dMB']*=scale_d*scale_l**3/2e33 /scale_t * 3600*24*365
d['dME']*=scale_d*scale_l**3/2e33 /scale_t * 3600*24*365
d['dM']*=scale_d*scale_l**3/2e33
d['rho']*=scale_d/1.67e-24
d['cs']*=scale_l/1e5/scale_t
d['dv']*=scale_l/1e5/scale_t
d['Esave']*=scale_l/1e5/scale_t
d['vx_stars']*=scale_l/1e5/scale_t
d['vy_stars']*=scale_l/1e5/scale_t
d['vz_stars']*=scale_l/1e5/scale_t
d['vx_dm']*=scale_l/1e5/scale_t
d['vy_dm']*=scale_l/1e5/scale_t
d['vz_dm']*=scale_l/1e5/scale_t
d['rho_stars']*=scale_d/1.67e-24
d['rho_dm']*=scale_d/1.67e-24
d['rho_lowspeed_stars']*=scale_d/1.67e-24
d['rho_lowspeed_dm']*=scale_d/1.67e-24
d['fact_fast_stars']*=scale_d/1.67e-24
d['fact_fast_dm']*=scale_d/1.67e-24
for tmpbhid in bhid:
if tmpbhid not in df.index:
df.loc[tmpbhid]=[[]]+[[] for tmpprop in props]
bh=df.loc[tmpbhid]
dd=d.loc[tmpbhid]
if ds.cosmological_simulation==1:
bh['a']+=a
else:
bh['t']+=t
for tmpprop in props:
bh[tmpprop]+=[dd[tmpprop]]
for bhid in df.index:
tmp={tmpprop:df.loc[bhid][tmpprop] for tmpprop in props}
if ds.cosmological_simulation==1:
tmp.update({'a':df.loc[bhid]['a']})
tmp=pd.DataFrame(data=tmp)
tmp=pd.concat([tmp, pd.DataFrame({'t':np.copy(ds.cosmology.t_from_z(1/np.copy(tmp.a)-1).in_units('Gyr'))})], axis=1)
else:
tmp.update({'t':df.loc[bhid]['t']})
tmp=pd.DataFrame(data=tmp)
tmp.t*=scale_t/(1e9*365*24*3600)
dMdt=tmp.dM[1:]/np.diff(tmp.t)/1e9
dMdt.index-=1
dMdt.loc[dMdt.index.max()+1]=0
tmp['x']/=ds['boxlen']
tmp['y']/=ds['boxlen']
tmp['z']/=ds['boxlen']
tmp['dM']=dMdt
tmp['vsink_rel_stars'] = np.sqrt((tmp['vx_stars']-tmp['vx'])**2+(tmp['vy_stars']-tmp['vy'])**2+(tmp['vz_stars']-tmp['vz'])**2)
tmp['vsink_rel_dm'] = np.sqrt((tmp['vx_dm']-tmp['vx'])**2+(tmp['vy_dm']-tmp['vy'])**2+(tmp['vz_dm']-tmp['vz'])**2)
tmp['rinf_stars'] = (tmp.M / 1e7) / (tmp.vsink_rel_stars / 200)**2
tmp['rinf_dm'] = (tmp.M / 1e7) / (tmp.vsink_rel_dm / 200)**2
CoulombLog = np.maximum(np.zeros(len(tmp.t)), np.log(4*dx/tmp.rinf_stars))
tmp['a_stars_slow']=4*np.pi*(6.67e-8)**2*tmp.M*2e33*tmp.rho_lowspeed_stars*1.67e-24*CoulombLog/(tmp.vsink_rel_stars*1e5)**2*3600*24*365*1e6/1e5
CoulombLog = np.minimum(np.zeros(len(tmp.t)), tmp.rinf_stars-4*dx) / (tmp.rinf_stars - 4*dx)
tmp['a_stars_fast']=4*np.pi*(6.67e-8)**2*tmp.M*2e33*tmp.fact_fast_stars*1.67e-24*CoulombLog/(tmp.vsink_rel_stars*1e5)**2*3600*24*365*1e6/1e5
CoulombLog = np.maximum(np.zeros(len(tmp.t)), np.log(4*dx/tmp.rinf_dm))
tmp['a_dm_slow']=4*np.pi*(6.67e-8)**2*tmp.M*2e33*tmp.rho_lowspeed_dm*1.67e-24*CoulombLog/(tmp.vsink_rel_dm*1e5)**2*3600*24*365*1e6/1e5
CoulombLog = np.minimum(np.zeros(len(tmp.t)), tmp.rinf_dm-4*dx) / (tmp.rinf_dm - 4*dx)
tmp['a_dm_fast']=4*np.pi*(6.67e-8)**2*tmp.M*2e33*tmp.fact_fast_dm*1.67e-24*CoulombLog/(tmp.vsink_rel_dm*1e5)**2*3600*24*365*1e6/1e5
M=tmp.dv / tmp.cs
tmp['rinf_gas'] = (tmp.M / 1e7) / (tmp.dv**2 + tmp.cs**2)/200**2
CoulombLog = np.minimum(np.zeros(len(tmp.t)), tmp.rinf_gas-4*dx) / (tmp.rinf_gas - 4*dx)
    fudge = M.copy()  # copy so the masks below keep using the unmodified Mach number
    fudge.loc[M < 0.95] = 1/M**2*(0.5*np.log((1+M)/(1-M)) - M)
    fudge.loc[(M >= 0.95) & (M <= 1.007)] = 1
    fudge.loc[M > 1.007] = 1/M**2*(0.5*np.log(M**2-1) + 3.2)
tmp['a_gas']=4*np.pi*(6.67e-8)**2*tmp.M*2e33*tmp.rho*1.67e-24/(tmp.cs*1e5)**2*fudge*(3600*24*365*1e6)/1e5*CoulombLog
if os.path.exists('./sinks/BH00001.csv'):
os.system('rm ./sinks/BH{:05}'.format(bhid)+'.csv')
tmp.to_csv('./sinks/BH{:05}'.format(bhid)+'.csv', index=False)
tmp={tmpprop:[] for tmpprop in props}
tmp.update({'a':[],'t':[]})
tmp = pd.DataFrame(data=tmp)
import pandas as pd
from bitfinex_csv_parser import BitfinexCSVParser
class FundingEarningsCalculator:
"""
Helper class to determine the bitfinex funding earnings.
"""
def __init__(self, filepath=None):
"""
Constructor.
:param filepath: The path to the bitfinex earnings csv file.
"""
self.df_earnings = BitfinexCSVParser().parse(filepath)
def get_currencies(self):
"""
Returns the currencies contained in the data.
:return: The currencies contained in the data.
"""
return self.df_earnings['Currency'].unique()
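    # Illustrative usage (file name is an assumption):
    #   calc = FundingEarningsCalculator("bitfinex_funding_earnings.csv")
    #   calc.get_currencies()   # e.g. array(['USD', 'UST'], dtype=object)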
def get_currency_stats(self):
"""
Returns a dictionary of dataframes. The key is the name of the currency. The value is a dataframe that
contains statistical information about the currency.
        :return: A dictionary mapping each currency name to a dataframe with its monthly statistics.
"""
currencies = self.get_currencies()
months = self.df_earnings['month'].unique()
years = self.df_earnings['year'].unique()
currency_stats_dict = {}
for currency in currencies:
df_currency_stats = pd.DataFrame()
cum_amount = 'Cumulative Amount'
min_amount = 'Min Amount'
max_amount = 'Max Amount'
num_pay = 'Number of Payments'
for year in years:
for month in months:
df_currency = self.df_earnings
df_currency = df_currency[((df_currency['Currency'] == currency) & (df_currency['year'] == year)) &
(df_currency['month'] == month)]
stats = {
cum_amount: df_currency['Amount'].sum(),
min_amount: df_currency['Amount'].min(),
max_amount: df_currency['Amount'].max(),
num_pay: len(df_currency)
}
df_currency_stats.insert(0, str(year) + "-" + str(month), pd.Series(stats))
alltime_stats = {
min_amount: df_currency_stats.loc[[min_amount], :].min(axis=1)[0],
max_amount: df_currency_stats.loc[[max_amount], :].max(axis=1)[0],
cum_amount: df_currency_stats.loc[[cum_amount], :].sum(axis=1)[0],
num_pay: df_currency_stats.loc[[num_pay], :].sum(axis=1)[0]
}
            df_currency_stats.insert(0, "All Time", pd.Series(alltime_stats))
import plotly.io as _pio
import plotly.graph_objects as _go
import pandas as _pd
import webcolors as _wc
import re
from ZSAdvancedViz.zscolorscale import colors_scale, discrete_colorscale
class zsadvancedviz:
def __init__(self):
_pio.templates["ZS_theme"] = _go.layout.Template(
# layout_plot_bgcolor= '#86C8BC',
# layout_paper_bgcolor= '#86C8BC',
layout_font={'family': 'Roboto', 'color': '#53565A'},
layout_title={
'text': 'Plot Title',
'font': {
'family': 'Roboto',
'color': '#53565A',
'size': 20
},
# 'x': 0.1,
},
layout_xaxis={
'automargin': True,
'title': 'x axis title',
'titlefont_size': 15,
'titlefont_color': '#83868b',
'ticks': '',
'zerolinecolor': '#283442',
'zerolinewidth': 0,
'showgrid': False,
'showline': True,
'linecolor': "grey",
'linewidth': 0.5,
# 'gridcolor': '#283442'
},
layout_yaxis={
'automargin': True,
'title': 'y axis title',
'titlefont_size': 15,
'titlefont_color': '#83868b',
'ticks': '',
'zerolinecolor': '#283442',
'zerolinewidth': 0,
'showgrid': False,
'showline': True,
'linecolor': "grey",
'linewidth': 0.5,
# 'gridcolor': '#283442'
},
layout_colorway=[
'#86C8BC', '#00629B', '#6E2B62', '#B8CC7B', '#01A6DC', '#A3B2AA', '#A0AFC6', '#B6E880', '#FF97FF',
'#FECB52'
],
layout_coloraxis={
'colorbar': {'outlinewidth': 0,
'ticks': ''}
},
layout_hovermode='closest',
layout_hoverlabel={'align': 'left',
'bgcolor': 'white',
'font_size': 13,
'font_color': '#53565A',
# 'bordercolor':'#86C8BC'
},
layout_legend={
# 'x': 0,
# 'y': 1,
# 'traceorder': "normal",
# 'font': dict(
# family="sans-serif",
# size=12,
# color="black"
# ),
# 'bgcolor':"LightSteelBlue",
# 'bordercolor':"Black",
# 'borderwidth':2
},
layout_legend_orientation='v'
)
self.theme = "ZS_theme"
# hover template - do define what to show on tooltip
# plotly_template = _pio.templates["ZS_theme"]
# plotly_template.layout
def line_chart(self,
data,
x_data_col: str,
y_data_col: str,
chart_attr: dict = {}):
'''
This method is used to plot line chart
:param data: dict of data containing the x_data_col and y_data_col names as keys and its corresponding values in list or pandas data frame of the data.
dict :
{'x_data_col_name': ['A','B','C'],
'y_data_col_name': [10,20,30]}
( or )
pandas data frame :
x_data_col_name y_data_col_name
0 A 10
1 B 20
2 C 25
3 D 30
        :param x_data_col: x_axis column name from the data frame
        :param y_data_col: y_axis column name from the data frame
:param chart_attr: dict which contains the parameters x_axis_title, y_axis_title, chart_title
x_axis_title: title to represent the x_axis. Default value is ""
y_axis_title: title to represent the y_axis. Default value is ""
chart_title: title to represent the chart. Default value is "Line Chart"
:return: plotly fig of Line Chart
'''
# checks for the data type
if not isinstance(x_data_col, str):
raise ValueError("The type of x_data_col is " + str(type(x_data_col)) + ", where as expected type is str")
if not isinstance(y_data_col, str):
raise ValueError("The type of y_data_col is " + str(type(y_data_col)) + ", where as expected type is str")
if not isinstance(chart_attr, dict):
raise ValueError("The type of chart_attr is " + str(type(chart_attr)) + ", where as expected type is dict")
fig = _go.Figure(data=_go.Scatter(x=data[x_data_col],
y=data[y_data_col],
mode="markers+lines"),
layout=_go.Layout(title=chart_attr.get("chart_title", "Line Chart"),
xaxis={'title': chart_attr.get("x_axis_title", "")},
yaxis={'title': chart_attr.get("y_axis_title", "")},
template=self.theme))
return fig
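    # Illustrative usage (values are made up):
    #   viz = zsadvancedviz()
    #   fig = viz.line_chart({'month': ['Jan', 'Feb', 'Mar'], 'sales': [10, 25, 18]},
    #                        'month', 'sales',
    #                        chart_attr={'chart_title': 'Monthly Sales',
    #                                    'x_axis_title': 'Month',
    #                                    'y_axis_title': 'Sales'})
    #   fig.show()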
def bar_chart(self,
data,
x_data_col: str,
y_data_col: str,
chart_attr: dict = {}):
'''
This method is used to plot bar chart
:param data: dict of data containing the x_data_col and y_data_col names as keys and its corresponding values in list or pandas data frame of the data.
dict :
{'x_data_col_name': ['A','B','C'],
'y_data_col_name_1': [10,20,30]}
( or )
pandas data frame :
x_data_col_name y_data_col_name
0 A 10
1 B 20
2 C 25
3 D 30
        :param x_data_col: x_axis column name from the data frame
        :param y_data_col: y_axis column name from the data frame
:param chart_attr: dict which contains the parameters x_axis_title, y_axis_title, chart_title
x_axis_title: title to represent the x_axis. Default value is ""
y_axis_title: title to represent the y_axis. Default value is ""
chart_title: title to represent the chart. Default value is "Bar Chart"
:return: plotly fig of Bar chart
'''
# checks for the data type
if not isinstance(x_data_col, str):
raise ValueError("The type of x_data_col is " + str(type(x_data_col)) + ", where as expected type is str")
if not isinstance(y_data_col, str):
raise ValueError("The type of y_data_col is " + str(type(y_data_col)) + ", where as expected type is str")
if not isinstance(chart_attr, dict):
raise ValueError("The type of chart_attr is " + str(type(chart_attr)) + ", where as expected type is dict")
fig = _go.Figure(data=_go.Bar(x=data[x_data_col],
y=data[y_data_col]),
layout=_go.Layout(title=chart_attr.get("chart_title", "Bar Chart"),
xaxis={'title': chart_attr.get("x_axis_title", "")},
yaxis={'title': chart_attr.get("y_axis_title", "")},
template=self.theme))
return fig
def multibar_chart(self,
data,
x_data_col: str,
y_data_col: list,
chart_attr: dict = {},
legend_name: list = [],
bar_type: str = "group"):
'''
This method is used to plot multi bar chart
:param data: dict of data containing the x_data_col and y_data_col names as keys and its corresponding values in list or pandas data frame of the data.
dict :
{'x_data_col_name': ['A','B','C'],
'y_data_col_name_1': [10,20,30],
'y_data_col_name_2': [15,10,40],
'y_data_col_name_3': [20,30,10]}
( or )
pandas data frame :
x_data_col_name y_data_col_name_1 y_data_col_name_2
0 A 10 15
1 B 20 10
2 C 25 40
3 D 30 20
        :param x_data_col: x_axis column name from the data frame
        :param y_data_col: list of y_axis column names from the data frame i.e. ['y_data_col_name_1','y_data_col_name_2']
:param chart_attr: dict which contains the parameters x_axis_title, y_axis_title, chart_title
x_axis_title: title to represent the x_axis. Default value is ""
y_axis_title: title to represent the y_axis. Default value is ""
chart_title: title to represent the chart. Default value is "Multi Bar Chart"
:param legend_name: list of legend names that should be shown on the graph with respect to the y_axis_column names i.e. ['legend1','legend2']
y_axis_col names are the default legend names i.e. ['y_data_col_name_1','y_data_col_name_2']
:param bar_type: Determines how bars at the same location coordinate are displayed on the graph.
With "stack", the bars are stacked on top of one another.
With "relative", the bars are stacked on top of one another, with negative values below the axis, positive values above.
With "group", the bars are plotted next to one another centered around the shared location.
Default is the "group"
:return: plotly fig of Multi Bar chart
'''
# checks for the data type
if not isinstance(x_data_col, str):
raise ValueError("The type of x_data_col is " + str(type(x_data_col)) + ", where as expected type is str")
if not isinstance(y_data_col, list):
raise ValueError("The type of y_data_col is " + str(type(y_data_col)) + ", where as expected type is list")
if not isinstance(chart_attr, dict):
raise ValueError("The type of chart_attr is " + str(type(chart_attr)) + ", where as expected type is dict")
if not isinstance(legend_name, list):
raise ValueError(
"The type of legend_name is " + str(type(legend_name)) + ", where as expected type is list")
if not isinstance(bar_type, str):
raise ValueError("The type of bar_type is " + str(type(bar_type)) + ", where as expected type is str")
# initilization of multilbar data
multi_bar_data = []
# iterates each y_axis_column and assigns the bar and legend
for index in range(len(y_data_col)):
# checks the length of legend_name and assigns the column_name to legend
if len(legend_name) == 0:
multi_bar_data.append(_go.Bar(x=data[x_data_col],
y=data[y_data_col[index]],
name=y_data_col[index]))
# if length of legend_name is greater than the y_axis_col names list then assigns the first n legend names
elif len(y_data_col) <= len(legend_name):
multi_bar_data.append(_go.Bar(x=data[x_data_col],
y=data[y_data_col[index]],
name=legend_name[index]))
# if legend_names are less tha the y_axis_col names
elif len(y_data_col) > len(legend_name):
# assigns legend_name if the length is less than y_axis_col
if index < len(legend_name):
multi_bar_data.append(_go.Bar(x=data[x_data_col],
y=data[y_data_col[index]],
name=legend_name[index]))
# assigns the y_axis_col name if the length of legend_name is less
else:
multi_bar_data.append(_go.Bar(x=data[x_data_col],
y=data[y_data_col[index]],
name=y_data_col[index]))
fig = _go.Figure(data=multi_bar_data,
layout=_go.Layout(barmode=bar_type,
title=chart_attr.get("chart_title", "Multi Bar Chart"),
xaxis={'title': chart_attr.get("x_axis_title", "")},
yaxis={'title': chart_attr.get("y_axis_title", "")},
template=self.theme))
return fig
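    # Illustrative usage (values are made up): one bar per quarter for each
    # region, grouped or stacked via bar_type.
    #   fig = zsadvancedviz().multibar_chart(
    #       {'region': ['N', 'S', 'E'], 'q1': [10, 20, 30], 'q2': [15, 10, 40]},
    #       'region', ['q1', 'q2'],
    #       legend_name=['Q1', 'Q2'], bar_type='stack')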
def pie_chart(self,
data,
label_col: str,
value_col: str,
chart_attr: dict = {}):
'''
This method is used to plot pie chart
:param data: dict of data containing the x_data_col and y_data_col names as keys and its corresponding values in list or pandas data frame of the data.
dict :
{'label_col_name': ['A','B','C'],
'value_col_name': [10,20,30]}
( or )
pandas data frame :
label_col_name value_col_name
0 A 10
1 B 20
2 C 25
3 D 30
        :param label_col: label column name from the data frame
        :param value_col: value column name from the data frame
        :param chart_attr: dict which contains the parameter chart_title
                           chart_title: title to represent the chart. Default value is "Pie Chart"
:return: plotly fig of pie chart
'''
# checks for the data type
if not isinstance(label_col, str):
raise ValueError("The type of label_col is " + str(type(label_col)) + ", where as expected type is str")
if not isinstance(value_col, str):
raise ValueError("The type of value_col is " + str(type(value_col)) + ", where as expected type is str")
if not isinstance(chart_attr, dict):
raise ValueError("The type of chart_attr is " + str(type(chart_attr)) + ", where as expected type is dict")
fig = _go.Figure(data=_go.Pie(labels=data[label_col],
values=data[value_col],
textposition='none', # ( "inside" | "outside" | "auto" | "none" )
textinfo="none",
# "label", "text", "value", "percent" joined with a "+" OR "none".
),
layout=_go.Layout(title=chart_attr.get("chart_title", "Pie Chart"),
template=self.theme))
return fig
def donut_chart(self,
data,
label_col: str,
value_col: str,
hole_value: float = 0.3,
chart_attr: dict = {}):
'''
This method is used to plot donut chart
:param data: dict of data containing the x_data_col and y_data_col names as keys and its corresponding values in list or pandas data frame of the data.
dict :
{'label_col_name': ['A','B','C'],
'value_col_name': [10,20,30]}
( or )
pandas data frame :
label_col_name value_col_name
0 A 10
1 B 20
2 C 25
3 D 30
        :param label_col: label column name from the data frame
        :param value_col: value column name from the data frame
:param hole_value: number between 0 and 1
Default: 0.3
Sets the fraction of the radius to cut.
        :param chart_attr: dict which contains the parameter chart_title
                           chart_title: title to represent the chart. Default value is "Donut Chart"
:return: plotly fig of donut chart
'''
# checks for the data type
if not isinstance(label_col, str):
raise ValueError("The type of label_col is " + str(type(label_col)) + ", where as expected type is str")
if not isinstance(value_col, str):
raise ValueError("The type of value_col is " + str(type(value_col)) + ", where as expected type is str")
if not isinstance(hole_value, float):
raise ValueError("The type of hole_value is " + str(type(hole_value)) + ", where as expected type is float")
if not isinstance(chart_attr, dict):
raise ValueError("The type of chart_attr is " + str(type(chart_attr)) + ", where as expected type is dict")
fig = _go.Figure(data=_go.Pie(labels=data[label_col],
values=data[value_col],
textposition='none', # ( "inside" | "outside" | "auto" | "none" )
textinfo="none",
# "label", "text", "value", "percent" joined with a "+" OR "none".
hole=hole_value
),
layout=_go.Layout(title=chart_attr.get("chart_title", "Donut Chart"),
template=self.theme))
return fig
def sankey_chart(self,
data,
source_column: str,
target_column: str,
value_column: str,
label_column: str = "",
valuesuffix: str = "",
decimal_points: int = 2,
chart_attr: dict = {}):
"""
This method is used to plot sankey chart
:param data: pandas data frame of the data which is used to plot the graph.
One can refer the below data frame for the reference of the data that to be passed.
Source Target Value Label(optional)
0 AB EF 20.007243 Line 1
1 CD GH 12 Line 2
2 AB GH 5.001095 Line 3
3 HI JK 0.063865 Line 4
4 HI EF 0.005332 Line 5
:param source_column: source column name from the data frame
:param target_column: target column name from the data frame
:param value_column: value column name from the data frame
:param label_column: label column name from the data frame. The values in the column are shown on the hover of the links in the graph.
If the label column is not provided by the user, the label name will not be displayed on hover.
:param valuesuffix: suffix will be appended to the value in the graph.
If the value suffix is not provided by the user, the suffix will not be displayed on hover.
:param decimal_points: Number of decimal points to be shown in the graph.
It shows 2 digits after the decimals by default.
:param chart_attr: dict which contains the parameters like chart_title
chart_title: title to represent the chart. Default value is "Sankey Chart"
Format of dictionary is as follows:
chart_attr = { 'chart_title': 'Chart Title'}
:return: plotly fig of sankey chart
"""
# checks for the data type
if not isinstance(data, _pd.core.frame.DataFrame):
raise ValueError("The type of data is " + str(type(data)) + ". Where as expected pandas dataframe")
if not isinstance(source_column, str):
raise ValueError(
"The type of source_column is " + str(type(source_column)) + ", where as expected type is str")
if not isinstance(target_column, str):
raise ValueError(
"The type of target_column is " + str(type(target_column)) + ", where as expected type is str")
if not isinstance(value_column, str):
raise ValueError(
"The type of value_column is " + str(type(value_column)) + ", where as expected type is str")
if not isinstance(label_column, str):
raise ValueError(
"The type of label_column is " + str(type(label_column)) + ", where as expected type is str")
if not isinstance(decimal_points, int):
raise ValueError(
"The type of decimal_points is " + str(type(decimal_points)) + ", where as expected type is int")
if not isinstance(chart_attr, dict):
raise ValueError("The type of chart_attr is " + str(type(chart_attr)) + ", where as expected type is dict")
label_data = []
# appends the empty label
if len(label_column) == 0:
for a in data[source_column]:
label_data.append("")
else:
label_data = data[label_column]
# appends source_data, target_data and clears the duplicate values
node_label = list(set(data[source_column]))
node_label.extend(list(set(data[target_column])))
node_label = list(set(node_label))
source = []
target = []
# creates the sankey required source data from node_label
for source_value in data[source_column]:
for a in range(len(node_label)):
if source_value == node_label[a]:
source.append(a)
# creates the sankey required target data from node_label
for target_value in data[target_column]:
for a in range(len(node_label)):
if target_value == node_label[a]:
target.append(a)
# color_scale from zs_theme_colors
colors = colors_scale(node_label)
# creates the colors that to be given to links between source and target
# reduces the alpha of rgba by 0.25 in each color
# generates the colors with less alpha to colors which are given to source
# re.search(r"(1)\)|0.([0-9]*)\)" - searces for the alpha which starts with either 1 or 0. in rgba
# .group(0).split(")")[0] - splits the alpha value into the number
link_color = [
colors[src].replace(", " + re.search(r"(1)\)|0.([0-9]*)\)", colors[src]).group(0).split(")")[0] + ")",
", " + str(round(
float(re.search(r"(1)\)|0.([0-9]*)\)", colors[src]).group(0).split(")")[0]) - 0.25,
3)) + ")") for src in source]
fig = _go.Figure(data=[_go.Sankey(
valueformat="." + str(decimal_points) + "f", # ".2f"
valuesuffix=valuesuffix, # " $"
orientation="h", ### h/v
# Define nodes
node=dict(
pad=5,
thickness=15,
line=dict(color="grey", width=0.1),
label=node_label,
color=colors
),
# Add links
link=dict(
line=dict(color="grey", width=0.2),
source=source,
target=target,
value=data[value_column],
label=label_data,
color=link_color
),
)])
fig.update_layout(title_text=chart_attr.get("chart_title", "Sankey Chart"),
template=self.theme)
return fig
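    # Illustrative usage (values are made up):
    #   flows = _pd.DataFrame({'Source': ['AB', 'AB', 'CD'],
    #                          'Target': ['EF', 'GH', 'GH'],
    #                          'Value': [20.0, 5.0, 12.0],
    #                          'Label': ['Line 1', 'Line 2', 'Line 3']})
    #   fig = zsadvancedviz().sankey_chart(flows, 'Source', 'Target', 'Value',
    #                                      label_column='Label', valuesuffix=' $')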
def bitmap(self,
data,
x_axis_order_list: list,
x_axis_column: str,
y_axis_column: str,
values_data_column: str,
intensity_column: str = "",
config_data={},
color_column: str = "color",
symbols_column: str = "symbol",
values_data_config_column: str = "value",
chart_attr: dict = {}):
"""
This method is used to plot Bit Map chart
:param data: pandas data frame of the data which is used to plot the graph.
One can refer the below data frame for the reference of the data that to be passed.
y_axis_label Value x_axis_label Intensity
0 Patient 1 AR M-1 1
1 Patient 1 MR Start 3
2 Patient 1 GR M 1 2
3 Patient 2 AR M-2 1
4 Patient 2 GR Start 2
5 Patient 3 GR M-8 3
6 Patient 3 AR M-7 1
7 Patient 3 MR M 2 2
        :param x_axis_order_list: list of x_axis values in the order that should be shown on the graph.
                                  As per the above data frame the x_axis labels can be arranged in random manner on the x_axis.
                                  To have an arranged order of x_axis, pass the list of values in the order to be represented on the x_axis.
i.e. ['M-8','M-7','M-2','M-1','Start','M 1','M 2']
        :param x_axis_column: x_axis column name from the data frame
        :param y_axis_column: y_axis column name from the data frame
        :param values_data_column: value column name from the data frame of the data point (x,y)
:param config_data: pandas data frame of the config file which is used to assign the symbol and color for each value
One can refer the below data frame for the reference. Color code should be in HEX
value symbol color
0 AR 0 #00ffff
1 MR 1 #000000
2 GR 2 #0000ff
                            symbol accepts only numbers from the list: [0,1,2,3,4,5,17,18,19,21,22,23,100,101,102,105,117,118,133,134,135]
:param color_column: color column name from the config data frame. If the config data is not provided, then the colors will be assigned from default list of colors.
:param symbols_column: symbol column name from the config data frame. If the config data is not provided, then the symbols will be assigned from above mentioned default list of symbols.
:param values_data_config_column: value column name from the config data frame.
:param intensity_column: Intensity column name from the data frame to represent in the graph
Assign 1- High, 2- Medium, 3- Low in the data frame and pass the column of intensity
Accepts only above integers as the value. If the intensity column is not provided, Default value is taken as 1-High.
:param chart_attr: dict which contains the parameters x_axis_title, y_axis_title, chart_title
chart_title: title to represent the chart. Default value is "Bit Map"
Format of dictionary is as follows:
chart_attr = { 'chart_title': 'Chart Title',
'x_axis_title': 'X Axis title',
'y_axis_title': 'Y Axis Title'}
:return: plotly fig of bit map
"""
zs_symbols = [0,1,2,3,4,5,17,18,19,21,22,23,100,101,102,105,117,118,133,134,135]
zs_colors = ["rgba(0, 98, 155, 1)","rgba(1, 166, 220, 1)","rgba(110, 43, 98, 1)","rgba(134, 200, 188, 1)",
"rgba(160, 175, 198, 1)","rgba(163, 178, 170, 1)","rgba(182, 232, 128, 1)","rgba(184, 204, 123, 1)",
"rgba(254, 203, 82, 1)","rgba(255, 151, 255, 1)","rgba(99, 110, 250, 1)","rgba(239, 85, 59, 1)",
"rgba(0, 204, 150, 1)","rgba(171, 99, 250, 1)","rgba(255, 161, 90, 1)","rgba(25, 211, 243, 1)",
"rgba(255, 102, 146, 1)","rgba(182, 232, 128, 1)","rgba(255, 151, 255, 1)","rgba(254, 203, 82, 1)"]
# checks for the data type
if not isinstance(data, _pd.core.frame.DataFrame) :
raise ValueError("The type of data is "+str(type(data))+". Where as expected pandas dataframe")
if not isinstance(x_axis_order_list, list):
raise ValueError("The type of x_axis_order_list is "+str(type(x_axis_order_list))+", where as expected type is list")
if not isinstance(x_axis_column, str):
raise ValueError("The type of x_axis_column is "+str(type(x_axis_column))+", where as expected type is str")
if not isinstance(y_axis_column, str):
raise ValueError("The type of y_axis_column is "+str(type(y_axis_column))+", where as expected type is str")
if not isinstance(values_data_column, str):
raise ValueError("The type of values_data_column is "+str(type(values_data_column))+", where as expected type is str")
if not isinstance(intensity_column, str):
raise ValueError("The type of intensity_column is "+str(type(intensity_column))+", where as expected type is str")
if not isinstance(color_column, str):
raise ValueError("The type of color_column is "+str(type(color_column))+", where as expected type is str")
if not isinstance(symbols_column, str):
raise ValueError("The type of symbols_column is "+str(type(symbols_column))+", where as expected type is str")
if not isinstance(values_data_config_column, str):
raise ValueError("The type of values_data_config_column is "+str(type(values_data_config_column))+", where as expected type is str")
if not isinstance(chart_attr, dict):
raise ValueError("The type of chart_attr is "+str(type(chart_attr))+", where as expected type is dict")
# fill the NaN with empty string
pandas_dataframe = data.fillna('')
x_axis_order_list = x_axis_order_list
y_axis_data_column_name = y_axis_column
x_axis_data_column_name = x_axis_column
values_data_column_name = values_data_column
color_intensity_column_name = intensity_column
#convert x-axis dataframe column to string
pandas_dataframe[x_axis_data_column_name] = pandas_dataframe[x_axis_data_column_name].astype(str)
values_data_config_column_name = values_data_config_column
symbols_column_name = symbols_column
color_column_name = color_column
if len(config_data) != 0:
# checks for the data type
if not isinstance(config_data, _pd.core.frame.DataFrame):
raise ValueError("The type of config_data is "+str(type(config_data))+". Where as expected pandas dataframe")
# checks for the existing of column in config data frame
else:
if values_data_config_column_name not in list(config_data.columns):
raise ValueError("The column name: "+values_data_config_column_name+" is not present in the given config data.")
elif symbols_column_name not in list(config_data.columns):
raise ValueError("The column name: "+symbols_column_name+" is not present in the given config data.")
elif color_column_name not in list(config_data.columns):
raise ValueError("The column name: "+color_column_name+" is not present in the given config data.")
# initilization of config_data
pandas_dataframe_1 = {}
# verifies the config_data
if len(config_data) != 0:
# fill the NaN with empty string
pandas_dataframe_1 = config_data.fillna('')
# checks wheter the values are repeated in config data
if len(list(pandas_dataframe_1[values_data_config_column_name])) != len(set(pandas_dataframe_1[values_data_config_column_name])):
raise ValueError("Data in column: "+values_data_config_column_name+" is repeated. Place unique values in this column")
# checks whether there is any empty value present in symbols
if '' in list(pandas_dataframe_1[symbols_column_name]):
raise ValueError("There is an empty value in the "+symbols_column_name+" column")
# checks whether the type of value in symbol column is int
elif str(pandas_dataframe_1[symbols_column_name].dtypes) != 'int64':
for symbol in list(pandas_dataframe_1[symbols_column_name]):
if not isinstance(symbol, int):
raise ValueError("There is an invalid data: "+str(symbol)+" in the column: "+symbols_column_name+". Expected only integers from list "+str(zs_symbols))
# checks whether the type of value in symbol column is int
elif str(pandas_dataframe_1[symbols_column_name].dtypes) == 'int64':
# iterates each symbol and checks whether the number is valid
for symbol in list(pandas_dataframe_1[symbols_column_name]):
if symbol not in zs_symbols:
raise ValueError("Given symbol number: "+str(symbol)+" is invalid. choose the symbol number from symbols list "+str(zs_symbols))
# hex colors from config data
hex_colors_list = list(pandas_dataframe_1[color_column_name])
rgb_colors_list = []
# iterates each hex color and converts it into rgba
for hex_color in hex_colors_list:
# checks for the valid hex color code
if re.match(r'^[#][0-9A-Za-z]{6}$',str(hex_color)):
rgb_colors_list.append(str(_wc.hex_to_rgb(str(hex_color))).replace("IntegerRGB","rgba").replace("red=","").replace("green=","").replace("blue=","").replace(")",", 1)"))
else:
raise ValueError("Entered wrong Hex code: "+str(hex_color)+ " in the data. Enter the correct hex code")
# appends the rgba column to the config data frame
updated_rgba_color_df = _pd.DataFrame({color_column_name: rgb_colors_list})
pandas_dataframe_1[color_column_name] = updated_rgba_color_df
# creates the config data
else:
# initialising the config data
config_data_dict = {}
# list of unique values in ascending order
unique_values_list = list(set(pandas_dataframe[values_data_column_name]))
unique_values_list.sort()
# updates the config_data with values list
config_data_dict.update({values_data_config_column_name:unique_values_list})
# colors and symbols initiulization
pre_colors = []
pre_symbols = []
# counters initilization
color_counter = 0
symbol_counter = 0
# iterates each value and assign the color and symbol form default symbols and colors
for value in unique_values_list:
# if counter is greater than the length of existing colors then it resets the counter and assigns the colors in repeating mode
if color_counter >= (len(zs_colors) - 1) :
color_counter = 0
pre_colors.append(zs_colors[color_counter])
color_counter += 1
else:
pre_colors.append(zs_colors[color_counter])
color_counter += 1
# if counter is greater than the length of existing symbols then it resets the counter and assigns the symbols in repeating mode
if symbol_counter >= (len(zs_symbols) - 1) :
symbol_counter = 0
pre_symbols.append(zs_symbols[symbol_counter])
symbol_counter += 1
else:
pre_symbols.append(zs_symbols[symbol_counter])
symbol_counter += 1
# updates config_data and converts to pandas data frame
config_data_dict.update({symbols_column_name:pre_symbols,color_column_name:pre_colors})
pandas_dataframe_1 = _pd.DataFrame(config_data_dict)
# list of unique y_axis data
unique_y_axis_params = list(set(pandas_dataframe[y_axis_data_column_name]))
unique_y_axis_params.sort()
y_axis_data = []
x_axis_data = []
symbols = []
colors = []
text_list = []
# creates the list of data to be provided for plot
# iterates each value in y_axis params
for y_axis_param in unique_y_axis_params:
# fetches the data of y_axis param from whole data
y_axis_param_data = pandas_dataframe.loc[pandas_dataframe[y_axis_data_column_name] == y_axis_param]
# iterates each value in x_axis order
for x_axis_order_param in x_axis_order_list:
# fetches the data of x_axis param from y_axis param data
y_axis_detail_param_data = y_axis_param_data.loc[y_axis_param_data[x_axis_data_column_name] == x_axis_order_param]
# check for empty data set
if len(y_axis_detail_param_data) == 0:
y_axis_data.append(y_axis_param)
x_axis_data.append(x_axis_order_param)
# open circle is assigned
symbols.append(100)
# white doesn't appear on graph
colors.append("white")
# no text is shown
text_list.append("")
else:
y_axis_data.append(y_axis_param)
x_axis_data.append(x_axis_order_param)
# fetches the data of the value form config data
config_data_of_value = pandas_dataframe_1.loc[pandas_dataframe_1[values_data_config_column_name] == list(y_axis_detail_param_data[values_data_column_name])[0]]
if len(config_data_of_value) != 0:
# fetches the assigned symbol for the value
symbols.append(list(config_data_of_value[symbols_column_name])[0])
# checks whether the user provided the intensity column
if color_intensity_column_name != "" :
# checks whether the given intensity is correct
# 1- High, 2- Medium, 3- Low
if list(y_axis_detail_param_data[color_intensity_column_name])[0] in [1,2,3] :
if list(y_axis_detail_param_data[color_intensity_column_name])[0] == 1:
colors.append(list(config_data_of_value[color_column_name])[0])
# text list to show on the hover
text_list.append("value: "+str(list(y_axis_detail_param_data[values_data_column_name])[0])+"<br>Intensity: 1-High")
elif list(y_axis_detail_param_data[color_intensity_column_name])[0] == 2:
medium_intensity_color = list(config_data_of_value[color_column_name])[0].replace(" 1)", (" "+str(0.7)+")"))
colors.append(medium_intensity_color)
# text list to show on the hover
text_list.append("value: "+str(list(y_axis_detail_param_data[values_data_column_name])[0])+"<br>Intensity: 2-Medium")
elif list(y_axis_detail_param_data[color_intensity_column_name])[0] == 3:
low_intensity_color = list(config_data_of_value[color_column_name])[0].replace(" 1)", (" "+str(0.4)+")"))
colors.append(low_intensity_color)
# text list to show on the hover
text_list.append("value: "+str(list(y_axis_detail_param_data[values_data_column_name])[0])+"<br>Intensity: 3-Low")
# considering high if intensity is left blank/empty
elif list(y_axis_detail_param_data[color_intensity_column_name])[0] == "" :
colors.append(list(config_data_of_value[color_column_name])[0])
# text list to show on the hover
text_list.append("value: "+str(list(y_axis_detail_param_data[values_data_column_name])[0])+"<br>Intensity: 1-High")
# raise exception if value is other than 1,2,3
else:
raise ValueError("The Intensity: "+str(list(y_axis_detail_param_data[color_intensity_column_name])[0])+" is invalid. Enter 1 -High, 2 -Medium, 3 -Low")
else:
colors.append(list(config_data_of_value[color_column_name])[0])
# text list to show on the hover
text_list.append("value: "+str(list(y_axis_detail_param_data[values_data_column_name])[0]))
else:
raise ValueError("The value "+str(list(y_axis_detail_param_data[values_data_column_name])[0])+" is not present in the config_data. Please check the data")
# list of unique value data
unique_value_params = list(set(pandas_dataframe[values_data_column_name]))
unique_value_params.sort()
legend_x_data = []
legend_y_data = []
legend_value = []
legend_marker_symbol = []
legend_marker_color = []
legend_text_list = []
# creates the data to be provided for the legends
# iterates each value in the data
for value_index in range(len(unique_value_params)):
value = list(unique_value_params)[value_index]
# if the data has intensity column
if color_intensity_column_name in list(pandas_dataframe.columns):
# fetches the data of the value from provided data frame
value_data = pandas_dataframe.loc[(pandas_dataframe[color_intensity_column_name] == 1) & (pandas_dataframe[values_data_column_name] == value)]
# fetches the data of the value from the config_data
value_config_data = pandas_dataframe_1.loc[pandas_dataframe_1[values_data_config_column_name] == value]
# checks for the value data from provided data frame
if len(value_data) != 0:
# appends respective values to respective legend list variables
legend_x_data.append(list(value_data[x_axis_data_column_name])[0])
legend_y_data.append(list(value_data[y_axis_data_column_name])[0])
legend_value.append(list(value_data[values_data_column_name])[0])
legend_marker_symbol.append(list(value_config_data[symbols_column_name])[0])
legend_marker_color.append(list(value_config_data[color_column_name])[0])
legend_text_list.append("value: "+str(list(value_data[values_data_column_name])[0])+"<br>Intensity: 1-High")
else:
value_data = pandas_dataframe.loc[(pandas_dataframe[color_intensity_column_name] == 2) & (pandas_dataframe[values_data_column_name] == value)]
if len(value_data) != 0:
# appends respective values to respective legend list variables
legend_x_data.append(list(value_data[x_axis_data_column_name])[0])
legend_y_data.append(list(value_data[y_axis_data_column_name])[0])
legend_value.append(list(value_data[values_data_column_name])[0])
legend_marker_symbol.append(list(value_config_data[symbols_column_name])[0])
legend_marker_color.append(list(value_config_data[color_column_name])[0].replace(" 1)", (" "+str(0.7)+")")))
legend_text_list.append("value: "+str(list(value_data[values_data_column_name])[0])+"<br>Intensity: 2-Medium")
else:
value_data = pandas_dataframe.loc[(pandas_dataframe[color_intensity_column_name] == 3) & (pandas_dataframe[values_data_column_name] == value)]
if len(value_data) != 0:
# appends respective values to respective legend list variables
legend_x_data.append(list(value_data[x_axis_data_column_name])[0])
legend_y_data.append(list(value_data[y_axis_data_column_name])[0])
legend_value.append(list(value_data[values_data_column_name])[0])
legend_marker_symbol.append(list(value_config_data[symbols_column_name])[0])
legend_marker_color.append(list(value_config_data[color_column_name])[0].replace(" 1)", (" "+str(0.4)+")")))
legend_text_list.append("value: "+str(list(value_data[values_data_column_name])[0])+"<br>Intensity: 2-Low")
# considers intensity as 1- High if intensity column is not present
else:
value_data = pandas_dataframe.loc[pandas_dataframe[values_data_column_name] == value]
value_config_data = pandas_dataframe_1.loc[pandas_dataframe_1[values_data_config_column_name] == value]
# appends respective values to respective legend list variables
legend_x_data.append(list(value_data[x_axis_data_column_name])[0])
legend_y_data.append(list(value_data[y_axis_data_column_name])[0])
legend_value.append(list(value_data[values_data_column_name])[0])
legend_marker_symbol.append(list(value_config_data[symbols_column_name])[0])
legend_marker_color.append(list(value_config_data[color_column_name])[0])
legend_text_list.append("value: "+str(list(value_data[values_data_column_name])[0])+"<br>Intensity: 1-High")
# main bit map plot
fig_data = [_go.Scatter(mode="markers",
showlegend = False,
x = x_axis_data,
y = y_axis_data,
marker_symbol=symbols,
marker_line_color=colors,
marker_color=colors,
marker_line_width=1.5,
marker_size=6,
hoverinfo = "x+y+text", #'x', 'y', 'z', 'text', 'name'
text = text_list)]
# legend plot
for index in range(len(legend_x_data)):
fig_data.append(_go.Scatter(mode = "markers",
name = legend_value[index],
x = [legend_x_data[index]],
y = [legend_y_data[index]],
marker_symbol = legend_marker_symbol[index],
marker_color = legend_marker_color[index],
marker_line_color= legend_marker_color[index],
marker_size = 6,
hoverinfo = "x+y+text",
text = legend_text_list[index]))
fig = _go.Figure(data = fig_data,
layout = _go.Layout(title= chart_attr.get("chart_title","Bit Map"),
xaxis = {"title" : {"text":chart_attr.get("x_axis_title","")}},
yaxis = {"title" : {"text":chart_attr.get("y_axis_title","")}},
template = self.theme,
legend = {'x' : 1.02, # -2 and 3 v - 1.02, h - 0
'xanchor' : "left", # "auto" | "left" | "center" | "right"
'y' : 1, # -2 and 3 v - 1, h - -0.1
'yanchor' : "auto", # "auto" | "top" | "middle" | "bottom"
'traceorder': "normal", # "reversed", "grouped", "reversed+grouped", "normal"
'orientation' : 'v'}))
return fig
def multiline_chart(self,
data,
x_data_col: str,
y_data_col: list,
chart_attr: dict = {},
legend_name: list = []):
'''
This method is used to plot multiline chart
        :param data: dict with the x_data_col and y_data_col names as keys and their corresponding values as lists, or a pandas data frame of the data.
dict :
{'x_data_col_name': ['A','B','C'],
'y_data_col_name_1': [10,20,30],
'y_data_col_name_2': [15,10,40],
'y_data_col_name_3': [20,30,10]}
( or )
pandas data frame :
x_data_col_name y_data_col_name_1 y_data_col_name_2
0 A 10 15
1 B 20 10
2 C 25 40
3 D 30 20
        :param x_data_col: x_axis column name from the data frame
        :param y_data_col: list of y_axis column names from the data frame i.e. ['y_data_col_name_1','y_data_col_name_2']
:param chart_attr: dict which contains the parameters x_axis_title, y_axis_title, chart_title
x_axis_title: title to represent the x_axis. Default value is ""
y_axis_title: title to represent the y_axis. Default value is ""
chart_title: title to represent the chart. Default value is "Line Chart"
:param legend_name: list of legend names that should be shown on the graph with respect to the y_axis_column names.
y_axis_col names are the default legend names
:return: plotly fig of MultiLine Chart
'''
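        # Illustrative usage (a sketch, not part of the original module; `plotter` is an
        # assumed instance of this plotting class and `_pd` is the pandas alias used
        # elsewhere in this file):
        #   df = _pd.DataFrame({"month": ["Jan", "Feb", "Mar"],
        #                       "sales": [10, 20, 30],
        #                       "returns": [2, 3, 1]})
        #   fig = plotter.multiline_chart(df, x_data_col="month",
        #                                 y_data_col=["sales", "returns"],
        #                                 chart_attr={"chart_title": "Sales vs Returns"})
        #   fig.show()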
# checks for the data type
        if not isinstance(x_data_col, str):
            raise ValueError("The type of x_data_col is " + str(type(x_data_col)) + ", whereas the expected type is str")
        if not isinstance(y_data_col, list):
            raise ValueError("The type of y_data_col is " + str(type(y_data_col)) + ", whereas the expected type is list")
        if not isinstance(chart_attr, dict):
            raise ValueError("The type of chart_attr is " + str(type(chart_attr)) + ", whereas the expected type is dict")
        if not isinstance(legend_name, list):
            raise ValueError(
                "The type of legend_name is " + str(type(legend_name)) + ", whereas the expected type is list")
        # initialization of multiline data
multi_line_data = []
# iterates each y_axis_column and assigns the scatter and legend
for index in range(len(y_data_col)):
# checks the length of legend_name and assigns the column_name to legend
if len(legend_name) == 0:
multi_line_data.append(_go.Scatter(x=data[x_data_col],
y=data[y_data_col[index]],
mode="markers+lines",
name=y_data_col[index]))
            # if legend_name has at least as many entries as y_data_col, use the corresponding legend names
elif len(y_data_col) <= len(legend_name):
multi_line_data.append(_go.Scatter(x=data[x_data_col],
y=data[y_data_col[index]],
mode="markers+lines",
name=legend_name[index]))
            # if there are fewer legend_name entries than y_axis_col names
            elif len(y_data_col) > len(legend_name):
                # assigns the legend_name entry while one is available for this index
if index < len(legend_name):
multi_line_data.append(_go.Scatter(x=data[x_data_col],
y=data[y_data_col[index]],
mode="markers+lines",
name=legend_name[index]))
                # falls back to the y_axis_col name once the legend_name entries run out
else:
multi_line_data.append(_go.Scatter(x=data[x_data_col],
y=data[y_data_col[index]],
mode="markers+lines",
name=y_data_col[index]))
fig = _go.Figure(data=multi_line_data,
layout=_go.Layout(title=chart_attr.get("chart_title", "MultiLine Chart"),
xaxis={'title': chart_attr.get("x_axis_title", "")},
yaxis={'title': chart_attr.get("y_axis_title", "")},
template=self.theme))
return fig
def dexter(self,
data,
x_axis_order_list:list,
x_axis_column:str,
y_axis_column:str,
values_data_column:str,
config_data = {},
color_column: str = "color",
grp_column: str = "",
values_data_config_column:str = "value",
chart_attr:dict = {}):
"""
:param data: pandas data frame of the data which is used to plot the graph.
Below data frame can be referenced for the data that to be passed.
y_axis_labels Value x_axis_labels Intensity
0 Patient 1 T1 M-1 1
1 Patient 1 T1 Start 1
2 Patient 1 T2 M 1 2
3 Patient 2 T1 M-2 1
4 Patient 2 T1 Start 1
5 Patient 3 T3 M-8 2
6 Patient 3 T3 M-7 2
7 Patient 3 T3 M 2 2
        :param x_axis_order_list: list of x_axis values in the order in which they should be shown on the graph.
                                  As per the above data frame, the x_axis labels can be arranged in a random manner on the x_axis,
                                  so to have an ordered x_axis, pass the list of values in the order they should be represented on the x_axis.
                                  i.e. ['M1','M2','M3','M4','M5','M6','M7']
        :param x_axis_column: x_axis column name from the data frame
        :param y_axis_column: y_axis column name from the data frame
        :param values_data_column: value column name from the data frame for the data point (x,y)
        :param config_data: pandas data frame of the config file which is used to assign the symbol and color for each value.
                            Refer to the data frame below for reference. Color code should be in HEX
value color
0 T1 #00ffff
1 T2 #000000
2 T3 #0000ff
:param color_column: color column name from the config data frame. Default value is "color"
        :param values_data_config_column: value column name from the config data frame. Default value is "value"
:param chart_attr: dict which contains the parameters x_axis_title, y_axis_title, chart_title
x_axis_title: title to represent the x_axis. Default value is ""
y_axis_title: title to represent the y_axis. Default value is ""
chart_title: title to represent the chart. Default value is "Dexter chart"
:return: plotly fig of dexter chart
"""
zs_colors = ["rgba(0, 98, 155, 1)","rgba(1, 166, 220, 1)","rgba(110, 43, 98, 1)","rgba(134, 200, 188, 1)",
"rgba(160, 175, 198, 1)","rgba(163, 178, 170, 1)","rgba(182, 232, 128, 1)","rgba(184, 204, 123, 1)",
"rgba(254, 203, 82, 1)","rgba(255, 151, 255, 1)","rgba(99, 110, 250, 1)","rgba(239, 85, 59, 1)",
"rgba(0, 204, 150, 1)","rgba(171, 99, 250, 1)","rgba(255, 161, 90, 1)","rgba(25, 211, 243, 1)",
"rgba(255, 102, 146, 1)","rgba(182, 232, 128, 1)","rgba(255, 151, 255, 1)","rgba(254, 203, 82, 1)"]
# checks for the data type
        if not isinstance(data, _pd.core.frame.DataFrame):
            raise ValueError("The type of data is " + str(type(data)) + ", whereas a pandas DataFrame is expected")
        if not isinstance(x_axis_order_list, list):
            raise ValueError(
                "The type of x_axis_order_list is " + str(type(x_axis_order_list)) + ", whereas the expected type is list")
        if not isinstance(x_axis_column, str):
            raise ValueError(
                "The type of x_axis_column is " + str(type(x_axis_column)) + ", whereas the expected type is str")
        if not isinstance(y_axis_column, str):
            raise ValueError(
                "The type of y_axis_column is " + str(type(y_axis_column)) + ", whereas the expected type is str")
        if not isinstance(values_data_column, str):
            raise ValueError("The type of values_data_column is " + str(
                type(values_data_column)) + ", whereas the expected type is str")
        if not isinstance(color_column, str):
            raise ValueError(
                "The type of color_column is " + str(type(color_column)) + ", whereas the expected type is str")
        if not isinstance(values_data_config_column, str):
            raise ValueError("The type of values_data_config_column is " + str(
                type(values_data_config_column)) + ", whereas the expected type is str")
        if not isinstance(chart_attr, dict):
            raise ValueError("The type of chart_attr is " + str(type(chart_attr)) + ", whereas the expected type is dict")
# fill the NaN with empty string
pandas_dataframe = data.fillna('')
#convert x-axis list into string
x_axis_order_list = list(map(str,x_axis_order_list))
y_axis_data_column_name = y_axis_column
#convert x-axis dataframe column to string
pandas_dataframe[x_axis_column] = pandas_dataframe[x_axis_column].astype(str)
x_axis_data_column_name = x_axis_column
values_data_column_name = values_data_column
values_data_config_column_name = values_data_config_column
color_column_name = color_column
grp_column_name = grp_column
        # group by treatment name and assign a unique group-number column
pandas_dataframe['treatment_grp_no'] = 0
mask = pandas_dataframe.groupby([values_data_column])['treatment_grp_no'].transform(lambda x : len(x)>1)
pandas_dataframe.loc[mask,'treatment_grp_no'] = pandas_dataframe.loc[mask,[values_data_column]].astype(str).sum(1).factorize()[0]
        # if config data is present, check for all columns and create the color scale based on grp column values
        # else group by treatment and assign grp numbers and create the color scale on that basis
if len(config_data) != 0:
# checks for the data type
            if type(config_data) is not _pd.core.frame.DataFrame:
                raise ValueError("The type of dataframe is " + str(type(config_data)) + ", whereas a pandas DataFrame is expected")
# checks for the existing of column in config data frame
else:
if values_data_config_column_name not in list(config_data.columns):
raise ValueError("The column name: "+values_data_config_column_name+" is not present in the given config data.")
elif color_column_name not in list(config_data.columns):
raise ValueError("The column name: "+color_column_name+" is not present in the given config data.")
# else:
# values_data_config_column_name = values_data_config_column
# color_column_name = color_column
        # initialization of config_data
pandas_dataframe_1 = {}
if len(config_data) != 0:
# fill the NaN with empty string
pandas_dataframe_1 = config_data.fillna('')
            # checks whether the values are repeated in config data
if len(list(pandas_dataframe_1[values_data_config_column_name])) != len(set(pandas_dataframe_1[values_data_config_column_name])):
raise ValueError("Data in column: "+values_data_config_column_name+" is repeated. Place unique values in this column")
# hex colors from config data
hex_colors_list = list(pandas_dataframe_1[color_column_name])
rgb_colors_list = []
# iterates each hex color and converts it into rgba
for hex_color in hex_colors_list:
# checks for the valid hex color code
                if re.match(r'^[#][0-9A-Fa-f]{6}$', str(hex_color)):
                    rgb_colors_list.append(str(_wc.hex_to_rgb(str(hex_color))).replace("IntegerRGB","rgba").replace("red=","").replace("green=","").replace("blue=","").replace(")",", 1)"))
                else:
                    raise ValueError("Invalid hex code: "+str(hex_color)+" in the data. Enter a valid 6-digit hex color code")
# appends the rgba column to the config data frame
updated_rgba_color_df = | _pd.DataFrame({color_column_name: rgb_colors_list}) | pandas.DataFrame |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import json
import datetime
import os
from os import listdir
from os.path import isfile, join
from shutil import copyfile
import logging
import pandas as pd
logger = logging.getLogger("root")
logging.basicConfig(
format="\033[1;36m%(levelname)s: %(filename)s (def %(funcName)s %(lineno)s): \033[1;37m %(message)s",
level=logging.DEBUG
)
class BuildLatestData(object):
"""
    This script processes county-level JSON from the CDC into a CSV file with the latest data, written to `latest-cdc-weekly-county-data.csv`
"""
timestamp = datetime.datetime.now().strftime("%Y-%m-%d-%H%M%S")
dir_current = os.path.dirname(os.path.realpath(__file__))
dir_data = "daily-cdc-county-transmission-data"
path = os.path.join(dir_current, dir_data)
def handle(self):
latest_csv = "latest-daily-cdc-county-transmission.csv"
latest_json = "latest-daily-cdc-county-transmission.json"
files = [os.path.join(self.path, f) for f in listdir(
self.path) if isfile(join(self.path, f))]
target = max(files, key=os.path.getctime)
file_saved = os.path.join(self.dir_current, self.dir_data, latest_json)
copyfile(target, file_saved)
# with open(target, encoding='utf-8') as f:
# raw_json = json.load(f)
# for item in raw_json['integrated_county_latest_external_data']:
# item['acquired_datestamp'] = os.path.basename(target)[:10]
# latest_output.append(item)
# self.update_csv(latest_csv, latest_output)
# self.create_json(latest_csv, latest_json, latest_output)
def update_csv(self, file, data):
file_saved = os.path.join(self.dir_current, self.dir_data, file)
csv_data = | pd.DataFrame(data) | pandas.DataFrame |
###############################################################################
###
### pedar_walk
### This file is part of CorePressure
### This file was created by Dr <NAME>
### includes a set of fuctions to process walking data from pedar
###
### Copyright (C) 2018 University of Salford - All Rights Reserved
### You may use, distribute and modify this code under the terms of MIT Licence
### See <filename> or go to <url> for full licence details
###
###############################################################################
#Builtins:
import numpy as np
import pandas as pd
# import matplotlib as mpl
# mpl.rcParams['backend'] = "qt4agg"
# mpl.rcParams['backend.qt4'] = "PySide"
from matplotlib import pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import importlib
#Independents:
import AProcessing as ap; importlib.reload(ap)
class PedarWalkData:
"""
A class to process pressure data collected using Pedar System
"""
def __init__(self, files):
self.files = files
def closeall(): # this closes all figures in reverse order
l = plt.get_fignums()
l.reverse()
for a in l:
plt.close(a)
def pdf_plot(fname):
pp = PdfPages('C:/Temp/'+fname+'.pdf')
return(pp)
def openPD(fname):
srows = [i for i in range(9)]
fdat = pd.read_csv(fname,sep='\t',skiprows=srows)
return(fdat)
def find_blocks(dataset,figname,pp):
"""
        Takes a 2D cyclic dataset and chunks the file into blocks.
        Returns the start and end locations of each block.
"""
Condit = max(dataset)*0.75
PPeaks = ap.PeakDetect(dataset,span=30,condit=Condit)
MCondit = 0.5*Condit
PMins = ap.MinDetect(dataset,span=30,condit=MCondit)
fig = plt.figure(figname + '_findBlock')
plt.subplot(311)
plt.plot(dataset)
plt.plot(PPeaks[0],PPeaks[1],'*')
plt.plot(PMins[0],PMins[1],'*')
### Zero between cycles:
for Pmin in PMins[1]:
dataset[dataset == Pmin] = 0
### Zero start:
for j in range(1, len(PPeaks[0])):
if PPeaks[0][j] - PPeaks[0][j-1] > 100:
if j < 9:
dataset[:PPeaks[0][j]] = 0
else:
break
### Zero end:
dataset[PPeaks[0][-1]:] = 0
for j in range(len(PPeaks[0])-5, len(PPeaks[0])):
if PPeaks[0][j] - PPeaks[0][j-1] > 100:
dataset[PPeaks[0][j]:] = 0
else:
break
### Zero between blocks:
for j in range(1, len(PPeaks[0])):
if PPeaks[0][j] - PPeaks[0][j-1] > 85:
dataset[PPeaks[0][j-1]:PPeaks[0][j]] = 0
plt.subplot(312)
plt.plot(dataset)
plt.plot(PPeaks[0],PPeaks[1],'*')
### chunk blocks:
blocks = []
bl = [PPeaks[0][0]]
for i in range(1,len(PPeaks[0])):
if i == len(PPeaks[0])-1:
if len(bl) > 9:
blocks.append(bl)
elif PPeaks[0][i] - PPeaks[0][i-1] < 90:
bl.append(PPeaks[0][i])
else:
if len(bl) > 9:
blocks.append(bl)
bl = []
### Setup outputs:
start = []
end = []
for bl in blocks:
bl = bl[1:-2]
flip_data = np.array(dataset[:bl[0]])[::-1]
slip_data = np.array(dataset[bl[-1]:])
st = np.argmin(flip_data)+1
en = np.argmin(slip_data)
start.append(bl[0]-st)
end.append(bl[-1]+en)
ys = [75] * len(start)
plt.subplot(313)
plt.plot(dataset)
plt.plot(start,ys,'*')
plt.plot(end,ys,'*')
pp.savefig(fig)
plt.close()
return (start,end)
def one_insole_one_block(data,ffname,insole='Left'):
heads = list(data.head(0))
if insole == 'Left':
pdat = data.loc[:,heads[1]:heads[99]]
insole_label = 'Left'
else:
try:
pdat = data.loc[:,heads[100]:heads[198]]
insole_label = 'Right'
except IndexError:
pdat = data.loc[:,heads[100]:]
insole_label = 'Right - Part'
sensors = [str(x) for x in range(1,100)]
insole_df = | pd.DataFrame(data=pdat) | pandas.DataFrame |
import numpy as np
import pandas as pd
from tensorflow.keras import utils
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
from sklearn.model_selection import train_test_split, KFold
import ensemble_capsule_network
from config import Config
from preprocessing import text_preprocessing, load_word_embedding_matrix
folder_path = "D:\\deep_learning_experiments"
lankadeepa_data_path = folder_path + "\\sinhala_data\\lankadeepa_tagged_comments.csv"
gossip_lanka_data_path = folder_path + "\\sinhala_data\\gossip_lanka_tagged_comments.csv"
word_embedding_keyed_vectors_path = 'D:\\deep_learning_experiments\\word_vectors_sinhala\\keyed.kv'
word_embedding_matrix_path = 'D:\\deep_learning_experiments\\word_embedding_matrix'
EMBEDDING_SIZE = 300
lankadeepa_data = | pd.read_csv(lankadeepa_data_path) | pandas.read_csv |
"""
Convert the original database into a database with one file per stock id
"""
import os
import pandas as pd
import numpy as np
import pickle
# load market quote (hangqing) data
file_path = 'C:/Users/Administrator/Desktop/program/data/hangqing/'
file_list = os.listdir(file_path)
columns_name = pd.read_csv(file_path+file_list[0]).columns
hangqing_record = []
temp_record = pd.DataFrame(columns=columns_name)
for i in range(len(file_list)):
now_path = file_path+file_list[i]
now_df = pd.read_table(now_path, sep=',')
temp_record = pd.concat((temp_record, now_df), axis=0)
if (i+1) % 50 == 0 or (i+1) == len(file_list):
del temp_record['Unnamed: 0']
del temp_record['Unnamed: 25']
hangqing_record.append(temp_record)
temp_record = pd.DataFrame(columns=columns_name)
print('all:%s, now:%s' % (len(file_list), i+1))
for i in range(len(hangqing_record)):
if i == 0:
hangqing_df = hangqing_record[0]
else:
hangqing_df = pd.concat((hangqing_df, hangqing_record[i]), axis=0)
del hangqing_record
# load multi-factor (duoyinzi) data
file_path = 'C:/Users/Administrator/Desktop/program/data/duoyinzi/'
file_list = os.listdir(file_path)
columns_name = pd.read_csv(file_path+file_list[0]).columns
duoyinzi_record = []
temp_record = | pd.DataFrame(columns=columns_name) | pandas.DataFrame |
from openpyxl import Workbook
from openpyxl import load_workbook
import pandas as pd
wb = load_workbook('/home/nikon-cook/Documents/МИТМО/веб-прога/minos_db.xlsx')
print(wb.get_sheet_names())
Publishers_data = wb.get_sheet_by_name('Publishers')
#print(Publishers_data.dimensions, Publishers_data.max_row, Publishers_data.max_column)
Authors_data = wb.get_sheet_by_name('Authors')
Books_data = wb.get_sheet_by_name('Books')
Directors_data = wb.get_sheet_by_name('Directors')
'''
p_df = pd.DataFrame()
p_rows = Publishers_data.iter_rows()
p_first_row = next(p_rows)
p_sec_row = next(p_rows)
p_headings = [c.value for c in p_first_row]
p_row1 = [c.value for c in p_sec_row]
p_headings
print(p_headings, p_row1)
'''
'''
df = []
for i in range(Publishers_data.max_row):
df.append([c.value for c in next(p_rows)])
publisher_df = pd.DataFrame(df[1:])
publisher_df.columns = df[0]
print(publisher_df)
'''
'''
a_rows = Authors_data.iter_rows()
a_first_row = next(a_rows)
a_headings = [c.value for c in a_first_row]
a_headings
'''
def make_dataframe(data_sheet_name):
df = []
rows = data_sheet_name.iter_rows()
for i in range(data_sheet_name.max_row):
df.append([c.value for c in next(rows)])
res_df = | pd.DataFrame(df[1:]) | pandas.DataFrame |
import os
import pandas as pd
import pickle
import numpy as np
import lightgbm as lgb
import xgboost as xgb
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import confusion_matrix
from sklearn.svm import SVR
from sklearn.model_selection import KFold
from openpyxl import load_workbook
from config import parse_args
from src.log import Logger
from src.utils import Jaccard,Cosine,Peason
def save_args(args, logger):
save_args_file = os.path.join(args.root_path, "args.txt")
line = str(args)
with open(save_args_file, mode="w", encoding="utf-8") as wfp:
wfp.write(line + "\n")
logger.info("Args saved in file%s" % save_args_file)
def check_path(args):
assert os.path.exists("./data")
if not os.path.exists(args.log_path):
os.mkdir(args.log_path)
if not os.path.exists(args.processed_path):
os.mkdir(args.processed_path)
def xgb_train(args):
root_path = os.path.join(args.log_path,"third")
# dataset preparing
# determine inputs dtype
value_mol_file = os.path.join(args.raw_path, "Molecular_Descriptor.xlsx")
admet_file = os.path.join(args.raw_path, "ADMET.xlsx")
admet_mat_train = | pd.read_excel(admet_file, sheet_name="training") | pandas.read_excel |
#
# Copyright 2020 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Testing out the datacompy functionality
"""
import io
import logging
import sys
from datetime import datetime
from decimal import Decimal
from unittest import mock
import numpy as np
import pandas as pd
import pytest
from pandas.util.testing import assert_series_equal
from pytest import raises
import datacompy
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
def test_numeric_columns_equal_abs():
data = """a|b|expected
1|1|True
2|2.1|True
3|4|False
4|NULL|False
NULL|4|False
NULL|NULL|True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_numeric_columns_equal_rel():
data = """a|b|expected
1|1|True
2|2.1|True
3|4|False
4|NULL|False
NULL|4|False
NULL|NULL|True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |False
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |False
datacompy|DataComPy|False
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal_with_ignore_spaces():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |True
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |True
datacompy|DataComPy|False
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal_with_ignore_spaces_and_case():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |True
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |True
datacompy|DataComPy|True
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(
df.a, df.b, rel_tol=0.2, ignore_spaces=True, ignore_case=True
)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_date_columns_equal():
data = """a|b|expected
2017-01-01|2017-01-01|True
2017-01-02|2017-01-02|True
2017-10-01|2017-10-10|False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_equal_with_ignore_spaces():
data = """a|b|expected
2017-01-01|2017-01-01 |True
2017-01-02 |2017-01-02|True
2017-10-01 |2017-10-10 |False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2, ignore_spaces=True)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_equal_with_ignore_spaces_and_case():
data = """a|b|expected
2017-01-01|2017-01-01 |True
2017-01-02 |2017-01-02|True
2017-10-01 |2017-10-10 |False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(
df.a, df.b, rel_tol=0.2, ignore_spaces=True, ignore_case=True
)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2, ignore_spaces=True)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_unequal():
"""I want datetime fields to match with dates stored as strings
"""
df = pd.DataFrame([{"a": "2017-01-01", "b": "2017-01-02"}, {"a": "2017-01-01"}])
df["a_dt"] = pd.to_datetime(df["a"])
df["b_dt"] = pd.to_datetime(df["b"])
assert datacompy.columns_equal(df.a, df.a_dt).all()
assert datacompy.columns_equal(df.b, df.b_dt).all()
assert datacompy.columns_equal(df.a_dt, df.a).all()
assert datacompy.columns_equal(df.b_dt, df.b).all()
assert not datacompy.columns_equal(df.b_dt, df.a).any()
assert not datacompy.columns_equal(df.a_dt, df.b).any()
assert not datacompy.columns_equal(df.a, df.b_dt).any()
assert not datacompy.columns_equal(df.b, df.a_dt).any()
def test_bad_date_columns():
"""If strings can't be coerced into dates then it should be false for the
whole column.
"""
df = pd.DataFrame(
[{"a": "2017-01-01", "b": "2017-01-01"}, {"a": "2017-01-01", "b": "217-01-01"}]
)
df["a_dt"] = pd.to_datetime(df["a"])
assert not datacompy.columns_equal(df.a_dt, df.b).any()
def test_rounded_date_columns():
"""If strings can't be coerced into dates then it should be false for the
whole column.
"""
df = pd.DataFrame(
[
{"a": "2017-01-01", "b": "2017-01-01 00:00:00.000000", "exp": True},
{"a": "2017-01-01", "b": "2017-01-01 00:00:00.123456", "exp": False},
{"a": "2017-01-01", "b": "2017-01-01 00:00:01.000000", "exp": False},
{"a": "2017-01-01", "b": "2017-01-01 00:00:00", "exp": True},
]
)
df["a_dt"] = pd.to_datetime(df["a"])
actual = datacompy.columns_equal(df.a_dt, df.b)
expected = df["exp"]
assert_series_equal(actual, expected, check_names=False)
def test_decimal_float_columns_equal():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": 1, "expected": True},
{"a": Decimal("1.3"), "b": 1.3, "expected": True},
{"a": Decimal("1.000003"), "b": 1.000003, "expected": True},
{"a": Decimal("1.000000004"), "b": 1.000000003, "expected": False},
{"a": Decimal("1.3"), "b": 1.2, "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": 1, "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_decimal_float_columns_equal_rel():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": 1, "expected": True},
{"a": Decimal("1.3"), "b": 1.3, "expected": True},
{"a": Decimal("1.000003"), "b": 1.000003, "expected": True},
{"a": Decimal("1.000000004"), "b": 1.000000003, "expected": True},
{"a": Decimal("1.3"), "b": 1.2, "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": 1, "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.001)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_decimal_columns_equal():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": Decimal("1.3"), "b": Decimal("1.3"), "expected": True},
{"a": Decimal("1.000003"), "b": Decimal("1.000003"), "expected": True},
{"a": Decimal("1.000000004"), "b": Decimal("1.000000003"), "expected": False},
{"a": Decimal("1.3"), "b": Decimal("1.2"), "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": Decimal("1"), "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_decimal_columns_equal_rel():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": Decimal("1.3"), "b": Decimal("1.3"), "expected": True},
{"a": Decimal("1.000003"), "b": Decimal("1.000003"), "expected": True},
{"a": Decimal("1.000000004"), "b": Decimal("1.000000003"), "expected": True},
{"a": Decimal("1.3"), "b": Decimal("1.2"), "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": Decimal("1"), "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.001)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_infinity_and_beyond():
df = pd.DataFrame(
[
{"a": np.inf, "b": np.inf, "expected": True},
{"a": -np.inf, "b": -np.inf, "expected": True},
{"a": -np.inf, "b": np.inf, "expected": False},
{"a": np.inf, "b": -np.inf, "expected": False},
{"a": 1, "b": 1, "expected": True},
{"a": 1, "b": 0, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_mixed_column():
df = pd.DataFrame(
[
{"a": "hi", "b": "hi", "expected": True},
{"a": 1, "b": 1, "expected": True},
{"a": np.inf, "b": np.inf, "expected": True},
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": 1, "b": "1", "expected": False},
{"a": 1, "b": "yo", "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_mixed_column_with_ignore_spaces():
df = pd.DataFrame(
[
{"a": "hi", "b": "hi ", "expected": True},
{"a": 1, "b": 1, "expected": True},
{"a": np.inf, "b": np.inf, "expected": True},
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": 1, "b": "1 ", "expected": False},
{"a": 1, "b": "yo ", "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_mixed_column_with_ignore_spaces_and_case():
df = pd.DataFrame(
[
{"a": "hi", "b": "hi ", "expected": True},
{"a": 1, "b": 1, "expected": True},
{"a": np.inf, "b": np.inf, "expected": True},
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": 1, "b": "1 ", "expected": False},
{"a": 1, "b": "yo ", "expected": False},
{"a": "Hi", "b": "hI ", "expected": True},
{"a": "HI", "b": "HI ", "expected": True},
{"a": "hi", "b": "hi ", "expected": True},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, ignore_spaces=True, ignore_case=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_compare_df_setter_bad():
df = pd.DataFrame([{"a": 1, "A": 2}, {"a": 2, "A": 2}])
with raises(TypeError, match="df1 must be a pandas DataFrame"):
compare = datacompy.Compare("a", "a", ["a"])
with raises(ValueError, match="df1 must have all columns from join_columns"):
compare = datacompy.Compare(df, df.copy(), ["b"])
with raises(ValueError, match="df1 must have unique column names"):
compare = datacompy.Compare(df, df.copy(), ["a"])
df_dupe = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 3}])
assert datacompy.Compare(df_dupe, df_dupe.copy(), ["a", "b"]).df1.equals(df_dupe)
def test_compare_df_setter_good():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
df2 = pd.DataFrame([{"A": 1, "B": 2}, {"A": 2, "B": 3}])
compare = datacompy.Compare(df1, df2, ["a"])
assert compare.df1.equals(df1)
assert compare.df2.equals(df2)
assert compare.join_columns == ["a"]
compare = datacompy.Compare(df1, df2, ["A", "b"])
assert compare.df1.equals(df1)
assert compare.df2.equals(df2)
assert compare.join_columns == ["a", "b"]
def test_compare_df_setter_different_cases():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
df2 = pd.DataFrame([{"A": 1, "b": 2}, {"A": 2, "b": 3}])
compare = datacompy.Compare(df1, df2, ["a"])
assert compare.df1.equals(df1)
assert compare.df2.equals(df2)
def test_compare_df_setter_bad_index():
df = pd.DataFrame([{"a": 1, "A": 2}, {"a": 2, "A": 2}])
with raises(TypeError, match="df1 must be a pandas DataFrame"):
compare = datacompy.Compare("a", "a", on_index=True)
with raises(ValueError, match="df1 must have unique column names"):
compare = datacompy.Compare(df, df.copy(), on_index=True)
def test_compare_on_index_and_join_columns():
df = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
with raises(Exception, match="Only provide on_index or join_columns"):
compare = datacompy.Compare(df, df.copy(), on_index=True, join_columns=["a"])
def test_compare_df_setter_good_index():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 3}])
compare = datacompy.Compare(df1, df2, on_index=True)
assert compare.df1.equals(df1)
assert compare.df2.equals(df2)
def test_columns_overlap():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 3}])
compare = datacompy.Compare(df1, df2, ["a"])
assert compare.df1_unq_columns() == set()
assert compare.df2_unq_columns() == set()
assert compare.intersect_columns() == {"a", "b"}
def test_columns_no_overlap():
df1 = pd.DataFrame([{"a": 1, "b": 2, "c": "hi"}, {"a": 2, "b": 2, "c": "yo"}])
df2 = pd.DataFrame([{"a": 1, "b": 2, "d": "oh"}, {"a": 2, "b": 3, "d": "ya"}])
compare = datacompy.Compare(df1, df2, ["a"])
assert compare.df1_unq_columns() == {"c"}
assert compare.df2_unq_columns() == {"d"}
assert compare.intersect_columns() == {"a", "b"}
def test_10k_rows():
df1 = pd.DataFrame(np.random.randint(0, 100, size=(10000, 2)), columns=["b", "c"])
df1.reset_index(inplace=True)
df1.columns = ["a", "b", "c"]
df2 = df1.copy()
df2["b"] = df2["b"] + 0.1
compare_tol = datacompy.Compare(df1, df2, ["a"], abs_tol=0.2)
assert compare_tol.matches()
assert len(compare_tol.df1_unq_rows) == 0
assert len(compare_tol.df2_unq_rows) == 0
assert compare_tol.intersect_columns() == {"a", "b", "c"}
assert compare_tol.all_columns_match()
assert compare_tol.all_rows_overlap()
assert compare_tol.intersect_rows_match()
compare_no_tol = datacompy.Compare(df1, df2, ["a"])
assert not compare_no_tol.matches()
assert len(compare_no_tol.df1_unq_rows) == 0
assert len(compare_no_tol.df2_unq_rows) == 0
assert compare_no_tol.intersect_columns() == {"a", "b", "c"}
assert compare_no_tol.all_columns_match()
assert compare_no_tol.all_rows_overlap()
assert not compare_no_tol.intersect_rows_match()
@mock.patch("datacompy.logging.debug")
def test_subset(mock_debug):
df1 = pd.DataFrame([{"a": 1, "b": 2, "c": "hi"}, {"a": 2, "b": 2, "c": "yo"}])
df2 = pd.DataFrame([{"a": 1, "c": "hi"}])
comp = datacompy.Compare(df1, df2, ["a"])
assert comp.subset()
assert mock_debug.called_with("Checking equality")
@mock.patch("datacompy.logging.info")
def test_not_subset(mock_info):
df1 = pd.DataFrame([{"a": 1, "b": 2, "c": "hi"}, {"a": 2, "b": 2, "c": "yo"}])
df2 = pd.DataFrame([{"a": 1, "b": 2, "c": "hi"}, {"a": 2, "b": 2, "c": "great"}])
comp = datacompy.Compare(df1, df2, ["a"])
assert not comp.subset()
assert mock_info.called_with("Sample c mismatch: a: 2, df1: yo, df2: great")
def test_large_subset():
df1 = pd.DataFrame(np.random.randint(0, 100, size=(10000, 2)), columns=["b", "c"])
df1.reset_index(inplace=True)
df1.columns = ["a", "b", "c"]
df2 = df1[["a", "b"]].sample(50).copy()
comp = datacompy.Compare(df1, df2, ["a"])
assert not comp.matches()
assert comp.subset()
def test_string_joiner():
df1 = pd.DataFrame([{"ab": 1, "bc": 2}, {"ab": 2, "bc": 2}])
df2 = pd.DataFrame([{"ab": 1, "bc": 2}, {"ab": 2, "bc": 2}])
compare = datacompy.Compare(df1, df2, "ab")
assert compare.matches()
def test_decimal_with_joins():
df1 = pd.DataFrame([{"a": Decimal("1"), "b": 2}, {"a": Decimal("2"), "b": 2}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
compare = datacompy.Compare(df1, df2, "a")
assert compare.matches()
assert compare.all_columns_match()
assert compare.all_rows_overlap()
assert compare.intersect_rows_match()
def test_decimal_with_nulls():
df1 = pd.DataFrame([{"a": 1, "b": Decimal("2")}, {"a": 2, "b": Decimal("2")}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}, {"a": 3, "b": 2}])
compare = datacompy.Compare(df1, df2, "a")
assert not compare.matches()
assert compare.all_columns_match()
assert not compare.all_rows_overlap()
assert compare.intersect_rows_match()
def test_strings_with_joins():
df1 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}])
df2 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}])
compare = datacompy.Compare(df1, df2, "a")
assert compare.matches()
assert compare.all_columns_match()
assert compare.all_rows_overlap()
assert compare.intersect_rows_match()
def test_index_joining():
df1 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}])
df2 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}])
compare = datacompy.Compare(df1, df2, on_index=True)
assert compare.matches()
def test_index_joining_strings_i_guess():
df1 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}])
df2 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}])
df1.index = df1["a"]
df2.index = df2["a"]
df1.index.name = df2.index.name = None
compare = datacompy.Compare(df1, df2, on_index=True)
assert compare.matches()
def test_index_joining_non_overlapping():
df1 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}])
df2 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}, {"a": "back fo mo", "b": 3}])
compare = datacompy.Compare(df1, df2, on_index=True)
assert not compare.matches()
assert compare.all_columns_match()
assert compare.intersect_rows_match()
assert len(compare.df1_unq_rows) == 0
assert len(compare.df2_unq_rows) == 1
assert list(compare.df2_unq_rows["a"]) == ["back fo mo"]
def test_temp_column_name():
df1 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}])
df2 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}, {"a": "back fo mo", "b": 3}])
actual = datacompy.temp_column_name(df1, df2)
assert actual == "_temp_0"
def test_temp_column_name_one_has():
df1 = pd.DataFrame([{"_temp_0": "hi", "b": 2}, {"_temp_0": "bye", "b": 2}])
df2 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}, {"a": "back fo mo", "b": 3}])
actual = datacompy.temp_column_name(df1, df2)
assert actual == "_temp_1"
def test_temp_column_name_both_have():
df1 = pd.DataFrame([{"_temp_0": "hi", "b": 2}, {"_temp_0": "bye", "b": 2}])
df2 = pd.DataFrame(
[{"_temp_0": "hi", "b": 2}, {"_temp_0": "bye", "b": 2}, {"a": "back fo mo", "b": 3}]
)
actual = datacompy.temp_column_name(df1, df2)
assert actual == "_temp_1"
def test_temp_column_name_both_have_temp_2():
df1 = pd.DataFrame([{"_temp_0": "hi", "b": 2}, {"_temp_0": "bye", "b": 2}])
df2 = pd.DataFrame(
[{"_temp_0": "hi", "b": 2}, {"_temp_1": "bye", "b": 2}, {"a": "back fo mo", "b": 3}]
)
actual = datacompy.temp_column_name(df1, df2)
assert actual == "_temp_2"
def test_temp_column_name_one_already():
df1 = pd.DataFrame([{"_temp_1": "hi", "b": 2}, {"_temp_1": "bye", "b": 2}])
df2 = pd.DataFrame(
[{"_temp_1": "hi", "b": 2}, {"_temp_1": "bye", "b": 2}, {"a": "back fo mo", "b": 3}]
)
actual = datacompy.temp_column_name(df1, df2)
assert actual == "_temp_0"
### Duplicate testing!
def test_simple_dupes_one_field():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 2}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 2}])
compare = datacompy.Compare(df1, df2, join_columns=["a"])
assert compare.matches()
# Just render the report to make sure it renders.
t = compare.report()
def test_simple_dupes_two_fields():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 2, "c": 2}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 2, "c": 2}])
compare = datacompy.Compare(df1, df2, join_columns=["a", "b"])
assert compare.matches()
# Just render the report to make sure it renders.
t = compare.report()
def test_simple_dupes_index():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 2}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 2}])
df1.index = df1["a"]
df2.index = df2["a"]
df1.index.name = df2.index.name = None
compare = datacompy.Compare(df1, df2, on_index=True)
assert compare.matches()
# Just render the report to make sure it renders.
t = compare.report()
def test_simple_dupes_one_field_two_vals():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 0}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 0}])
compare = datacompy.Compare(df1, df2, join_columns=["a"])
assert compare.matches()
# Just render the report to make sure it renders.
t = compare.report()
def test_simple_dupes_one_field_two_vals():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 0}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 0}])
compare = datacompy.Compare(df1, df2, join_columns=["a"])
assert not compare.matches()
assert len(compare.df1_unq_rows) == 1
assert len(compare.df2_unq_rows) == 1
assert len(compare.intersect_rows) == 1
# Just render the report to make sure it renders.
t = compare.report()
def test_simple_dupes_one_field_three_to_two_vals():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 0}, {"a": 1, "b": 0}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 0}])
compare = datacompy.Compare(df1, df2, join_columns=["a"])
assert not compare.matches()
assert len(compare.df1_unq_rows) == 1
assert len(compare.df2_unq_rows) == 0
assert len(compare.intersect_rows) == 2
# Just render the report to make sure it renders.
t = compare.report()
def test_dupes_from_real_data():
data = """acct_id,acct_sfx_num,trxn_post_dt,trxn_post_seq_num,trxn_amt,trxn_dt,debit_cr_cd,cash_adv_trxn_comn_cntry_cd,mrch_catg_cd,mrch_pstl_cd,visa_mail_phn_cd,visa_rqstd_pmt_svc_cd,mc_pmt_facilitator_idn_num
100,0,2017-06-17,1537019,30.64,2017-06-15,D,CAN,5812,M2N5P5,,,0.0
200,0,2017-06-24,1022477,485.32,2017-06-22,D,USA,4511,7114,7.0,1,
100,0,2017-06-17,1537039,2.73,2017-06-16,D,CAN,5812,M4J 1M9,,,0.0
200,0,2017-06-29,1049223,22.41,2017-06-28,D,USA,4789,21211,,A,
100,0,2017-06-17,1537029,34.05,2017-06-16,D,CAN,5812,M4E 2C7,,,0.0
200,0,2017-06-29,1049213,9.12,2017-06-28,D,CAN,5814,0,,,
100,0,2017-06-19,1646426,165.21,2017-06-17,D,CAN,5411,M4M 3H9,,,0.0
200,0,2017-06-30,1233082,28.54,2017-06-29,D,USA,4121,94105,7.0,G,
100,0,2017-06-19,1646436,17.87,2017-06-18,D,CAN,5812,M4J 1M9,,,0.0
200,0,2017-06-30,1233092,24.39,2017-06-29,D,USA,4121,94105,7.0,G,
100,0,2017-06-19,1646446,5.27,2017-06-17,D,CAN,5200,M4M 3G6,,,0.0
200,0,2017-06-30,1233102,61.8,2017-06-30,D,CAN,4121,0,,,
100,0,2017-06-20,1607573,41.99,2017-06-19,D,CAN,5661,M4C1M9,,,0.0
200,0,2017-07-01,1009403,2.31,2017-06-29,D,USA,5814,22102,,F,
100,0,2017-06-20,1607553,86.88,2017-06-19,D,CAN,4812,H2R3A8,,,0.0
200,0,2017-07-01,1009423,5.5,2017-06-29,D,USA,5812,2903,,F,
100,0,2017-06-20,1607563,25.17,2017-06-19,D,CAN,5641,M4C 1M9,,,0.0
200,0,2017-07-01,1009433,214.12,2017-06-29,D,USA,3640,20170,,A,
100,0,2017-06-20,1607593,1.67,2017-06-19,D,CAN,5814,M2N 6L7,,,0.0
200,0,2017-07-01,1009393,2.01,2017-06-29,D,USA,5814,22102,,F,"""
df1 = pd.read_csv(io.StringIO(data), sep=",")
df2 = df1.copy()
compare_acct = datacompy.Compare(df1, df2, join_columns=["acct_id"])
assert compare_acct.matches()
compare_unq = datacompy.Compare(
df1, df2, join_columns=["acct_id", "acct_sfx_num", "trxn_post_dt", "trxn_post_seq_num"]
)
assert compare_unq.matches()
# Just render the report to make sure it renders.
t = compare_acct.report()
r = compare_unq.report()
def test_strings_with_joins_with_ignore_spaces():
df1 = pd.DataFrame([{"a": "hi", "b": " A"}, {"a": "bye", "b": "A"}])
df2 = pd.DataFrame([{"a": "hi", "b": "A"}, {"a": "bye", "b": "A "}])
compare = datacompy.Compare(df1, df2, "a", ignore_spaces=False)
assert not compare.matches()
assert compare.all_columns_match()
assert compare.all_rows_overlap()
assert not compare.intersect_rows_match()
compare = datacompy.Compare(df1, df2, "a", ignore_spaces=True)
assert compare.matches()
assert compare.all_columns_match()
assert compare.all_rows_overlap()
assert compare.intersect_rows_match()
def test_strings_with_joins_with_ignore_case():
df1 = pd.DataFrame([{"a": "hi", "b": "a"}, {"a": "bye", "b": "A"}])
df2 = pd.DataFrame([{"a": "hi", "b": "A"}, {"a": "bye", "b": "a"}])
compare = datacompy.Compare(df1, df2, "a", ignore_case=False)
assert not compare.matches()
assert compare.all_columns_match()
assert compare.all_rows_overlap()
assert not compare.intersect_rows_match()
compare = datacompy.Compare(df1, df2, "a", ignore_case=True)
assert compare.matches()
assert compare.all_columns_match()
assert compare.all_rows_overlap()
assert compare.intersect_rows_match()
def test_decimal_with_joins_with_ignore_spaces():
df1 = pd.DataFrame([{"a": 1, "b": " A"}, {"a": 2, "b": "A"}])
df2 = pd.DataFrame([{"a": 1, "b": "A"}, {"a": 2, "b": "A "}])
compare = datacompy.Compare(df1, df2, "a", ignore_spaces=False)
assert not compare.matches()
assert compare.all_columns_match()
assert compare.all_rows_overlap()
assert not compare.intersect_rows_match()
compare = datacompy.Compare(df1, df2, "a", ignore_spaces=True)
assert compare.matches()
assert compare.all_columns_match()
assert compare.all_rows_overlap()
assert compare.intersect_rows_match()
def test_decimal_with_joins_with_ignore_case():
df1 = | pd.DataFrame([{"a": 1, "b": "a"}, {"a": 2, "b": "A"}]) | pandas.DataFrame |
from __future__ import unicode_literals, division, print_function
import os
import unittest
import pandas as pd
import numpy as np
import warnings
from itertools import product
from pymatgen.core.structure import Structure
from pymatgen.util.testing import PymatgenTest
from sklearn.dummy import DummyRegressor, DummyClassifier
from matminer.utils.caching import _get_all_nearest_neighbors
from matminer.featurizers.base import BaseFeaturizer, MultipleFeaturizer, \
StackedFeaturizer
from matminer.featurizers.function import FunctionFeaturizer
from matminer.featurizers.structure import SiteStatsFingerprint
class SingleFeaturizer(BaseFeaturizer):
def feature_labels(self):
return ['y']
def featurize(self, x):
return [x + 1]
def citations(self):
return ["A"]
def implementors(self):
return ["Us"]
class SingleFeaturizerMultiArgs(SingleFeaturizer):
def featurize(self, *x):
return [x[0] + x[1]]
class MultipleFeatureFeaturizer(BaseFeaturizer):
def feature_labels(self):
return ['w', 'z']
def featurize(self, x):
return [x - 1, x + 2]
def citations(self):
return ["A"]
def implementors(self):
return ["Them"]
class MatrixFeaturizer(BaseFeaturizer):
def feature_labels(self):
return ['representation']
def featurize(self, *x):
return [np.eye(2, 2)]
def citations(self):
return ["C"]
def implementors(self):
return ["Everyone"]
class MultiArgs2(SingleFeaturizerMultiArgs):
def featurize(self, *x):
        # Making a 2D array to test whether MultipleFeaturizer
        # can handle featurizers that have both 1D vectors with
        # singleton dimensions (e.g., shape==(4,1)) and those
        # without (e.g., shape==(4,))
return [super(MultiArgs2, self).featurize(*x)]
def feature_labels(self):
return ['y2']
class FittableFeaturizer(BaseFeaturizer):
"""
This test featurizer tests fitting qualities of BaseFeaturizer, including
refittability and different results based on different fits.
"""
def fit(self, X, y=None, **fit_kwargs):
self._features = ['a', 'b', 'c'][:len(X)]
return self
def featurize(self, x):
return [x + 3, x + 4, 2 * x][:len(self._features)]
def feature_labels(self):
return self._features
def citations(self):
return ["Q"]
def implementors(self):
return ["A competing research group"]
class MultiTypeFeaturizer(BaseFeaturizer):
"""A featurizer that returns multiple dtypes"""
def featurize(self, *x):
return ['a', 1]
def feature_labels(self):
return ['label', 'int_label']
class TestBaseClass(PymatgenTest):
def setUp(self):
self.single = SingleFeaturizer()
self.multi = MultipleFeatureFeaturizer()
self.matrix = MatrixFeaturizer()
self.multiargs = SingleFeaturizerMultiArgs()
self.fittable = FittableFeaturizer()
@staticmethod
def make_test_data():
return pd.DataFrame({'x': [1, 2, 3]})
def test_dataframe(self):
data = self.make_test_data()
data = self.single.featurize_dataframe(data, 'x')
self.assertArrayAlmostEqual(data['y'], [2, 3, 4])
data = self.multi.featurize_dataframe(data, 'x')
self.assertArrayAlmostEqual(data['w'], [0, 1, 2])
self.assertArrayAlmostEqual(data['z'], [3, 4, 5])
def test_matrix(self):
"""Test the ability to add features that are matrices to a dataframe"""
data = self.make_test_data()
data = self.matrix.featurize_dataframe(data, 'x')
self.assertArrayAlmostEqual(np.eye(2, 2), data['representation'][0])
def test_inplace(self):
data = self.make_test_data()
self.single.featurize_dataframe(data, 'x', inplace=False)
self.assertNotIn('y', data.columns)
self.single.featurize_dataframe(data, 'x', inplace=True)
self.assertIn('y', data)
def test_indices(self):
data = self.make_test_data()
data.index = [4, 6, 6]
data = self.single.featurize_dataframe(data, 'x')
self.assertArrayAlmostEqual(data['y'], [2, 3, 4])
def test_multiple(self):
# test iterating over both entries and featurizers
for iter_entries in [True, False]:
multi_f = MultipleFeaturizer([self.single, self.multi],
iterate_over_entries=iter_entries)
data = self.make_test_data()
self.assertArrayAlmostEqual([2, 0, 3], multi_f.featurize(1))
self.assertArrayEqual(['A'], multi_f.citations())
implementors = multi_f.implementors()
self.assertIn('Us', implementors)
self.assertIn('Them', implementors)
self.assertEqual(2, len(implementors))
# Ensure BaseFeaturizer operation without overriden featurize_dataframe
with warnings.catch_warnings(record=True) as w:
multi_f.featurize_dataframe(data, 'x')
self.assertEqual(len(w), 0)
self.assertArrayAlmostEqual(data['y'], [2, 3, 4])
self.assertArrayAlmostEqual(data['w'], [0, 1, 2])
self.assertArrayAlmostEqual(data['z'], [3, 4, 5])
f = MatrixFeaturizer()
multi_f = MultipleFeaturizer([self.single, self.multi, f])
data = self.make_test_data()
with warnings.catch_warnings(record=True) as w:
multi_f.featurize_dataframe(data, 'x')
self.assertEqual(len(w), 0)
self.assertArrayAlmostEqual(data['representation'][0],
[[1.0, 0.0], [0.0, 1.0]])
    # Leaving this here for now in case this issue crops up again.
# def test_multifeatures(self):
# multiargs2 = MultiArgs2()
#
# # test iterating over both entries and featurizers
# for iter_entries in [True, False]:
# # Make a test dataset with two input variables
# data = self.make_test_data()
# data['x2'] = [4, 5, 6]
#
# # Create featurizer
# multi_f = MultipleFeaturizer([self.multiargs, multiargs2],
# iterate_over_entries=iter_entries)
#
# # Test featurize with multiple arguments
# features = multi_f.featurize(0, 2)
# self.assertArrayAlmostEqual([2, 2], features)
#
# # Test dataframe
# data = multi_f.featurize_dataframe(data, ['x', 'x2'])
# self.assertEqual(['y', 'y2'], multi_f.feature_labels())
# self.assertArrayAlmostEqual([[5, 5], [7, 7], [9, 9]],
# data[['y', 'y2']])
def test_featurize_many(self):
# Single argument
s = self.single
s.set_n_jobs(2)
mat = s.featurize_many([1, 2, 3])
self.assertArrayAlmostEqual(mat, [[2], [3], [4]])
# Multi-argument
s = self.multiargs
s.set_n_jobs(2)
mat = s.featurize_many([[1, 4], [2, 5], [3, 6]])
self.assertArrayAlmostEqual(mat, [[5], [7], [9]])
def test_multiprocessing_df(self):
# Single argument
s = self.single
data = self.make_test_data()
s.set_n_jobs(2)
data = s.featurize_dataframe(data, 'x')
self.assertArrayAlmostEqual(data['y'], [2, 3, 4])
# Multi-argument
s = self.multiargs
data = self.make_test_data()
s.set_n_jobs(2)
data['x2'] = [4, 5, 6]
data = s.featurize_dataframe(data, ['x', 'x2'])
self.assertArrayAlmostEqual(data['y'], [5, 7, 9])
def test_fittable(self):
data = self.make_test_data()
ft = self.fittable
# Test fit and featurize separately
ft.fit(data['x'][:2])
ft.featurize_dataframe(data, 'x')
self.assertArrayAlmostEqual(data['a'], [4, 5, 6])
self.assertRaises(Exception, data.__getattr__, 'c')
# Test fit + featurize methods on new fits
data = self.make_test_data()
transformed = ft.fit_transform([data['x'][1]])
self.assertArrayAlmostEqual(transformed[0], [5])
data = self.make_test_data()
ft.fit_featurize_dataframe(data, 'x')
self.assertArrayAlmostEqual(data['a'], [4, 5, 6])
self.assertArrayAlmostEqual(data['b'], [5, 6, 7])
self.assertArrayAlmostEqual(data['c'], [2, 4, 6])
def test_stacked_featurizer(self):
data = self.make_test_data()
data['y'] = [1, 2, 3]
# Test for a regressor
model = DummyRegressor()
model.fit(self.multi.featurize_many(data['x']), data['y'])
# Test the predictions
f = StackedFeaturizer(self.single, model)
self.assertEqual([2], f.featurize(data['x'][0]))
# Test the feature names
self.assertEqual(['prediction'], f.feature_labels())
f.name = 'ML'
self.assertEqual(['ML prediction'], f.feature_labels())
# Test classifier
model = DummyClassifier("prior")
data['y'] = [0, 0, 1]
model.fit(self.multi.featurize_many(data['x']), data['y'])
# Test the prediction
f.model = model
self.assertEqual([2. / 3], f.featurize(data['x'][0]))
# Test the feature labels
self.assertRaises(ValueError, f.feature_labels)
f.class_names = ['A', 'B']
self.assertEqual(['ML P(A)'], f.feature_labels())
# Test with three classes
data['y'] = [0, 2, 1]
model.fit(self.multi.featurize_many(data['x']), data['y'])
self.assertArrayAlmostEqual([1. / 3] * 2, f.featurize(data['x'][0]))
f.class_names = ['A', 'B', 'C']
self.assertEqual(['ML P(A)', 'ML P(B)'], f.feature_labels())
def test_multiindex_inplace(self):
df_1lvl = pd.DataFrame({'x': [1, 2, 3]})
df_2lvl = | pd.DataFrame({'x': [1, 2, 3]}) | pandas.DataFrame |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.9.1+dev
# kernelspec:
# display_name: Python [conda env:annorxiver]
# language: python
# name: conda-env-annorxiver-py
# ---
# # Re-Run Analyses with Polka et al. Subset
# +
from datetime import timedelta, date
from pathlib import Path
import sys
from cairosvg import svg2png
from IPython.display import Image
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.patches import ConnectionPatch
from mizani.breaks import date_breaks
from mizani.formatters import timedelta_format
import numpy as np
import pandas as pd
import plotnine as p9
import requests
from scipy.spatial.distance import cdist
from scipy.stats import linregress
import seaborn as sns
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LogisticRegressionCV, LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeClassifier, export_graphviz
import spacy
import tqdm
from annorxiver_modules.corpora_comparison_helper import (
aggregate_word_counts,
calculate_confidence_intervals,
create_lemma_count_df,
get_term_statistics,
plot_bargraph,
plot_point_bar_figure,
plot_pointgraph,
)
sys.path.append(str(Path("../../../preprint_similarity_search/server").resolve()))
from SAUCIE import SAUCIE, Loader # noqa: E402
mpl.rcParams["figure.dpi"] = 600
mpl.rcParams["font.size"] = 12
mpl.rcParams["font.family"] = "Arial"
# -
import rpy2.robjects as robjects # noqa: E402
from rpy2.robjects import pandas2ri # noqa: E402
# +
from rpy2.robjects.packages import importr # noqa: E402
utils = importr("utils")
utils.install_packages("svglite")
# -
# # Corpora Comparison Preprint-Published Update
# ## BioRxiv to Published Mapping
mapped_doi_df = (
pd.read_csv("../journal_tracker/output/mapped_published_doi.tsv", sep="\t")
.query("published_doi.notnull()")
.query("pmcid.notnull()")
.groupby("preprint_doi")
.agg(
{
"author_type": "first",
"heading": "first",
"category": "first",
"document": "first",
"preprint_doi": "last",
"published_doi": "last",
"pmcid": "last",
}
)
.reset_index(drop=True)
)
mapped_doi_df.head()
polka_et_al_mapped_df = pd.read_csv(
"output/polka_et_al_pmc_mapped_subset.tsv", sep="\t"
)
polka_et_al_mapped_df.head()
spacy_nlp = spacy.load("en_core_web_sm")
stop_word_list = list(spacy_nlp.Defaults.stop_words)
# ## BioRxiv Token Counts
polka_preprints = list(Path("output/biorxiv_word_counts").rglob("*tsv"))
# +
preprint_count = aggregate_word_counts(polka_preprints)
preprint_count_df = (
pd.DataFrame.from_records(
[
{
"lemma": token[0],
"pos_tag": token[1],
"dep_tag": token[2],
"count": preprint_count[token],
}
for token in preprint_count
]
)
.query(f"lemma not in {stop_word_list}")
.groupby("lemma")
.agg({"count": "sum"})
.reset_index()
.sort_values("count", ascending=False)
)
preprint_count_df.head()
# -
# ## PMCOA Token Counts
polka_published = list(Path("output/pmcoa_word_counts").rglob("*tsv"))
# +
published_count = aggregate_word_counts(polka_published)
published_count_df = (
pd.DataFrame.from_records(
[
{
"lemma": token[0],
"pos_tag": token[1],
"dep_tag": token[2],
"count": published_count[token],
}
for token in published_count
]
)
.query(f"lemma not in {stop_word_list}")
.groupby("lemma")
.agg({"count": "sum"})
.reset_index()
.sort_values("count", ascending=False)
)
published_count_df.head()
# -
# ## Get Token Stats
preprint_vs_published = get_term_statistics(published_count_df, preprint_count_df, 100)
preprint_vs_published.to_csv(
"output/updated_preprint_to_published_comparison.tsv", sep="\t", index=False
)
preprint_vs_published
full_plot_df = calculate_confidence_intervals(preprint_vs_published)
full_plot_df.to_csv(
"output/polka_preprint_published_comparison_error_bars.tsv", sep="\t", index=False
)
full_plot_df.head()
plot_df = (
full_plot_df.sort_values("odds_ratio", ascending=False)
.iloc[4:]
.head(20)
.append(full_plot_df.sort_values("odds_ratio", ascending=True).head(20))
.assign(
odds_ratio=lambda x: x.odds_ratio.apply(lambda x: np.log2(x)),
lower_odds=lambda x: x.lower_odds.apply(lambda x: np.log2(x)),
upper_odds=lambda x: x.upper_odds.apply(lambda x: np.log2(x)),
)
)
plot_df.head()
# +
g = plot_pointgraph(
plot_df,
x_axis_label="Preprint vs Published log2(Odds Ratio)",
left_arrow_label="Preprint Enriched",
right_arrow_label="Published Enriched",
left_arrow_start=-2,
left_arrow_height=39.5,
right_arrow_start=2,
right_arrow_height=1.5,
arrow_length=6,
left_arrow_label_x=-5,
left_arrow_label_y=38.5,
right_arrow_label_x=5,
right_arrow_label_y=2.5,
limits=(-11, 11),
)
g.save("output/figures/preprint_published_frequency_odds.svg")
g.save("output/figures/preprint_published_frequency_odds.png", dpi=250)
print(g)
# -
count_plot_df = create_lemma_count_df(plot_df, "published", "preprint").assign(
repository=lambda x: pd.Categorical(
x.repository.tolist(), categories=["preprint", "published"]
)
)
count_plot_df.to_csv(
"output/polka_preprint_published_comparison_raw_counts.tsv", sep="\t", index=False
)
count_plot_df.head()
g = plot_bargraph(count_plot_df, plot_df)
g.save("output/figures/preprint_published_frequency_bar.svg")
print(g)
# +
fig_output_path = "output/figures/polka_preprint_published_frequency.png"
fig = plot_point_bar_figure(
"output/figures/preprint_published_frequency_odds.svg",
"output/figures/preprint_published_frequency_bar.svg",
)
# save generated SVG files
svg2png(bytestring=fig.to_str(), write_to=fig_output_path, dpi=75)
Image(fig_output_path)
# -
# # Document Embeddings
# ## Load the Documents
biorxiv_documents_df = pd.read_csv(
"../word_vector_experiment/output/word2vec_output/biorxiv_all_articles_300.tsv.xz",
sep="\t",
)
biorxiv_documents_df.head()
polka_preprints_df = pd.read_csv("output/polka_et_al_biorxiv_embeddings.tsv", sep="\t")
polka_preprints_df.head()
pca_components = pd.read_csv(
Path("../pca_association_experiment/output/word_pca_similarity/pca_components.tsv"),
sep="\t",
)
pca_components.head()
# ## PCA Components
# This section aims to see which principal components have a high association with Polka et al.'s subset. We also aim to see whether linear models can explain which PCs affect preprint prediction.
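# Note: `1 - cdist(A, B, "cosine")` converts SciPy's cosine *distance* into cosine *similarity*, so values near 1 indicate document embeddings that are closely aligned with a principal component.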
document_pca_sim = 1 - cdist(
polka_preprints_df.drop("document", axis=1).values, pca_components.values, "cosine"
)
print(document_pca_sim.shape)
document_pca_sim
document_to_pca_map = {
document: document_pca_sim[idx, :]
for idx, document in enumerate(polka_preprints_df.document.tolist())
}
polka_pca_sim_df = (
pd.DataFrame.from_dict(document_to_pca_map, orient="index")
.rename(index=str, columns={col: f"pc{col+1}" for col in range(int(300))})
.reset_index()
.rename(index=str, columns={"index": "document"})
)
polka_pca_sim_df.to_csv("output/polka_pca_enrichment.tsv", sep="\t")
polka_pca_sim_df = polka_pca_sim_df.assign(label="polka")
polka_pca_sim_df.head()
document_pca_sim = 1 - cdist(
biorxiv_documents_df.drop("document", axis=1).values,
pca_components.values,
"cosine",
)
print(document_pca_sim.shape)
document_pca_sim
document_to_pca_map = {
document: document_pca_sim[idx, :]
for idx, document in enumerate(biorxiv_documents_df.document.tolist())
}
biorxiv_pca_sim_df = (
pd.DataFrame.from_dict(document_to_pca_map, orient="index")
.rename(index=str, columns={col: f"pc{col+1}" for col in range(int(300))})
.reset_index()
.rename(index=str, columns={"index": "document"})
.assign(label="biorxiv")
)
biorxiv_pca_sim_df.head()
# ## PC Regression
# ### Logistic Regression
# The goal here is to determine which PCs separate the bioRxiv subset from Polka et al.'s subset. Because their dataset contains only 60 papers, we downsampled our dataset to 60 papers as well.
dataset_df = biorxiv_pca_sim_df.sample(60, random_state=100).append(polka_pca_sim_df)
dataset_df.head()
model = LogisticRegressionCV(
cv=10, Cs=100, max_iter=1000, penalty="l1", solver="liblinear"
)
model.fit(
StandardScaler().fit_transform(dataset_df[[f"pc{idx+1}" for idx in range(50)]]),
dataset_df["label"],
)
best_result = list(filter(lambda x: x[1] == model.C_, enumerate(model.Cs_)))[0]
print(best_result)
print("Best CV Fold")
print(model.scores_["polka"][:, best_result[0]])
model.scores_["polka"][:, best_result[0]].mean()
model_weights_df = pd.DataFrame.from_dict(
{
"weight": model.coef_[0],
"pc": list(range(1, 51)),
}
)
model_weights_df["pc"] = pd.Categorical(model_weights_df["pc"])
model_weights_df.head()
g = (
p9.ggplot(model_weights_df, p9.aes(x="pc", y="weight"))
+ p9.geom_col(position=p9.position_dodge(width=5), fill="#253494")
+ p9.coord_flip()
+ p9.scale_x_discrete(limits=list(sorted(range(1, 51), reverse=True)))
+ p9.theme_seaborn(context="paper", style="ticks", font_scale=1.1, font="Arial")
+ p9.theme(figure_size=(10, 8))
+ p9.labs(
        title="Regression Model Weights", x="Principal Component", y="Model Weight"
)
)
g.save("output/figures/pca_log_regression_weights.svg")
g.save("output/figures/pca_log_regression_weights.png", dpi=250)
print(g)
fold_features = model.coefs_paths_["polka"].transpose(1, 0, 2)
model_performance_df = pd.DataFrame.from_dict(
{
"feat_num": ((fold_features.astype(bool).sum(axis=1)) > 0).sum(axis=1),
"C": model.Cs_,
"score": model.scores_["polka"].mean(axis=0),
}
)
model_performance_df.head()
# +
fig, ax1 = plt.subplots()
ax1.set_xscale("log")
ax2 = plt.twinx()
ax1.plot(
model_performance_df.C.tolist(),
model_performance_df.feat_num.tolist(),
label="Features",
marker=".",
)
ax1.set_ylabel("# of Features")
ax1.set_xlabel("Inverse Regularization (C)")
ax1.legend(loc=0)
ax2.plot(
model_performance_df.C.tolist(),
model_performance_df.score.tolist(),
label="Score",
marker=".",
color="green",
)
ax2.set_ylabel("Score (Accuracy %)")
ax2.legend(loc=4)
plt.savefig("output/preprint_classifier_results.png")
# -
plot_path = list(
zip(
model.Cs_,
model.scores_["polka"].transpose(),
model.coefs_paths_["polka"].transpose(1, 0, 2),
)
)
data_records = []
for cs in plot_path[33:40]:
model = LogisticRegression(C=cs[0], max_iter=1000, penalty="l1", solver="liblinear")
model.fit(
StandardScaler().fit_transform(dataset_df[[f"pc{idx+1}" for idx in range(50)]]),
dataset_df["label"],
)
data_records.append(
{
"C": cs[0],
"PCs": ",".join(map(str, model.coef_.nonzero()[1] + 1)),
"feat_num": len(model.coef_.nonzero()[1]),
"accuracy": cs[1].mean(),
}
)
model_coefs_df = pd.DataFrame.from_records(data_records)
model_coefs_df
# ### Decision Tree
# Given that logistic regression doesn't return sparse weights here, we may get better insight into this analysis by using a decision tree to determine which PCs are important for prediction.
model = DecisionTreeClassifier(random_state=100)
search_grid = GridSearchCV(
model, {"criterion": ["gini", "entropy"], "max_features": ["auto", None]}, cv=10
)
search_grid.fit(dataset_df[[f"pc{idx+1}" for idx in range(50)]], dataset_df["label"])
print(search_grid.best_params_)
print(search_grid.best_score_)
export_graphviz(
search_grid.best_estimator_,
out_file="output/figures/pca_tree.dot",
feature_names=[f"pc{idx+1}" for idx in range(50)],
class_names=["bioRxiv", "polka et al."],
rotate=True,
)
# ! dot -Tpng output/figures/pca_tree.dot -o output/figures/pca_tree.png
Image(filename="output/figures/pca_tree.png")
# ## Saucie Subset
# Where do the preprints in this subset lie along the SAUCIE map?
saucie_model = SAUCIE(
300,
restore_folder=str(Path("../../pmc/journal_recommendation/output/model").resolve()),
)
coordinates = saucie_model.get_embedding(
Loader(polka_preprints_df.drop("document", axis=1).values)
)
subset_df = pd.DataFrame(coordinates, columns=["dim1", "dim2"])
subset_df.to_csv("output/polka_et_al_saucie_coordinates.tsv", sep="\t", index=False)
subset_df.head()
pmc_data_df = pd.read_csv(
Path("../../pmc/journal_recommendation/output")
/ Path("paper_dataset/paper_dataset_full_tsne.tsv"),
sep="\t",
)
pmc_data_df.head()
pandas2ri.activate()
robjects.globalenv["pmc_data_df"] = robjects.conversion.py2rpy(pmc_data_df)
robjects.globalenv["subset_df"] = robjects.conversion.py2rpy(subset_df)
robjects.r.source("saucie_plot.R")
Image(filename="output/figures/saucie_plot.png")
# +
# Publication Time Analysis
# Get publication dates
url = "https://api.biorxiv.org/pub/2019-11-01/3000-01-01/"
# Get preprint publication dates for 2019 -> 2020
already_downloaded = Path("output/biorxiv_published_dates_post_2019.tsv").exists()
if not already_downloaded:
collection = []
page_size = 100
total = 23948
for i in tqdm.tqdm(range(0, total, page_size), total=total / page_size):
collection += requests.get(url + str(i)).json()["collection"]
published_dates = pd.DataFrame(collection)
published_dates.to_csv(
"output/biorxiv_published_dates_post_2019.tsv", sep="\t", index=False
)
else:
published_dates = pd.read_csv(
"output/biorxiv_published_dates_post_2019.tsv", sep="\t"
)
published_dates = published_dates.assign(
preprint_date=lambda x: pd.to_datetime(x.preprint_date.tolist()),
published_date=lambda x: pd.to_datetime(
x.published_date.apply(lambda y: y[0 : y.index(":")] if ":" in y else y)
),
).assign(time_to_published=lambda x: x.published_date - x.preprint_date)
print(published_dates.shape)
published_dates.head()
# -
polka_preprints_df = polka_preprints_df.assign(
biorxiv_base=lambda x: x.document.apply(lambda y: y.split("_")[0]),
version_count=lambda x: x.document.apply(
lambda y: int(y[y.index("v") + 1 :].split(".")[0])
),
)
polka_preprints_df.head()
polka_published_df = | pd.read_csv("output/polka_et_al_pmcoa_embeddings.tsv", sep="\t") | pandas.read_csv |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import datetime as dt
import collections
import sklearn.preprocessing
import cartopy.crs as ccrs
import cartopy.io.shapereader as shpreader
import matplotlib.animation as animation
import tempfile
from PIL import Image
first_date = dt.date(2020, 3, 1)
## Main
def main():
df = download_data()
countries = get_all_countries(df, min_population=100000)
plot_by_country(df=df, ctype='deaths')
death_rate_chart(df=df, countries=countries, ctype='deaths', num_to_display=30)
## Visualisation
def death_rate_chart(df, countries, ctype, num_to_display=None):
results = pd.DataFrame(index=pd.date_range(start=first_date, end='today'), columns=countries)
for country in countries:
sr = country_series(df, country, ctype, cumsum=True, log=False)
sr /= df[df.countriesAndTerritories == country].iloc[0].popData2018
results[country] = sr
results = results.fillna(0)
sr = results.iloc[-1]
sr = sr.sort_values()
if isinstance(num_to_display, int):
sr = sr[-num_to_display:]
title = '%s per 100,000 for top %d countries' % (ctype.title(), num_to_display)
else:
title = '%s per 100,000' % (ctype.title())
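    # sr holds per-capita rates (cumulative count / popData2018); scale to cases per 100,000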
sr *= 100000
l = len(sr)
labels = clean_labels(sr.index)
spacing = [(1/l)*i for i in range(l)]
colours = matplotlib.cm.hsv(sr / float(max(sr)))
fig, ax = plt.subplots()
plt.barh(spacing, width=sr.to_list(), height=(1/l)*0.92, tick_label=labels, color='orange')
plt.yticks(fontsize=8)
plt.title(title)
plt.xlabel(ctype.title())
# plt.show()
plt.savefig('bar_chart.png', bbox_inches='tight', dpi=300)
def plot_by_country(df, ctype):
df = normalised_progression_by_country(df, get_all_countries(df), ctype)
countries_shp = shpreader.natural_earth(resolution='110m', category='cultural', name='admin_0_countries')
cmap = matplotlib.cm.get_cmap('Spectral')
saved_figs = []
limit=5
for i in range(df.shape[0]):
tfile = tempfile.TemporaryFile()
ax = plt.axes(projection=ccrs.PlateCarree(), label=str(i))
for country in shpreader.Reader(countries_shp).records():
c = clean_country(country.attributes['NAME_LONG'])
            if c is None:
rgba = (0.5, 0.5, 0.5, 1.0)
else:
rgba = cmap(df[c][i])
ax.add_geometries([country.geometry], ccrs.PlateCarree(), facecolor=rgba, label=country.attributes['NAME_LONG'])
plt.title(str(df.index[i]).split(' ')[0])
plt.savefig(tfile, dpi=400, bbox_inches='tight')
saved_figs.append(tfile)
plt.close()
fig = plt.figure()
ims = []
for temp_img in saved_figs:
X = Image.open(temp_img)
ims.append([plt.imshow(X, animated=True)])
ani = animation.ArtistAnimation(fig, ims, interval=800, blit=True, repeat_delay=1000)
plt.axis('off')
plt.tight_layout(pad=0)
# plt.show()
ani.save('animation.gif', writer='imagemagick', fps=2, dpi=400)
# Writer = animation.writers['ffmpeg']
# writer = Writer(fps=2, metadata=dict(artist='Me'), bitrate=100000)
# ani.save('/Users/daniel/Desktop/animation.mp4', writer=writer, dpi=400)
## Data acquisition and processing
def clean_labels(labels):
results = []
for label in labels:
if label == 'Cases_on_an_international_conveyance_Japan':
results.append('Japan')
elif label == 'United_States_of_America':
results.append('United States')
else:
results.append(label.replace('_', ' '))
return results
def download_data():
covid_raw_pd = | pd.read_csv('https://opendata.ecdc.europa.eu/covid19/casedistribution/csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Tools for correcting energy-balance components to improve energy balance
closure and other data management, validation and scientific analysis tools.
"""
from pathlib import Path
import xarray
import numpy as np
import pandas as pd
from sklearn import linear_model
from sklearn.metrics import mean_squared_error
from refet.calcs import _ra_daily, _rso_simple
import refet
from .data import Data
from .plot import Plot
from .util import monthly_resample, Convert
class QaQc(Plot, Convert):
"""
Numerical routines for correcting daily energy balance closure
for eddy covariance data and other data analysis tools.
    Two routines are provided for improving energy balance closure by adjusting
    the turbulent fluxes (latent energy and sensible heat): the Energy Balance Ratio
method (modified from `FLUXNET
<https://fluxnet.fluxdata.org/data/fluxnet2015-dataset/data-processing/>`__)
and the Bowen Ratio method.
The :obj:`QaQc` object also has multiple tools for temporal frequency
aggregation and resampling, estimation of climatic and statistical
variables (e.g. ET and potential shortwave radiation), downloading gridMET
reference ET, managing data and metadata, interactive validation plots, and
managing a structure for input and output data files. Input data is
expected to be a :obj:`.Data` instance or a
:obj:`pandas.DataFrame`.
Keyword Arguments:
data (:obj:`.Data`): :obj:`.Data` instance to create :obj:`.QaQc`
instance.
        drop_gaps (bool): default :obj:`True`. If :obj:`True`, automatically
            filter out daily values for each variable on days where the fraction
            of sub-daily measurements present is less than ``daily_frac``.
daily_frac (float): default 1.00. Fraction of sub-daily data required
otherwise the daily value will be filtered out if ``drop_gaps`` is
:obj:`True`. E.g. if ``daily_frac = 0.5`` and the input data is
hourly, then data on days with less than 12 hours of data will be
forced to null within :attr:`QaQc.df`. This is important because
            systematic diurnal gaps will affect the automatic resampling that
occurs when creating a :obj:`QaQc` instance and the daily data is
used in closure corrections, other calculations, and plots. If
sub-daily linear interpolation is applied to energy balance
variables the gaps are counted *after* the interpolation.
max_interp_hours (None or float): default 2. Length of largest gap to
fill with linear interpolation in energy balance variables if
            input data's temporal frequency is less than daily. This value will
be used to fill gaps when :math:`Rn > 0` or :math:`Rn` is missing
during each day.
max_interp_hours_night (None or float): default 4. Length of largest gap
to fill with linear interpolation in energy balance variables if
            input data's temporal frequency is less than daily when
:math:`Rn < 0` within 12:00PM-12:00PM daily intervals.
Attributes:
agg_dict (dict): Dictionary with internal variable names as keys and
method of temporal resampling (e.g. "mean" or "sum") as values.
config (:obj:`configparser.ConfigParser`): Config parser instance
created from the data within the config.ini file.
config_file (:obj:`pathlib.Path`): Absolute path to config.ini file
used for initialization of the :obj:`fluxdataqaqc.Data` instance
used to create the :obj:`QaQc` instance.
corrected (bool): False until an energy balance closure correction has
been run by calling :meth:`QaQc.correct_data`.
corr_methods (tuple): List of Energy Balance Closure correction routines
usable by :meth:`QaQc.correct_data`.
corr_meth (str or None): Name of most recently applied energy balance
closure correction.
elevation (float): Site elevation in meters.
gridMET_exists (bool): True if path to matching gridMET time series
file exists on disk and has time series for reference ET and
precipitation and the dates for these fully overlap with the energy
balance variables, i.e. the date index of :attr:`QaQc.df`.
gridMET_meta (dict): Dictionary with information for gridMET variables
that may be downloaded using :meth:`QaQc.download_gridMET`.
inv_map (dict): Dictionary with input climate file names as keys and
internal names as values. May only include pairs when they differ.
latitude (float): Site latitude in decimal degrees.
longitude (float): Site longitude in decimal degrees.
out_dir (pathlib.Path): Default directory to save output of
:meth:`QaQc.write` or :meth:`QaQc.plot` methods.
        n_samples_per_day (int): If the initial time series temporal frequency
            is sub-daily then this value will be updated to the number of samples
detected per day, useful for post-processing based on the count of
sub-daily gaps in energy balance variables, e.g. "LE_subday_gaps".
plot_file (pathlib.Path or None): path to plot file once it is
created/saved by :meth:`QaQc.plot`.
site_id (str): Site ID.
temporal_freq (str): Temporal frequency of initial (as found in input
climate file) data as determined by :func:`pandas.infer_freq`.
units (dict): Dictionary with internal variable names as keys and
units as found in config as values.
variables (dict): Dictionary with internal variable names as keys and
names as found in the input data as values.
Note:
        Upon initialization of a :obj:`QaQc` instance the temporal frequency of
        the input data is checked using :func:`pandas.infer_freq`, which does not
        always correctly parse datetime indices. If it is not able to correctly
        determine the temporal frequency, the time series will be resampled to
        daily frequency; if the data is in fact already at daily frequency it
        will be unchanged. In this case :attr:`QaQc.temporal_freq` will be
        set to "na".
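    Example:
        A minimal sketch of the intended workflow; the config path is
        hypothetical and the import path may differ depending on how the
        package is installed::
            >>> from fluxdataqaqc import Data, QaQc
            >>> d = Data('config.ini')
            >>> q = QaQc(d, drop_gaps=True, daily_frac=0.75)
            >>> q.df.head()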
"""
# dictionary used for temporally aggregating variables
agg_dict = {
'ASCE_ETo': 'sum',
'ASCE_ETr': 'sum',
'energy': 'mean',
'flux': 'mean',
'flux_corr': 'mean',
'br': 'mean',
'ET': 'sum',
'ET_corr': 'sum',
'ET_gap': 'sum',
'ET_fill': 'sum',
'ET_fill_val': 'sum',
'ET_user_corr': 'sum',
'ebr': 'mean',
'ebr_corr': 'mean',
'ebr_user_corr': 'mean',
'ebr_5day_clim': 'mean',
'gridMET_ETr': 'sum',
'gridMET_ETo': 'sum',
'gridMET_prcp': 'sum',
'lw_in': 'mean',
't_avg': 'mean',
't_max': 'mean',
't_min': 'mean',
't_dew': 'mean',
'rso': 'mean',
'sw_pot': 'mean',
'sw_in': 'mean',
'vp': 'mean',
'vpd': 'mean',
'ppt': 'sum',
'ppt_corr': 'sum',
'ws': 'mean',
'Rn': 'mean',
'Rn_subday_gaps': 'sum',
'rh' : 'mean',
'sw_out': 'mean',
'lw_out': 'mean',
'G': 'mean',
'G_subday_gaps': 'sum',
'LE': 'mean',
'LE_corr': 'mean',
'LE_subday_gaps': 'sum',
'LE_user_corr': 'mean',
'H': 'mean',
'H_corr': 'mean',
'H_subday_gaps': 'sum',
'H_user_corr': 'mean',
}
# EBR correction methods available
corr_methods = (
'ebr',
'br',
'lin_regress'
)
# gridMET dict, keys are names which can be passed to download_gridMET
gridMET_meta = {
'ETr': {
'nc_suffix': 'agg_met_etr_1979_CurrentYear_CONUS.nc#fillmismatch',
'name': 'daily_mean_reference_evapotranspiration_alfalfa',
'rename': 'gridMET_ETr',
'units': 'mm'
},
'pr': {
'nc_suffix': 'agg_met_pr_1979_CurrentYear_CONUS.nc#fillmismatch',
'name': 'precipitation_amount',
'rename': 'gridMET_prcp',
'units': 'mm'
},
'pet': {
'nc_suffix': 'agg_met_pet_1979_CurrentYear_CONUS.nc#fillmismatch',
'name': 'daily_mean_reference_evapotranspiration_grass',
'rename': 'gridMET_ETo',
'units': 'mm'
},
'sph': {
'nc_suffix': 'agg_met_sph_1979_CurrentYear_CONUS.nc#fillmismatch',
'name': 'daily_mean_specific_humidity',
'rename': 'gridMET_q',
'units': 'kg/kg'
},
'srad': {
'nc_suffix': 'agg_met_srad_1979_CurrentYear_CONUS.nc#fillmismatch',
'name': 'daily_mean_shortwave_radiation_at_surface',
'rename': 'gridMET_srad',
'units': 'w/m2'
},
'vs': {
'nc_suffix': 'agg_met_vs_1979_CurrentYear_CONUS.nc#fillmismatch',
'name': 'daily_mean_wind_speed',
'rename': 'gridMET_u10',
'units': 'm/s'
},
'tmmx': {
'nc_suffix': 'agg_met_tmmx_1979_CurrentYear_CONUS.nc#fillmismatch',
'name': 'daily_maximum_temperature',
'rename': 'gridMET_tmax',
'units': 'K'
},
'tmmn': {
'nc_suffix': 'agg_met_tmmn_1979_CurrentYear_CONUS.nc#fillmismatch',
'name': 'daily_minimum_temperature',
'rename': 'gridMET_tmin',
'units': 'K'
},
}
# all potentially calculated variables for energy balance corrections
_eb_calc_vars = (
'br',
'br_user_corr',
'energy',
'energy_corr',
'ebr',
'ebr_corr',
'ebr_user_corr',
'ebc_cf',
'ebr_5day_clim',
'flux',
'flux_corr',
'flux_user_corr',
'G_corr',
'H_corr',
'LE_corr',
'Rn_corr'
)
# potentially calculated variables for ET
_et_calc_vars = (
'ET',
'ET_corr',
'ET_user_corr'
)
# potentially calculated ET gap fill variables
_et_gap_fill_vars = (
'ET_gap',
'ET_fill',
'ET_fill_val',
'ETrF',
'ETrF_filtered',
'EToF',
'EToF_filtered'
)
def __init__(self, data=None, drop_gaps=True, daily_frac=1.00,
max_interp_hours=2, max_interp_hours_night=4):
if isinstance(data, Data):
self.config_file = data.config_file
self.config = data.config
            data.df.head()  # accessing .df loads the data and calculates vp/vpd if needed
self._df = data.df
self.variables = data.variables
self.units = data.units
self.elevation = data.elevation
self.latitude = data.latitude
self.longitude = data.longitude
self.out_dir = data.out_dir
self.site_id = data.site_id
# flip variable naming dict for internal use
self.inv_map = {
v: k for k, v in self.variables.items() if (
not k in self._df.columns
)
}
# using 'G' in multiple g plot may overwrite G name internally
if not 'G' in self.inv_map.values():
user_G_name = self.variables.get('G')
if user_G_name:
self.inv_map[user_G_name] = 'G'
# data will be loaded if it has not yet via Data.df
self.temporal_freq = self._check_daily_freq(
drop_gaps, daily_frac, max_interp_hours, max_interp_hours_night
)
# check units, convert if possible for energy balance, ppt, Rs, vp,
self._check_convert_units()
self._check_gridMET()
# assume energy balance vars exist, will be validated upon corr
self._has_eb_vars = True
elif data is not None:
print('{} is not a valid input type'.format(type(data)))
raise TypeError("Must assign a fluxdataqaqc.data.Data object")
else:
self._df = None
self.corrected = False
self.corr_meth = None
def daily_ASCE_refET(self, reference='short', anemometer_height=None):
"""
Calculate daily ASCE standardized short (ETo) or tall (ETr) reference
ET from input data and wind measurement height.
The resulting time series will automatically be merged into the
:attr:`.Data.df` dataframe named "ASCE_ETo" or "ASCE_ETr" respectively.
Keyword Arguments:
reference (str): default "short", calculate tall or short ASCE
reference ET.
            anemometer_height (float or None): wind measurement height in
                meters, default :obj:`None`. If :obj:`None` then look for the
"anemometer_height" entry in the **METADATA** section of the
config.ini, if not there then print a warning and use 2 meters.
Returns:
:obj:`None`
Note:
            If the hourly ASCE variables were previously calculated from a
:obj:`.Data` instance they will be overwritten as they are saved
with the same names.
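        Example:
            Illustrative only, assuming ``q`` is a :obj:`QaQc` instance whose
            input data includes the required variables (vp, ws, sw_in, t_min,
            t_max)::
                >>> q.daily_ASCE_refET(reference='tall', anemometer_height=3.0)
                >>> q.df['ASCE_ETr'].head()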
"""
df = self.df.rename(columns=self.inv_map)
req_vars = ['vp', 'ws', 'sw_in', 't_min', 't_max']
if not set(req_vars).issubset(df.columns):
print('Missing one or more required variables, cannot compute')
return
if anemometer_height is None:
anemometer_height = self.config.get(
'METADATA', 'anemometer_height', fallback=None
)
if anemometer_height is None:
print(
'WARNING: anemometer height was not given and not found in '
                'the config file metadata, proceeding with a height of 2 m'
)
anemometer_height = 2
# RefET will convert to MJ-m2-hr
input_units = {
'rs': 'w/m2'
}
length = len(df.t_min)
tmin = df.t_min
tmax = df.t_max
rs = df.sw_in
ea = df.vp
uz = df.ws
zw = np.full(length, anemometer_height)
lat = np.full(length, self.latitude)
doy = df.index.dayofyear
elev = np.full(length, self.elevation)
REF = refet.Daily(
tmin,
tmax,
ea,
rs,
uz,
zw,
elev,
lat,
doy,
method='asce',
input_units=input_units,
)
if reference == 'short':
ret = REF.eto()
name = 'ASCE_ETo'
elif reference == 'tall':
ret = REF.etr()
name = 'ASCE_ETr'
# can add directly into QaQc.df
df[name] = ret
self._df = df.rename(columns=self.variables)
self.variables[name] = name
self.units[name] = 'mm'
def _check_convert_units(self):
"""
        Verify that units are recognized for variables in QaQc.allowable_units,
        then verify that they have the required units as in QaQc.required_units
        and, if not, convert them.
Conversions are handled by util.Convert.convert class method.
"""
# force all input units to lower case
for k, v in self.units.items():
self.units[k] = v.lower()
        # can add check/rename unit aliases, e.g. C or c or celsius, etc...
df = self._df.rename(columns=self.inv_map)
for v, u in self.units.items():
if not v in QaQc.required_units.keys():
# variable is not required to have any particular unit, skip
continue
elif not u in QaQc.allowable_units[v]:
print('ERROR: {} units are not recognizable for var: {}\n'
'allowable input units are: {}\nNot converting.'.format(
u, v, ','.join(QaQc.allowable_units[v])
)
)
elif not u == QaQc.required_units[v]:
# do conversion, update units
# pass variable, initial unit, unit to be converted to, df
df = Convert.convert(v, u, QaQc.required_units[v], df)
self.units[v] = QaQc.required_units[v]
self._df = df
def _check_gridMET(self):
"""
        Check if gridMET has been downloaded (file path in config) and, if so,
        also check that its dates fully cover those of the station data. If
        either condition is not met then set :attr:`gridMET_exists` to False,
        otherwise assign True.
Arguments:
None
Returns:
None
"""
gridfile = self.config.get('METADATA','gridMET_file_path',fallback=None)
if gridfile is None:
self.gridMET_exists = False
else:
try:
grid_df = pd.read_csv(
gridfile, parse_dates=True, index_col='date'
)
gridMET_dates = grid_df.index
station_dates = self.df.index
# add var names and units to attributes
for val in grid_df.columns:
meta = [
v for k,v in QaQc.gridMET_meta.items() if \
v['rename'] == val
][0]
self.variables[meta['rename']] = meta['rename']
self.units[meta['rename']] = meta['units']
                # flag False if ETr or ETo was not downloaded for our purposes
if not {'gridMET_ETr','gridMET_ETo'}.issubset(grid_df.columns):
self.gridMET_exists = False
elif station_dates.isin(gridMET_dates).all():
self.gridMET_exists = True
# some gridMET exists but needs to be updated for coverage
else:
self.gridMET_exists = False
except:
print('WARNING: unable to find/read gridMET file\n {}'.format(
gridfile)
)
self.gridMET_exists = False
def download_gridMET(self, variables=None):
"""
Download reference ET (alfalfa and grass) and precipitation from
gridMET for all days in flux station time series by default.
Also has ability to download other specific gridMET variables by
passing a list of gridMET variable names. Possible variables and their
long form can be found in :attr:`QaQc.gridMET_meta`.
Upon download gridMET time series for the nearest gridMET cell will be
merged into the instances dataframe attibute :attr:`QaQc.df` and all
gridMET variable names will have the prefix "gridMET\_" for
identification.
The gridMET time series file will be saved to a subdirectory called
"gridMET_data" within the directory that contains the config file
for the current :obj:`QaQc` instance and named with the site ID and
gridMET cell centroid lat and long coordinates in decimal degrees.
Arguments:
variables (None, str, list, or tuple): default None. List of gridMET
variable names to download, if None download ETr and
precipitation. See the keys of the :attr:`QaQc.gridMET_meta`
dictionary for a list of all variables that can be downloaded
by this method.
Returns:
:obj:`None`
Note:
Any previously downloaded gridMET time series will be overwritten
            when calling the method; however, if using the gap filling
method of the "ebr" correction routine the download will not
overwrite currently existing data so long as gridMET reference ET
and precipitation is on disk and its path is properly set in the
config file.
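        Example:
            Illustrative only; ``q`` is an existing :obj:`QaQc` instance and the
            variable names are keys of :attr:`QaQc.gridMET_meta`::
                >>> q.download_gridMET(variables=['pr', 'pet'])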
"""
# opendap thredds server
server_prefix =\
'http://thredds.northwestknowledge.net:8080/thredds/dodsC/'
if variables is None:
variables = ['ETr', 'pet', 'pr']
elif not isinstance(variables, (str,list,tuple)):
print(
'ERROR: {} is not a valid gridMET variable '
'or list of variable names, valid options:'
'\n{}'.format(
variables, ', '.join([v for v in QaQc.gridMET_meta])
)
)
return
if isinstance(variables, str):
            variables = [variables]  # wrap the single name; list(str) would split it into characters
station_dates = self.df.index
grid_dfs = []
for i,v in enumerate(variables):
if not v in QaQc.gridMET_meta:
print(
'ERROR: {} is not a valid gridMET variable, '
'valid options: {}'.format(
v, ', '.join([v for v in QaQc.gridMET_meta])
)
)
continue
meta = QaQc.gridMET_meta[v]
self.variables[meta['rename']] = meta['rename']
self.units[meta['rename']] = meta['units']
print('Downloading gridMET var: {}\n'.format(meta['name']))
netcdf = '{}{}'.format(server_prefix, meta['nc_suffix'])
ds = xarray.open_dataset(netcdf).sel(
lon=self.longitude, lat=self.latitude, method='nearest'
).drop('crs')
df = ds.to_dataframe().loc[station_dates].rename(
columns={meta['name']:meta['rename']}
)
df.index.name = 'date' # ensure date col name is 'date'
# on first variable (if multiple) grab gridcell centroid coords
if i == 0:
lat_centroid = df.lat[0]
lon_centroid = df.lon[0]
df.drop(['lat', 'lon'], axis=1, inplace=True)
grid_dfs.append(df)
# combine data
df = pd.concat(grid_dfs, axis=1)
# save gridMET time series to CSV in subdirectory where config file is
gridMET_file = self.config_file.parent.joinpath(
'gridMET_data'
).joinpath('{}_{:.4f}N_{:.4f}W.csv'.format(
self.site_id, lat_centroid, lon_centroid
)
)
gridMET_file.parent.mkdir(parents=True, exist_ok=True)
self.config.set(
'METADATA', 'gridMET_file_path', value=str(gridMET_file)
)
df.to_csv(gridMET_file)
# rewrite config with updated gridMET file path
with open(str(self.config_file), 'w') as outf:
self.config.write(outf)
# drop previously calced vars for replacement, no join duplicates
self._df = _drop_cols(self._df, variables)
self._df = self._df.join(df)
self.gridMET_exists = True
def _check_daily_freq(self, drop_gaps, daily_frac, max_interp_hours,
max_interp_hours_night):
"""
        Check the temporal frequency of the input Data and resample to daily if not already daily.
Note:
            If one or more sub-daily values are missing for a day the entire
day will be replaced with a null (:obj:`numpy.nan`).
If user QC values for filtering data are present they will also be
resampled to daily means, however this should not be an issue as
the filtering step occurs in a :obj:`fluxdataqaqc.Data` object.
"""
# rename columns to internal names
df = self._df.rename(columns=self.inv_map)
if not isinstance(df, pd.DataFrame):
return
freq = | pd.infer_freq(df.index) | pandas.infer_freq |
import pytest
import numpy as np
import pandas as pd
from samplics.utils.checks import (
assert_brr_number_psus,
assert_in_range,
assert_not_unique,
assert_proportions,
assert_response_status,
assert_weights,
)
@pytest.mark.parametrize(
"x1, x2, x3, x4",
[(35, -35, 0.35, [0, 1, 39])],
)
def test_in_range_ints_successes(x1, x2, x3, x4):
assert assert_in_range(low=10, high=39, x=x1)
assert assert_in_range(low=-39, high=-10, x=x2)
assert assert_in_range(low=0, high=1, x=x3)
assert assert_in_range(low=-10, high=39, x=x4)
def test_in_range_for_ints_fails():
assert not assert_in_range(low=10, high=39, x=5)
assert not assert_in_range(low=-39, high=-10, x=-135)
assert not assert_in_range(low=0, high=1, x=1.35)
assert not assert_in_range(low=-10, high=39, x=-30)
assert not assert_in_range(low=-10, high=39, x=1000)
def test_in_range_for_floats_successes():
assert assert_in_range(low=10.1, high=39.0, x=35.22)
assert assert_in_range(low=-39.2, high=-10.1, x=-35.35)
assert assert_in_range(low=0.0, high=1.0, x=0.35)
assert assert_in_range(low=-10.0, high=39.0, x=0.0)
assert assert_in_range(low=-10.0, high=39.0, x=1.9)
assert assert_in_range(low=-10.0, high=39.0, x=0.039)
def test_in_range_for_floats_fails():
assert not assert_in_range(low=10.0, high=39.0, x=5.5)
assert not assert_in_range(low=-39.3, high=-10.1, x=-135.23)
assert not assert_in_range(low=0.0, high=1.00, x=1.35)
assert not assert_in_range(low=-10.0, high=39.33, x=100.01)
assert not assert_in_range(low=-10.0, high=39.3, x=39.33)
@pytest.mark.parametrize(
"x1",
[np.array([20, 17, 11, 20, 23]), pd.Series([20, 17, 11, 20, 23])],
)
@pytest.mark.parametrize(
"x2",
[
np.array([-2, 17, -11, 20, 23]),
pd.Series([-10, 17, -11, 20, 23]),
| pd.Series([-10, 17, -11, 20, 23]) | pandas.Series |
from __future__ import division
from datetime import datetime
import sys
if sys.version_info < (3, 3):
import mock
else:
from unittest import mock
import pandas as pd
import numpy as np
from nose.tools import assert_almost_equal as aae
import bt
import bt.algos as algos
def test_algo_name():
class TestAlgo(algos.Algo):
pass
actual = TestAlgo()
assert actual.name == 'TestAlgo'
class DummyAlgo(algos.Algo):
def __init__(self, return_value=True):
self.return_value = return_value
self.called = False
def __call__(self, target):
self.called = True
return self.return_value
def test_algo_stack():
algo1 = DummyAlgo(return_value=True)
algo2 = DummyAlgo(return_value=False)
algo3 = DummyAlgo(return_value=True)
target = mock.MagicMock()
stack = bt.AlgoStack(algo1, algo2, algo3)
actual = stack(target)
assert not actual
assert algo1.called
assert algo2.called
assert not algo3.called
def test_run_once():
algo = algos.RunOnce()
assert algo(None)
assert not algo(None)
assert not algo(None)
def test_run_period():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunPeriod()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
target.now = None
assert not algo(target)
# run on first date
target.now = dts[0]
assert not algo(target)
# run on first supplied date
target.now = dts[1]
assert algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert not algo(target)
algo = algos.RunPeriod(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
# run on first date
target.now = dts[0]
assert not algo(target)
# first supplied date
target.now = dts[1]
assert not algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert algo(target)
# date not in index
target.now = datetime(2009, 2, 15)
assert not algo(target)
def test_run_daily():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunDaily()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('',[algo]),
data
)
target.data = backtest.data
target.now = dts[1]
assert algo(target)
def test_run_weekly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunWeekly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert not algo(target)
# new week
target.now = dts[3]
assert algo(target)
algo = algos.RunWeekly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert algo(target)
# new week
target.now = dts[3]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8),datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_monthly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunMonthly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert not algo(target)
# new month
target.now = dts[31]
assert algo(target)
algo = algos.RunMonthly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert algo(target)
# new month
target.now = dts[31]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_quarterly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunQuarterly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of quarter
target.now = dts[89]
assert not algo(target)
# new quarter
target.now = dts[90]
assert algo(target)
algo = algos.RunQuarterly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of quarter
target.now = dts[89]
assert algo(target)
# new quarter
target.now = dts[90]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_yearly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunYearly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of year
target.now = dts[364]
assert not algo(target)
# new year
target.now = dts[365]
assert algo(target)
algo = algos.RunYearly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of year
target.now = dts[364]
assert algo(target)
# new year
target.now = dts[365]
assert not algo(target)
def test_run_on_date():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunOnDate('2010-01-01', '2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-03')
assert not algo(target)
def test_rebalance():
algo = algos.Rebalance()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 1000
assert s.capital == 0
c1 = s['c1']
assert c1.value == 1000
assert c1.position == 10
assert c1.weight == 1.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 1000
assert s.capital == 0
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 1000
assert c2.position == 10
assert c2.weight == 1.
def test_rebalance_with_commissions():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=3)
data = | pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100) | pandas.DataFrame |
from datetime import datetime, timedelta
import dateutil
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.ccalendar import DAYS, MONTHS
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.compat import lrange, range, zip
import pandas as pd
from pandas import DataFrame, Series, Timestamp
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.indexes.datetimes import date_range
from pandas.core.indexes.period import Period, PeriodIndex, period_range
from pandas.core.resample import _get_period_range_edges
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, assert_series_equal)
import pandas.tseries.offsets as offsets
@pytest.fixture()
def _index_factory():
return period_range
@pytest.fixture
def _series_name():
return 'pi'
class TestPeriodIndex(object):
@pytest.mark.parametrize('freq', ['2D', '1H', '2H'])
@pytest.mark.parametrize('kind', ['period', None, 'timestamp'])
def test_asfreq(self, series_and_frame, freq, kind):
# GH 12884, 15944
# make sure .asfreq() returns PeriodIndex (except kind='timestamp')
obj = series_and_frame
if kind == 'timestamp':
expected = obj.to_timestamp().resample(freq).asfreq()
else:
start = obj.index[0].to_timestamp(how='start')
end = (obj.index[-1] + obj.index.freq).to_timestamp(how='start')
new_index = date_range(start=start, end=end, freq=freq,
closed='left')
expected = obj.to_timestamp().reindex(new_index).to_period(freq)
result = obj.resample(freq, kind=kind).asfreq()
assert_almost_equal(result, expected)
def test_asfreq_fill_value(self, series):
# test for fill value during resampling, issue 3715
s = series
new_index = date_range(s.index[0].to_timestamp(how='start'),
(s.index[-1]).to_timestamp(how='start'),
freq='1H')
expected = s.to_timestamp().reindex(new_index, fill_value=4.0)
result = s.resample('1H', kind='timestamp').asfreq(fill_value=4.0)
assert_series_equal(result, expected)
frame = s.to_frame('value')
new_index = date_range(frame.index[0].to_timestamp(how='start'),
(frame.index[-1]).to_timestamp(how='start'),
freq='1H')
expected = frame.to_timestamp().reindex(new_index, fill_value=3.0)
result = frame.resample('1H', kind='timestamp').asfreq(fill_value=3.0)
assert_frame_equal(result, expected)
@pytest.mark.parametrize('freq', ['H', '12H', '2D', 'W'])
@pytest.mark.parametrize('kind', [None, 'period', 'timestamp'])
@pytest.mark.parametrize('kwargs', [dict(on='date'), dict(level='d')])
def test_selection(self, index, freq, kind, kwargs):
# This is a bug, these should be implemented
# GH 14008
rng = np.arange(len(index), dtype=np.int64)
df = DataFrame({'date': index, 'a': rng},
index=pd.MultiIndex.from_arrays([rng, index],
names=['v', 'd']))
msg = ("Resampling from level= or on= selection with a PeriodIndex is"
r" not currently supported, use \.set_index\(\.\.\.\) to"
" explicitly set index")
with pytest.raises(NotImplementedError, match=msg):
df.resample(freq, kind=kind, **kwargs)
@pytest.mark.parametrize('month', MONTHS)
@pytest.mark.parametrize('meth', ['ffill', 'bfill'])
@pytest.mark.parametrize('conv', ['start', 'end'])
@pytest.mark.parametrize('targ', ['D', 'B', 'M'])
def test_annual_upsample_cases(self, targ, conv, meth, month,
simple_period_range_series):
ts = simple_period_range_series(
'1/1/1990', '12/31/1991', freq='A-%s' % month)
result = getattr(ts.resample(targ, convention=conv), meth)()
expected = result.to_timestamp(targ, how=conv)
expected = expected.asfreq(targ, meth).to_period()
assert_series_equal(result, expected)
def test_basic_downsample(self, simple_period_range_series):
ts = simple_period_range_series('1/1/1990', '6/30/1995', freq='M')
result = ts.resample('a-dec').mean()
expected = ts.groupby(ts.index.year).mean()
expected.index = period_range('1/1/1990', '6/30/1995', freq='a-dec')
assert_series_equal(result, expected)
# this is ok
assert_series_equal(ts.resample('a-dec').mean(), result)
assert_series_equal(ts.resample('a').mean(), result)
@pytest.mark.parametrize('rule,expected_error_msg', [
('a-dec', '<YearEnd: month=12>'),
('q-mar', '<QuarterEnd: startingMonth=3>'),
('M', '<MonthEnd>'),
('w-thu', '<Week: weekday=3>')
])
def test_not_subperiod(
self, simple_period_range_series, rule, expected_error_msg):
# These are incompatible period rules for resampling
ts = simple_period_range_series('1/1/1990', '6/30/1995', freq='w-wed')
msg = ("Frequency <Week: weekday=2> cannot be resampled to {}, as they"
" are not sub or super periods").format(expected_error_msg)
with pytest.raises(IncompatibleFrequency, match=msg):
ts.resample(rule).mean()
@pytest.mark.parametrize('freq', ['D', '2D'])
def test_basic_upsample(self, freq, simple_period_range_series):
ts = simple_period_range_series('1/1/1990', '6/30/1995', freq='M')
result = ts.resample('a-dec').mean()
resampled = result.resample(freq, convention='end').ffill()
expected = result.to_timestamp(freq, how='end')
expected = expected.asfreq(freq, 'ffill').to_period(freq)
assert_series_equal(resampled, expected)
def test_upsample_with_limit(self):
rng = period_range('1/1/2000', periods=5, freq='A')
ts = Series(np.random.randn(len(rng)), rng)
result = ts.resample('M', convention='end').ffill(limit=2)
expected = ts.asfreq('M').reindex(result.index, method='ffill',
limit=2)
assert_series_equal(result, expected)
def test_annual_upsample(self, simple_period_range_series):
ts = simple_period_range_series('1/1/1990', '12/31/1995', freq='A-DEC')
df = DataFrame({'a': ts})
rdf = df.resample('D').ffill()
exp = df['a'].resample('D').ffill()
assert_series_equal(rdf['a'], exp)
rng = period_range('2000', '2003', freq='A-DEC')
ts = Series([1, 2, 3, 4], index=rng)
result = ts.resample('M').ffill()
ex_index = period_range('2000-01', '2003-12', freq='M')
expected = ts.asfreq('M', how='start').reindex(ex_index,
method='ffill')
assert_series_equal(result, expected)
@pytest.mark.parametrize('month', MONTHS)
@pytest.mark.parametrize('target', ['D', 'B', 'M'])
@pytest.mark.parametrize('convention', ['start', 'end'])
def test_quarterly_upsample(self, month, target, convention,
simple_period_range_series):
freq = 'Q-{month}'.format(month=month)
ts = simple_period_range_series('1/1/1990', '12/31/1995', freq=freq)
result = ts.resample(target, convention=convention).ffill()
expected = result.to_timestamp(target, how=convention)
expected = expected.asfreq(target, 'ffill').to_period()
assert_series_equal(result, expected)
@pytest.mark.parametrize('target', ['D', 'B'])
@pytest.mark.parametrize('convention', ['start', 'end'])
def test_monthly_upsample(self, target, convention,
simple_period_range_series):
ts = simple_period_range_series('1/1/1990', '12/31/1995', freq='M')
result = ts.resample(target, convention=convention).ffill()
expected = result.to_timestamp(target, how=convention)
expected = expected.asfreq(target, 'ffill').to_period()
assert_series_equal(result, expected)
def test_resample_basic(self):
# GH3609
s = Series(range(100), index=date_range(
'20130101', freq='s', periods=100, name='idx'), dtype='float')
s[10:30] = np.nan
index = PeriodIndex([
Period('2013-01-01 00:00', 'T'),
Period('2013-01-01 00:01', 'T')], name='idx')
expected = Series([34.5, 79.5], index=index)
result = s.to_period().resample('T', kind='period').mean()
assert_series_equal(result, expected)
result2 = s.resample('T', kind='period').mean()
assert_series_equal(result2, expected)
@pytest.mark.parametrize('freq,expected_vals', [('M', [31, 29, 31, 9]),
('2M', [31 + 29, 31 + 9])])
def test_resample_count(self, freq, expected_vals):
# GH12774
series = Series(1, index=pd.period_range(start='2000', periods=100))
result = series.resample(freq).count()
expected_index = pd.period_range(start='2000', freq=freq,
periods=len(expected_vals))
expected = Series(expected_vals, index=expected_index)
assert_series_equal(result, expected)
def test_resample_same_freq(self, resample_method):
# GH12770
series = Series(range(3), index=pd.period_range(
start='2000', periods=3, freq='M'))
expected = series
result = getattr(series.resample('M'), resample_method)()
assert_series_equal(result, expected)
def test_resample_incompat_freq(self):
msg = ("Frequency <MonthEnd> cannot be resampled to <Week: weekday=6>,"
" as they are not sub or super periods")
with pytest.raises(IncompatibleFrequency, match=msg):
Series(range(3), index=pd.period_range(
start='2000', periods=3, freq='M')).resample('W').mean()
def test_with_local_timezone_pytz(self):
# see gh-5430
local_timezone = pytz.timezone('America/Los_Angeles')
start = datetime(year=2013, month=11, day=1, hour=0, minute=0,
tzinfo=pytz.utc)
# 1 day later
end = datetime(year=2013, month=11, day=2, hour=0, minute=0,
tzinfo=pytz.utc)
index = pd.date_range(start, end, freq='H')
series = Series(1, index=index)
series = series.tz_convert(local_timezone)
result = series.resample('D', kind='period').mean()
# Create the expected series
# Index is moved back a day with the timezone conversion from UTC to
# Pacific
expected_index = (pd.period_range(start=start, end=end, freq='D') -
offsets.Day())
expected = Series(1, index=expected_index)
assert_series_equal(result, expected)
def test_resample_with_pytz(self):
# GH 13238
s = Series(2, index=pd.date_range('2017-01-01', periods=48, freq="H",
tz="US/Eastern"))
result = s.resample("D").mean()
expected = Series(2, index=pd.DatetimeIndex(['2017-01-01',
'2017-01-02'],
tz="US/Eastern"))
assert_series_equal(result, expected)
# Especially assert that the timezone is LMT for pytz
assert result.index.tz == pytz.timezone('US/Eastern')
def test_with_local_timezone_dateutil(self):
# see gh-5430
local_timezone = 'dateutil/America/Los_Angeles'
start = datetime(year=2013, month=11, day=1, hour=0, minute=0,
tzinfo=dateutil.tz.tzutc())
# 1 day later
end = datetime(year=2013, month=11, day=2, hour=0, minute=0,
tzinfo=dateutil.tz.tzutc())
index = pd.date_range(start, end, freq='H', name='idx')
series = Series(1, index=index)
series = series.tz_convert(local_timezone)
result = series.resample('D', kind='period').mean()
# Create the expected series
# Index is moved back a day with the timezone conversion from UTC to
# Pacific
expected_index = (pd.period_range(start=start, end=end, freq='D',
name='idx') - offsets.Day())
expected = Series(1, index=expected_index)
assert_series_equal(result, expected)
def test_resample_nonexistent_time_bin_edge(self):
# GH 19375
index = date_range('2017-03-12', '2017-03-12 1:45:00', freq='15T')
s = Series(np.zeros(len(index)), index=index)
expected = s.tz_localize('US/Pacific')
result = expected.resample('900S').mean()
tm.assert_series_equal(result, expected)
# GH 23742
index = date_range(start='2017-10-10', end='2017-10-20', freq='1H')
index = index.tz_localize('UTC').tz_convert('America/Sao_Paulo')
df = DataFrame(data=list(range(len(index))), index=index)
result = df.groupby(pd.Grouper(freq='1D')).count()
expected = date_range(start='2017-10-09', end='2017-10-20', freq='D',
tz="America/Sao_Paulo",
nonexistent='shift_forward', closed='left')
| tm.assert_index_equal(result.index, expected) | pandas.util.testing.assert_index_equal |
import numpy as np
import pandas as pd
import pickle
from pathlib import Path
import covid19
from COVID19.model import AgeGroupEnum, EVENT_TYPES, TransmissionTypeEnum
from COVID19.model import Model, Parameters, ModelParameterException
import COVID19.simulation as simulation
from analysis_utils import ranker_I, check_fn_I, ranker_IR, check_fn_IR, roc_curve, events_list
from abm_utils import status_to_state, listofhouses, dummy_logger, quarantine_households
#import sib
#import greedy_Rank
def loop_abm(params,
inference_algo,
logger = dummy_logger(),
input_parameter_file = "./abm_params/baseline_parameters.csv",
household_demographics_file = "./abm_params/baseline_household_demographics.csv",
parameter_line_number = 1,
seed=1,
initial_steps = 0,
num_test_random = 50,
num_test_algo = 50,
fraction_SM_obs = 0.2,
fraction_SS_obs = 1,
quarantine_HH = False,
test_HH = False,
name_file_res = "res",
output_dir = "./output/",
save_every_iter = 5,
stop_zero_I = True,
adoption_fraction = 1.0,
fp_rate = 0.0,
fn_rate = 0.0,
smartphone_users_abm = False, # if True use app users fraction from OpenABM model
callback = lambda x : None,
data = {}
):
'''
Simulate an intervention strategy on top of the OpenABM epidemic simulation.
input
-----
params: Dict
Dictionary of OpenABM parameters to set
inference_algo: Class (rank_template)
Class used to rank nodes by their estimated probability of being infected
(see the interface sketch commented just below this docstring)
logger = logger used for printing intermediate steps
results:
the true epidemic configurations and transmissions are written to file
'''
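# Interface sketch (inferred from the usage further down, not original code):
# `inference_algo` is expected to expose
#   inference_algo.init(N, T)
#   inference_algo.rank(t, weighted_contacts, daily_obs, data)
# where rank() returns an iterable of (node_index, score) pairs; nodes are then
# sorted by decreasing score to decide who gets tested first each day.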
params_model = Parameters(input_parameter_file,
parameter_line_number,
output_dir,
household_demographics_file)
### create output_dir if missing
fold_out = Path(output_dir)
if not fold_out.exists():
fold_out.mkdir(parents=True)
### initialize a separate random stream
rng = np.random.RandomState()
rng.seed(seed)
### initialize ABM model
for k, val in params.items():
params_model.set_param(k, val)
model = Model(params_model)
model = simulation.COVID19IBM(model=model)
T = params_model.get_param("end_time")
N = params_model.get_param("n_total")
sim = simulation.Simulation(env=model, end_time=T, verbose=False)
house = covid19.get_house(model.model.c_model)
housedict = listofhouses(house)
has_app = covid19.get_app_users(model.model.c_model) if smartphone_users_abm else np.ones(N,dtype = int)
has_app &= (rng.random(N) <= adoption_fraction)
### init data and data_states
data_states = {}
data_states["true_conf"] = np.zeros((T,N))
data_states["statuses"] = np.zeros((T,N))
data_states["tested_algo"] = []
data_states["tested_random"] = []
data_states["tested_SS"] = []
data_states["tested_SM"] = []
for name in ["num_quarantined", "q_SS", "q_SM", "q_algo", "q_random", "q_all", "infected_free", "S", "I", "R", "IR", "aurI", "prec1%", "prec5%", "test_+", "test_-", "test_f+", "test_f-"]:
data[name] = np.full(T,np.nan)
data["logger"] = logger
### init inference algo
inference_algo.init(N, T)
### running variables
indices = np.arange(N, dtype=int)
excluded = np.zeros(N, dtype=bool)
daily_obs = []
all_obs = []
all_quarantined = []
freebirds = 0
num_quarantined = 0
fp_num = 0
fn_num = 0
p_num = 0
n_num = 0
noise_SM = rng.random(N)
nfree = params_model.get_param("n_seed_infection")
for t in range(T):
### advance one time step
sim.steps(1)
status = np.array(covid19.get_state(model.model.c_model))
state = status_to_state(status)
data_states["true_conf"][t] = state
nS, nI, nR = (state == 0).sum(), (state == 1).sum(), (state == 2).sum()
if nI == 0 and stop_zero_I:
logger.info("stopping simulation as there are no more infected individuals")
break
if t == initial_steps:
logger.info("\nobservation-based inference algorithm starts now\n")
logger.info(f'time:{t}')
### extract contacts
daily_contacts = covid19.get_contacts_daily(model.model.c_model, t)
logger.info(f"number of unique contacts: {len(daily_contacts)}")
### compute potential test results for all
if fp_rate or fn_rate:
noise = rng.random(N)
f_state = (state==1)*(noise > fn_rate) + (state==0)*(noise < fp_rate) + 2*(state==2)
else:
f_state = state
to_quarantine = []
all_test = []
excluded_now = excluded.copy()
fp_num_today = 0
fn_num_today = 0
p_num_today = 0
n_num_today = 0
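# Helper: walk down `rank` and test up to `num` individuals that are not already
# excluded. Positives (f_state == 1) are quarantined, together with their whole
# household when quarantine_HH is set, and the daily true/false positive/negative
# counters are updated as a side effect.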
def test_and_quarantine(rank, num):
nonlocal to_quarantine, excluded_now, all_test, fp_num_today, fn_num_today, p_num_today, n_num_today
test_rank = []
for i in rank:
if len(test_rank) == num:
break
if excluded_now[i]:
continue
test_rank += [i]
if f_state[i] == 1:
p_num_today += 1
if state[i] != 1:
fp_num_today += 1
q = housedict[house[i]] if quarantine_HH else [i]
excluded_now[q] = True
to_quarantine += q
excluded[q] = True
if test_HH:
all_test += q
else:
all_test += [i]
else:
n_num_today += 1
if state[i] == 1:
fn_num_today += 1
excluded_now[i] = True
all_test += [i]
return test_rank
### compute rank from algorithm
num_test_algo_today = num_test_algo
if t < initial_steps:
daily_obs = []
num_test_algo_today = 0
weighted_contacts = [(c[0], c[1], c[2], 2.0 if c[3] == 0 else 1.0) for c in daily_contacts if (has_app[c[0]] and has_app[c[1]])]
if nfree == 0 and quarantine_HH:
print("faster end")
rank_algo = np.zeros((N,2))
rank_algo[:, 0]=np.arange(N)
rank_algo[:, 1]=np.random.rand(N)
else:
rank_algo = inference_algo.rank(t, weighted_contacts, daily_obs, data)
rank = np.array(sorted(rank_algo, key= lambda tup: tup[1], reverse=True))
rank = [int(tup[0]) for tup in rank]
### test num_test_algo_today individuals
test_algo = test_and_quarantine(rank, num_test_algo_today)
### compute roc now, only excluding past tests
eventsI = events_list(t, [(i,1,t) for (i,tf) in enumerate(excluded) if tf], data_states["true_conf"], check_fn = check_fn_I)
xI, yI, aurI, sortlI = roc_curve(dict(rank_algo), eventsI, lambda x: x)
### test all SS
SS = test_and_quarantine(indices[status == 4], N)
### test a fraction of SM
SM = indices[(status == 5) & (noise_SM < fraction_SM_obs)]
SM = test_and_quarantine(SM, len(SM))
### do num_test_random extra random tests
test_random = test_and_quarantine(rng.permutation(N), num_test_random)
### quarantine infected individuals
num_quarantined += len(to_quarantine)
covid19.intervention_quarantine_list(model.model.c_model, to_quarantine, T+1)
### update observations
daily_obs = [(int(i), int(f_state[i]), int(t)) for i in all_test]
all_obs += daily_obs
### exclude forever nodes that are observed recovered
rec = [i[0] for i in daily_obs if f_state[i[0]] == 2]
excluded[rec] = True
### update data
data_states["tested_algo"].append(test_algo)
data_states["tested_random"].append(test_random)
data_states["tested_SS"].append(SS)
data_states["tested_SM"].append(SM)
data_states["statuses"][t] = status
data["S"][t] = nS
data["I"][t] = nI
data["R"][t] = nR
data["IR"][t] = nR+nI
data["aurI"][t] = aurI
prec = lambda f: yI[int(f/100*len(yI))]/int(f/100*len(yI)) if len(yI) else np.nan
ninfq = sum(state[to_quarantine]>0)
nfree = int(nI - sum(excluded[state == 1]))
data["aurI"][t] = aurI
data["prec1%"][t] = prec(1)
data["prec5%"][t] = prec(5)
data["num_quarantined"][t] = num_quarantined
data["test_+"][t] = p_num
data["test_-"][t] = n_num
data["test_f+"][t] = fp_num
data["test_f-"][t] = fn_num
data["q_SS"][t] = len(SS)
data["q_SM"][t] = len(SM)
sus_test_algo = sum(state[test_algo]==0)
inf_test_algo = sum(state[test_algo]==1)
rec_test_algo = sum(state[test_algo]==2)
inf_test_random = sum(state[test_random]==1)
data["q_algo"][t] = inf_test_algo
data["q_random"][t] = sum(state[test_random]==1)
data["infected_free"][t] = nfree
asbirds = 'a bird' if nfree == 1 else 'birds'
fp_num += fp_num_today
fn_num += fn_num_today
n_num += n_num_today
p_num += p_num_today
### show output
logger.info(f"True : (S,I,R): ({nS:.1f}, {nI:.1f}, {nR:.1f})")
logger.info(f"AUR_I : {aurI:.3f}, prec(1% of {len(yI)}): {prec(1):.2f}, prec5%: {prec(5):.2f}")
logger.info(f"SS: {len(SS)}, SM: {len(SM)}, results test algo (S,I,R): ({sus_test_algo},{inf_test_algo},{rec_test_algo}), infected test random: {inf_test_random}/{num_test_random}")
logger.info(f"false+: {fp_num} (+{fp_num_today}), false-: {fn_num} (+{fn_num_today})")
logger.info(f"...quarantining {len(to_quarantine)} guys -> got {ninfq} infected, {nfree} free as {asbirds} ({nfree-freebirds:+d})")
freebirds = nfree
### callback
callback(data)
if t % save_every_iter == 0:
df_save = pd.DataFrame.from_records(data, exclude=["logger"])
df_save.to_csv(output_dir + name_file_res + "_res.gz")
# save files
df_save = pd.DataFrame.from_records(data, exclude=["logger"])
df_save.to_csv(output_dir + name_file_res + "_res.gz")
with open(output_dir + name_file_res + "_states.pkl", mode="wb") as f_states:
pickle.dump(data_states, f_states)
sim.env.model.write_individual_file()
df_indiv = pd.read_csv(output_dir + "individual_file_Run1.csv", skipinitialspace=True)
import glob
import os
import sys
# these imports and usings need to be in the same order
sys.path.insert(0, "../")
sys.path.insert(0, "TP_model")
sys.path.insert(0, "TP_model/fit_and_forecast")
from Reff_functions import *
from Reff_constants import *
from sys import argv
from datetime import timedelta, datetime
from scipy.special import expit
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use("Agg")
def forecast_TP(data_date):
from scenarios import scenarios, scenario_dates
from params import (
num_forecast_days,
alpha_start_date,
delta_start_date,
omicron_start_date,
truncation_days,
start_date,
sim_start_date,
third_start_date,
mob_samples,
)
data_date = pd.to_datetime(data_date)
# Define inputs
sim_start_date = pd.to_datetime(sim_start_date)
# Add 3 days buffer to mobility forecast
num_forecast_days = num_forecast_days + 3
# data_date = pd.to_datetime('2022-01-25')
print("============")
print("Generating forecasts using data from", data_date)
print("============")
# convert third start date to the correct format
third_start_date = pd.to_datetime(third_start_date)
third_end_date = data_date - timedelta(truncation_days)
# a different end date to deal with issues in fitting
third_end_date_diff = data_date - timedelta(18 + 7 + 7)
third_states = sorted(["NSW", "VIC", "ACT", "QLD", "SA", "TAS", "NT", "WA"])
# third_states = sorted(['NSW', 'VIC', 'ACT', 'QLD', 'SA', 'NT'])
# choose dates for each state for third wave
# NOTE: These need to be in date sorted order
third_date_range = {
"ACT": pd.date_range(start="2021-08-15", end=third_end_date).values,
"NSW": pd.date_range(start="2021-06-25", end=third_end_date).values,
"NT": pd.date_range(start="2021-12-20", end=third_end_date).values,
"QLD": pd.date_range(start="2021-07-30", end=third_end_date).values,
"SA": pd.date_range(start="2021-12-10", end=third_end_date).values,
"TAS": pd.date_range(start="2021-12-20", end=third_end_date).values,
"VIC": pd.date_range(start="2021-07-10", end=third_end_date).values,
"WA": pd.date_range(start="2022-01-01", end=third_end_date).values,
}
# Get Google Data - Don't use the smoothed data?
df_google_all = read_in_google(Aus_only=True, moving=True, local=True)
third_end_date = pd.to_datetime(data_date) - pd.Timedelta(days=truncation_days)
results_dir = (
"results/"
+ data_date.strftime("%Y-%m-%d")
+ "/"
)
# Load in vaccination data by state and date which should have the same date as the
# NNDSS/linelist data use the inferred VE
vaccination_by_state_delta = pd.read_csv(
results_dir + "adjusted_vaccine_ts_delta" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
vaccination_by_state_delta = vaccination_by_state_delta[["state", "date", "effect"]]
vaccination_by_state_delta = vaccination_by_state_delta.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# Convert to simple array for indexing
vaccination_by_state_delta_array = vaccination_by_state_delta.to_numpy()
vaccination_by_state_omicron = pd.read_csv(
results_dir + "adjusted_vaccine_ts_omicron" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
vaccination_by_state_omicron = vaccination_by_state_omicron[["state", "date", "effect"]]
vaccination_by_state_omicron = vaccination_by_state_omicron.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# Convert to simple array for indexing
vaccination_by_state_omicron_array = vaccination_by_state_omicron.to_numpy()
# Get survey data
surveys = pd.DataFrame()
path = "data/md/Barometer wave*.csv"
for file in glob.glob(path):
surveys = surveys.append(pd.read_csv(file, parse_dates=["date"]))
surveys = surveys.sort_values(by="date")
print("Latest microdistancing survey is {}".format(surveys.date.values[-1]))
surveys.loc[surveys.state != "ACT", "state"] = (
surveys.loc[surveys.state != "ACT", "state"]
.map(states_initials)
.fillna(surveys.loc[surveys.state != "ACT", "state"])
)
surveys["proportion"] = surveys["count"] / surveys.respondents
surveys.date = pd.to_datetime(surveys.date)
always = surveys.loc[surveys.response == "Always"].set_index(["state", "date"])
always = always.unstack(["state"])
# fill in date range
idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
always = always.reindex(idx, fill_value=np.nan)
always.index.name = "date"
always = always.fillna(method="bfill")
always = always.stack(["state"])
# Zero out before first survey 20th March
always = always.reset_index().set_index("date")
always.loc[:"2020-03-20", "count"] = 0
always.loc[:"2020-03-20", "respondents"] = 0
always.loc[:"2020-03-20", "proportion"] = 0
always = always.reset_index().set_index(["state", "date"])
survey_X = pd.pivot_table(
data=always, index="date", columns="state", values="proportion"
)
prop_all = survey_X
## read in and process mask wearing data
mask_wearing = pd.DataFrame()
path = "data/face_coverings/face_covering_*_.csv"
for file in glob.glob(path):
mask_wearing = mask_wearing.append(pd.read_csv(file, parse_dates=["date"]))
mask_wearing = mask_wearing.sort_values(by="date")
print("Latest mask wearing survey is {}".format(mask_wearing.date.values[-1]))
# mask_wearing['state'] = mask_wearing['state'].map(states_initials).fillna(mask_wearing['state'])
mask_wearing.loc[mask_wearing.state != "ACT", "state"] = (
mask_wearing.loc[mask_wearing.state != "ACT", "state"]
.map(states_initials)
.fillna(mask_wearing.loc[mask_wearing.state != "ACT", "state"])
)
mask_wearing["proportion"] = mask_wearing["count"] / mask_wearing.respondents
mask_wearing.date = pd.to_datetime(mask_wearing.date)
mask_wearing_always = mask_wearing.loc[
mask_wearing.face_covering == "Always"
].set_index(["state", "date"])
mask_wearing_always = mask_wearing_always.unstack(["state"])
idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
mask_wearing_always = mask_wearing_always.reindex(idx, fill_value=np.nan)
mask_wearing_always.index.name = "date"
# fill back to earlier and between weeks.
# Assume survey on day x applies for all days up to x - 6
mask_wearing_always = mask_wearing_always.fillna(method="bfill")
mask_wearing_always = mask_wearing_always.stack(["state"])
# Zero out before first survey 20th March
mask_wearing_always = mask_wearing_always.reset_index().set_index("date")
mask_wearing_always.loc[:"2020-03-20", "count"] = 0
mask_wearing_always.loc[:"2020-03-20", "respondents"] = 0
mask_wearing_always.loc[:"2020-03-20", "proportion"] = 0
mask_wearing_X = pd.pivot_table(
data=mask_wearing_always, index="date", columns="state", values="proportion"
)
mask_wearing_all = mask_wearing_X
# Get posterior
df_samples = read_in_posterior(
date=data_date.strftime("%Y-%m-%d"),
)
states = sorted(["NSW", "QLD", "SA", "VIC", "TAS", "WA", "ACT", "NT"])
plot_states = states.copy()
one_month = data_date + timedelta(days=num_forecast_days)
days_from_March = (one_month - pd.to_datetime(start_date)).days
# filter out future info
prop = prop_all.loc[:data_date]
masks = mask_wearing_all.loc[:data_date]
df_google = df_google_all.loc[df_google_all.date <= data_date]
# use this trick of saving the google data and then reloading it to kill
# the date time values
df_google.to_csv("results/test_google_data.csv")
df_google = pd.read_csv("results/test_google_data.csv")
# remove the temporary file
# os.remove("results/test_google_data.csv")
# Simple linear interpolation for missing values in the Google data
df_google = df_google.interpolate(method="linear", axis=0)
df_google.date = pd.to_datetime(df_google.date)
# forecast time parameters
today = data_date.strftime("%Y-%m-%d")
# add days to forecast if we are missing data
if df_google.date.values[-1] < data_date:
n_forecast = num_forecast_days + (data_date - df_google.date.values[-1]).days
else:
n_forecast = num_forecast_days
training_start_date = datetime(2020, 3, 1, 0, 0)
print(
"Forecast ends at {} days after 1st March".format(
(pd.to_datetime(today) - pd.to_datetime(training_start_date)).days
+ num_forecast_days
)
)
print(
"Final date is {}".format(pd.to_datetime(today) + timedelta(days=num_forecast_days))
)
df_google = df_google.loc[df_google.date >= training_start_date]
outdata = {"date": [], "type": [], "state": [], "mean": [], "std": []}
predictors = mov_values.copy()
# predictors.remove("residential_7days")
# Setup Figures
axes = []
figs = []
for var in predictors:
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
# fig.suptitle(var)
figs.append(fig)
# extra fig for microdistancing
var = "Proportion people always microdistancing"
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
figs.append(fig)
# # extra fig for mask wearing
var = "Proportion people always wearing masks"
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
figs.append(fig)
var = "Reduction in Reff due to vaccination"
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
figs.append(fig)
var = "Reduction in Reff due to vaccination"
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
figs.append(fig)
# Forecasting Params
n_training = 21 # Period to examine trend
n_baseline = 150 # Period to create baseline
n_training_vaccination = 30 # period to create trend for vaccination
# since this can be useful, predictor ordering is:
# [
# 'retail_and_recreation_7days',
# 'grocery_and_pharmacy_7days',
# 'parks_7days',
# 'transit_stations_7days',
# 'workplaces_7days'
# ]
# Loop through states and run forecasting.
print("============")
print("Forecasting macro, micro and vaccination")
print("============")
state_Rmed = {}
state_sims = {}
for i, state in enumerate(states):
rownum = int(i / 2)
colnum = np.mod(i, 2)
rows = df_google.loc[df_google.state == state].shape[0]
# Rmed currently a list, needs to be a matrix
Rmed_array = np.zeros(shape=(rows, len(predictors), mob_samples))
for j, var in enumerate(predictors):
for n in range(mob_samples):
# historically we want a little more noise. In the actual forecasting of trends
# we don't want this to be quite that prominent.
Rmed_array[:, j, n] = df_google[df_google["state"] == state][
var
].values.T + np.random.normal(
loc=0, scale=df_google[df_google["state"] == state][var + "_std"]
)
dates = df_google[df_google["state"] == state]["date"]
# cap min and max at historical or (-50,0)
# 1 by predictors by mob_samples size
minRmed_array = np.minimum(-50, np.amin(Rmed_array, axis=0))
maxRmed_array = np.maximum(10, np.amax(Rmed_array, axis=0))
# days by predictors by samples
sims = np.zeros(shape=(n_forecast, len(predictors), mob_samples))
for n in range(mob_samples): # Loop through simulations
Rmed = Rmed_array[:, :, n]
minRmed = minRmed_array[:, n]
maxRmed = maxRmed_array[:, n]
if maxRmed[1] < 20:
maxRmed[1] = 50
R_baseline_mean = np.mean(Rmed[-n_baseline:, :], axis=0)
if state not in {"WA"}:
R_baseline_mean[-1] = 0
R_diffs = np.diff(Rmed[-n_training:, :], axis=0)
mu = np.mean(R_diffs, axis=0)
cov = np.cov(R_diffs, rowvar=False) # columns are vars, rows are obs
# Forecast mobility forward sequentially by day.
# current = np.mean(Rmed[-9:-2, :], axis=0) # Start from last valid days
# current = np.mean(Rmed[-1, :], axis=0) # Start from last valid days
current = Rmed[-1, :] # Start from last valid days
for i in range(n_forecast):
# ## SCENARIO MODELLING
# This code chunk lets you manually set the distancing parameters for a state to support scenario modelling.
if scenarios[state] == "":
# Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast - i) / (n_forecast)
# Generate a single forward realisation of trend
trend_force = np.random.multivariate_normal(mu, cov)
# Generate a single forward realisation of baseline regression
# regression to baseline force stronger in standard forecasting
regression_to_baseline_force = np.random.multivariate_normal(
0.05 * (R_baseline_mean - current), cov
)
new_forcast_points = (
current + p_force * trend_force + (1 - p_force) * regression_to_baseline_force
) # Find overall simulation step
# Apply minimum and maximum
new_forcast_points = np.maximum(minRmed, new_forcast_points)
new_forcast_points = np.minimum(maxRmed, new_forcast_points)
current = new_forcast_points
elif scenarios[state] != "":
# Make baseline cov for generating points
cov_baseline = np.cov(Rmed[-42:-28, :], rowvar=False)
mu_current = Rmed[-1, :]
mu_victoria = np.array(
[
-55.35057887,
-22.80891056,
-46.59531636,
-75.99942378,
-44.71119293,
]
)
mu_baseline = np.mean(Rmed[-42:-28, :], axis=0)
# mu_baseline = 0*np.mean(Rmed[-42:-28, :], axis=0)
if scenario_dates[state] != "":
scenario_change_point = (
pd.to_datetime(scenario_dates[state]) - data_date
).days + (n_forecast - 42)
# Constant Lockdown
if (
scenarios[state] == "no_reversion"
or scenarios[state] == "school_opening_2022"
):
# take a continuous median to account for noise in recent observations (such as sunny days)
# mu_current = np.mean(Rmed[-7:, :], axis=0)
# cov_baseline = np.cov(Rmed[-28:, :], rowvar=False)
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
elif scenarios[state] == "no_reversion_continuous_lockdown":
# add the new scenario here
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
# No Lockdown
elif scenarios[state] == "full_reversion":
# a full reversion scenario changes the social mobility and microdistancing
# behaviours at the scenario change date and then applies a return to baseline force
if i < scenario_change_point:
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
else:
# baseline is within lockdown period so take a new baseline of 0's and trend towards this
R_baseline_0 = np.zeros_like(R_baseline_mean)
R_baseline_0 = mu_baseline
# set adjusted baselines by eyeline for now, need to get this automated
# R_baseline_0[1] = 10 # baseline of +10% for Grocery based on other jurisdictions
# # apply specific baselines to the jurisdictions progressing towards normal restrictions
# if state == 'NSW':
# R_baseline_0[3] = -25 # baseline of -25% for Transit based on 2021-April to 2021-July (pre-third-wave lockdowns)
# elif state == 'ACT':
# R_baseline_0[1] = 20 # baseline of +20% for Grocery based on other jurisdictions
# R_baseline_0[3] = -25 # baseline of -25% for Transit based on 2021-April to 2021-July (pre-third-wave lockdowns)
# elif state == 'VIC':
# R_baseline_0[0] = -15 # baseline of -15% for R&R based on 2021-April to 2021-July (pre-third-wave lockdowns)
# R_baseline_0[3] = -30 # baseline of -30% for Transit based on 2021-April to 2021-July (pre-third-wave lockdowns)
# R_baseline_0[4] = -15 # baseline of -15% for workplaces based on 2021-April to 2021-July (pre-third-wave lockdowns)
# the force we trend towards the baseline above with
p_force = (n_forecast - i) / (n_forecast)
trend_force = np.random.multivariate_normal(
mu, cov
) # Generate a single forward realisation of trend
# baseline scalar is smaller for this as we want slow returns
adjusted_baseline_drift_mean = R_baseline_0 - current
# we purposely scale the transit measure so that we increase a little more quickly
# tmp = 0.05 * adjusted_baseline_drift_mean[3]
adjusted_baseline_drift_mean *= 0.005
# adjusted_baseline_drift_mean[3] = tmp
regression_to_baseline_force = np.random.multivariate_normal(
adjusted_baseline_drift_mean, cov
) # Generate a single forward realisation of baseline regression
new_forcast_points = (
current
+ p_force * trend_force
+ (1 - p_force) * regression_to_baseline_force
) # Find overall simulation step
# new_forcast_points = current + regression_to_baseline_force # Find overall simulation step
# Apply minimum and maximum
new_forcast_points = np.maximum(minRmed, new_forcast_points)
new_forcast_points = np.minimum(maxRmed, new_forcast_points)
current = new_forcast_points
elif scenarios[state] == "immediately_baseline":
# this scenario is used to return instantly to the baseline levels
if i < scenario_change_point:
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
else:
# baseline is within lockdown period so take a new baseline of 0's and trend towards this
R_baseline_0 = np.zeros_like(R_baseline_mean)
# jump immediately to baseline
new_forcast_points = np.random.multivariate_normal(
R_baseline_0, cov_baseline
)
# Temporary Lockdown
elif scenarios[state] == "half_reversion":
if i < scenario_change_point:
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
else:
new_forcast_points = np.random.multivariate_normal(
(mu_current + mu_baseline) / 2, cov_baseline
)
# Stage 4
elif scenarios[state] == "stage4":
if i < scenario_change_point:
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
else:
new_forcast_points = np.random.multivariate_normal(
mu_victoria, cov_baseline
)
# Set this day in this simulation to the forecast realisation
sims[i, :, n] = new_forcast_points
dd = [dates.tolist()[-1] + timedelta(days=x) for x in range(1, n_forecast + 1)]
sims_med = np.median(sims, axis=2) # N by predictors
sims_q25 = np.percentile(sims, 25, axis=2)
sims_q75 = np.percentile(sims, 75, axis=2)
# forecast microdistancing
# Get a baseline value of microdistancing
mu_overall = np.mean(prop[state].values[-n_baseline:])
md_diffs = np.diff(prop[state].values[-n_training:])
mu_diffs = np.mean(md_diffs)
std_diffs = np.std(md_diffs)
extra_days_md = (
pd.to_datetime(df_google.date.values[-1])
- pd.to_datetime(prop[state].index.values[-1])
import json
from datetime import datetime
from io import BytesIO
import matplotlib as mpl
import numpy as np
import pandas as pd
import requests
from matplotlib import pyplot as plt
def get_weather_df(username, password, port, url, zipcode):
line = 'https://'+username+':'+password+'@<EMAIL>:'+port+'/api/weather/v1/location/'+zipcode+'%3A4%3AUS/forecast/hourly/48hour.json?units=m&language=en-US'
raw = requests.get(line)
weather = json.loads(raw.text)
df = pd.DataFrame.from_dict(weather['forecasts'][0],orient='index').transpose()
for forecast in weather['forecasts'][1:]:
df = pd.concat([df, pd.DataFrame.from_dict(forecast,orient='index').transpose()])
time = np.array(df['fcst_valid_local'])
for row in range(len(time)):
time[row] = datetime.strptime(time[row], '%Y-%m-%dT%H:%M:%S%z')
df = df.set_index(time)
return df
def get_weather_plots(df):
plt.ioff()
df['rain'] = df['pop'].as_matrix()
tmean = pd.rolling_mean(df['temp'], window=4, center=True)
rhmean = pd.rolling_mean(df['rh'], window=4, center=True)
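# Note: pd.rolling_mean() and Series.as_matrix() were removed in later pandas
# releases; on a modern pandas the equivalents would be, e.g.
#   df['rain'] = df['pop'].to_numpy()
#   tmean = df['temp'].rolling(window=4, center=True).mean()
#   rhmean = df['rh'].rolling(window=4, center=True).mean()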
#!/usr/bin/env python3
import warnings
import glob
from pathlib import Path ## for os-agnostic paths
import pandas as pd
from mendeleev import get_table
from ase import data as ase_data
from pymatgen import Element
HERE = Path(__file__).parent
def gather_ptable_dataframe():
df_list = []
## get properties in csv (retrieved from magpie project, imat project, and wikipedia)
all_files = glob.glob(str(HERE/"*"/"*.csv"))
for filename in all_files:
prop = str(Path(filename).stem)
source = str(Path(filename).parent.stem)
name = source + "_" + prop
tmp_df = pd.read_csv(filename, names=[name])
valid_0_list = [
"valence",
"valence_s",
"valence_p",
"valence_d",
"valence_f",
"unfilled",
"unfilled_f",
"unfilled_d",
"electron_affinity",
"electronegativity",
"magnetic_moment",
]
if prop not in valid_0_list:
tmp_df = tmp_df[name].apply(lambda x: None if x==0 else x)
df_list.append(tmp_df)
## get ase magnetic moments
magmom_list = ase_data.ground_state_magnetic_moments
tmp_df = pd.DataFrame(magmom_list, columns=["ase_magnetic_moment"])
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 17 14:45:46 2016
@author: ernestmordret
"""
import pandas as pd
import numpy as np
import os
from matplotlib import pyplot as plt
import seaborn as sns
# from params import output_dir
import logging
def create_logger():
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
return logger
def hamming(s1, s2):
return sum(a != b for a, b in zip(s1, s2))
def get_codon_table(codons, amino_acids):
return dict(list(zip(codons, amino_acids)))
def get_inverted_codon_table(ct):
inv_codon_table = {}
for k, v in ct.items():
inv_codon_table[v] = inv_codon_table.get(v, [])
inv_codon_table[v].append(k)
return inv_codon_table
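# Example: with the standard codon table built in main() below,
# get_inverted_codon_table(ct)['W'] == ['UGG'] and ...['M'] == ['AUG'];
# main() later extends the 'L' entry with the isoleucine codons because I and L
# share the same residue mass (113.08407) and cannot be distinguished here.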
def prepare_count_matrix(df, codons, codon_table, inverted_codon_table, exact_PTM_spec_list):
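# Builds a 64-codon x amino-acid matrix counting detected substitutions per
# (codon, destination) pair. Stop-codon rows, cells whose destination matches
# the codon's own amino acid (with I and L treated as interchangeable), and
# substitutions indistinguishable from a PTM (exact_PTM_spec_list) are masked
# with NaN.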
matrix = pd.DataFrame(
data=0, index=codons, columns=list("ACDEFGHKLMNPQRSTVWY"), dtype=float
)
df = df[df["DP Decoy"] != "+"]
df = df[pd.notnull(df["codon"])]
df["codon"] = df["codon"].map(lambda x: x.replace("T", "U"))
for label in matrix.index:
if codon_table[label] == "*":
matrix.loc[label] = float("NaN")
for col in matrix.columns:
if (label in inverted_codon_table[col]) or (
codon_table[label] + " to " + col in exact_PTM_spec_list
):
matrix.loc[label, col] = float("NaN")
subs_agg = pd.DataFrame(
np.array(
list(
zip(
*list(
df.groupby(
["protein", "position", "origin", "destination", "codon"]
).groups.keys()
)
)
)
).T,
columns=["protein", "position", "origin", "destination", "codon"],
)
for x, l in list(subs_agg.groupby(["codon", "destination"]).groups.items()):
codon, destination = x
if (codon in matrix.index) and pd.notnull(matrix.loc[codon, destination]):
matrix.loc[codon, destination] = len(l)
matrix.rename(columns={"L": "I/L"}, inplace=True)
return matrix
def probe_mismatch(codon1, codon2, pos, spec):
origin, destination = spec
for i in range(3):
if i == pos:
if codon1[i] != origin or codon2[i] != destination:
return False
else:
if codon1[i] != codon2[i]:
return False
return True
def main(param_dict):
logger = create_logger()
logger.info('Plotting heatmap ...')
bases = "UCAG"
codons = [a + b + c for a in bases for b in bases for c in bases]
amino_acids = "FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG"
RC = {"A": "U", "C": "G", "G": "C", "U": "A"}
codon_table = get_codon_table(codons, amino_acids)
inverted_codon_table = get_inverted_codon_table(codon_table)
inverted_codon_table["L"] = inverted_codon_table["L"] + inverted_codon_table["I"]
tol = 0.005
MW_dict = {
"G": 57.02147,
"A": 71.03712,
"S": 87.03203,
"P": 97.05277,
"V": 99.06842,
"T": 101.04768,
"I": 113.08407,
"L": 113.08407,
"N": 114.04293,
"D": 115.02695,
"Q": 128.05858,
"K": 128.09497,
"E": 129.0426,
"M": 131.04049,
"H": 137.05891,
"F": 147.06842,
"R": 156.10112,
"C": 160.030654, # CamCys
"Y": 163.0633,
"W": 186.07932,
}
aas_sorted_by_mass = [i[0] for i in sorted(list(MW_dict.items()), key=lambda x: x[1])]
danger_mods = pd.read_pickle("danger_mods")
import os
from io import StringIO
from pathlib import Path
import pandas as pd
import pandas._testing as pt
import pytest
from pyplotutil.datautil import Data, DataSet
csv_dir_path = os.path.join(os.path.dirname(__file__), "data")
test_data = """\
a,b,c,d,e
1,0.01,10.0,3.5,100
2,0.02,20.0,7.5,200
3,0.03,30.0,9.5,300
4,0.04,40.0,11.5,400
"""
test_dataset = """\
tag,a,b,c,d,e
tag01,0,1,2,3,4
tag01,5,6,7,8,9
tag01,10,11,12,13,14
tag01,15,16,17,18,19
tag01,20,21,22,23,24
tag01,25,26,27,28,29
tag02,10,11,12,13,14
tag02,15,16,17,18,19
tag02,110,111,112,113,114
tag02,115,116,117,118,119
tag02,120,121,122,123,124
tag02,125,126,127,128,129
tag03,20,21,22,23,24
tag03,25,26,27,28,29
tag03,210,211,212,213,214
tag03,215,216,217,218,219
tag03,220,221,222,223,224
tag03,225,226,227,228,229
"""
@pytest.mark.parametrize("cls", [str, Path])
def test_data_init_path(cls) -> None:
csv_path = os.path.join(csv_dir_path, "test.csv")
path = cls(csv_path)
expected_df = pd.read_csv(csv_path)
data = Data(path)
assert data.datapath == Path(csv_path)
pt.assert_frame_equal(data.dataframe, expected_df)
def test_data_init_StringIO() -> None:
csv_path = os.path.join(csv_dir_path, "test.csv")
expected_df = pd.read_csv(csv_path)
data = Data(StringIO(test_data))
assert data.datapath is None
pt.assert_frame_equal(data.dataframe, expected_df)
def test_data_init_DataFrame() -> None:
csv_path = os.path.join(csv_dir_path, "test.csv")
expected_df = pd.read_csv(csv_path)
if isinstance(expected_df, pd.DataFrame):
data = Data(expected_df)
assert data.datapath is None
pt.assert_frame_equal(data.dataframe, expected_df)
else:
pytest.skip(f"Expected DataFrame type: {type(expected_df)}")
def test_data_init_kwds() -> None:
csv_path = os.path.join(csv_dir_path, "test.csv")
expected_df = pd.read_csv(csv_path, usecols=[0, 1])
data = Data(csv_path, usecols=[0, 1])
assert len(data.dataframe.columns) == 2
pt.assert_frame_equal(data.dataframe, expected_df)
def test_data_getitem() -> None:
df = pd.DataFrame([[0, 1, 2], [3, 4, 5], [6, 7, 8]], columns=["a", "b", "c"])
data = Data(df)
pt.assert_series_equal(data["a"], df.a) # type: ignore
pt.assert_series_equal(data["b"], df.b) # type: ignore
pt.assert_series_equal(data["c"], df.c) # type: ignore
def test_data_getitem_no_header() -> None:
df = pd.DataFrame([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
data = Data(df)
pt.assert_series_equal(data[0], df[0]) # type: ignore
pt.assert_series_equal(data[1], df[1]) # type: ignore
pt.assert_series_equal(data[2], df[2]) # type: ignore
def test_data_len() -> None:
df = pd.DataFrame([[0, 1, 2], [3, 4, 5], [6, 7, 8]], columns=["a", "b", "c"])
data = Data(df)
assert len(data) == len(df)
def test_data_getattr() -> None:
df = pd.DataFrame([[0, 1, 2], [3, 4, 5], [6, 7, 8]], columns=["a", "b", "c"])
data = Data(df)
pt.assert_index_equal(data.columns, pd.Index(["a", "b", "c"]))
assert data.shape == (3, 3)
assert data.to_csv() == ",a,b,c\n0,0,1,2\n1,3,4,5\n2,6,7,8\n"
assert data.iat[1, 2] == 5
assert data.at[2, "a"] == 6
def test_data_attributes() -> None:
df = pd.DataFrame([[0, 1, 2], [3, 4, 5], [6, 7, 8]], columns=["a", "b", "c"])
data = Data(df)
pt.assert_series_equal(data.a, df.a) # type: ignore
pt.assert_series_equal(data.b, df.b) # type: ignore
pt.assert_series_equal(data.c, df.c) # type: ignore
def test_data_param() -> None:
csv_path = os.path.join(csv_dir_path, "test.csv")
data = Data(csv_path)
assert data.param("b") == 0.01
def test_data_param_list() -> None:
csv_path = os.path.join(csv_dir_path, "test.csv")
data = Data(csv_path)
assert data.param(["c", "e"]) == [10.0, 100]
@pytest.mark.parametrize("cls", [str, Path])
def test_dataset_init_path(cls) -> None:
csv_path = os.path.join(csv_dir_path, "test_dataset.csv")
path = cls(csv_path)
raw_df = pd.read_csv(csv_path)
dataset = DataSet(path)
assert dataset.datapath == Path(csv_path)
pt.assert_frame_equal(dataset.dataframe, raw_df)
if isinstance(raw_df, pd.DataFrame):
groups = raw_df.groupby("tag")
datadict = dataset._datadict
pt.assert_frame_equal(
datadict["tag01"].dataframe,
groups.get_group("tag01").reset_index(drop=True),
)
pt.assert_frame_equal(
datadict["tag02"].dataframe,
groups.get_group("tag02").reset_index(drop=True),
)
pt.assert_frame_equal(
datadict["tag03"].dataframe,
groups.get_group("tag03").reset_index(drop=True),
)
else:
pytest.skip(f"Expected DataFrame type: {type(raw_df)}")
def test_dataset_init_StringIO() -> None:
csv_path = os.path.join(csv_dir_path, "test_dataset.csv")
raw_df = pd.read_csv(csv_path)
dataset = DataSet(StringIO(test_dataset))
assert dataset.datapath is None
pt.assert_frame_equal(dataset.dataframe, raw_df)
if isinstance(raw_df, pd.DataFrame):
groups = raw_df.groupby("tag")
datadict = dataset._datadict
pt.assert_frame_equal(
datadict["tag01"].dataframe,
groups.get_group("tag01").reset_index(drop=True),
)
pt.assert_frame_equal(
datadict["tag02"].dataframe,
groups.get_group("tag02").reset_index(drop=True),
)
pt.assert_frame_equal(
datadict["tag03"].dataframe,
groups.get_group("tag03").reset_index(drop=True),
)
else:
pytest.skip(f"Expected DataFrame type: {type(raw_df)}")
def test_dataset_init_DataFrame() -> None:
csv_path = os.path.join(csv_dir_path, "test_dataset.csv")
raw_df = pd.read_csv(csv_path)
if isinstance(raw_df, pd.DataFrame):
dataset = DataSet(raw_df)
groups = raw_df.groupby("tag")
assert dataset.datapath is None
pt.assert_frame_equal(dataset.dataframe, raw_df)
pt.assert_frame_equal(
dataset._datadict["tag01"].dataframe,
groups.get_group("tag01").reset_index(drop=True),
)
pt.assert_frame_equal(
dataset._datadict["tag02"].dataframe,
groups.get_group("tag02").reset_index(drop=True),
)
pt.assert_frame_equal(
dataset._datadict["tag03"].dataframe,
groups.get_group("tag03").reset_index(drop=True),
)
else:
pytest.skip(f"Expected DataFrame type: {type(raw_df)}")
def test_dataset_non_default_tag() -> None:
csv_path = os.path.join(csv_dir_path, "test_dataset_label.csv")
raw_df = pd.read_csv(csv_path)
dataset = DataSet(csv_path, by="label")
assert dataset.datapath == Path(csv_path)
pt.assert_frame_equal(dataset.dataframe, raw_df)
if isinstance(raw_df, pd.DataFrame):
groups = raw_df.groupby("label")
pt.assert_frame_equal(
dataset._datadict["label01"].dataframe,
groups.get_group("label01").reset_index(drop=True),
)
pt.assert_frame_equal(
dataset._datadict["label02"].dataframe,
groups.get_group("label02").reset_index(drop=True),
)
pt.assert_frame_equal(
dataset._datadict["label03"].dataframe,
groups.get_group("label03").reset_index(drop=True),
)
else:
pytest.skip(f"Expected DataFrame type: {type(raw_df)}")
def test_dataset_no_tag() -> None:
csv_path = os.path.join(csv_dir_path, "test.csv")
raw_df = pd.read_csv(csv_path)
dataset = DataSet(csv_path)
pt.assert_frame_equal(dataset.dataframe, raw_df)
if isinstance(raw_df, pd.DataFrame):
assert len(dataset.datadict) == 1
pt.assert_frame_equal(dataset.datadict["0"].dataframe, raw_df)
else:
pytest.skip(f"Expected DataFrame type: {type(raw_df)}")
def test_dataset_iter() -> None:
csv_path = os.path.join(csv_dir_path, "test_dataset.csv")
dataset = DataSet(csv_path)
raw_df = pd.read_csv(csv_path)
if isinstance(raw_df, pd.DataFrame):
groups = raw_df.groupby("tag")
for i, data in enumerate(dataset):
pt.assert_frame_equal(
data.dataframe, groups.get_group(f"tag{i+1:02d}").reset_index(drop=True)
)
else:
pytest.skip(f"Expected DataFrame type: {type(raw_df)}")
def test_dataset_property_datadict() -> None:
csv_path = os.path.join(csv_dir_path, "test_dataset.csv")
dataset = DataSet(csv_path)
assert isinstance(dataset.datadict, dict)
assert list(dataset.datadict.keys()) == ["tag01", "tag02", "tag03"]
def test_dataset_keys() -> None:
csv_path = os.path.join(csv_dir_path, "test_dataset.csv")
dataset = DataSet(csv_path)
assert dataset.keys() == ["tag01", "tag02", "tag03"]
def test_dataset_items() -> None:
csv_path = os.path.join(csv_dir_path, "test_dataset.csv")
dataset = DataSet(csv_path)
raw_df = pd.read_csv(csv_path)
items = dataset.items()
assert isinstance(items, list)
assert len(items) == 3
if isinstance(raw_df, pd.DataFrame):
groups = raw_df.groupby("tag")
for i, tup in enumerate(items):
tag = tup[0]
data = tup[1]
assert tag == f"tag{i+1:02d}"
pt.assert_frame_equal(
data.dataframe, groups.get_group(tag).reset_index(drop=True)
)
else:
pytest.skip(f"Expected DataFrame type: {type(raw_df)}")
def test_dataset_get() -> None:
csv_path = os.path.join(csv_dir_path, "test_dataset.csv")
dataset = DataSet(csv_path)
raw_df = pd.read_csv(csv_path)
# 5.Perform Operations on Files
# 5.1: From the raw data below create a data frame
# 'first_name': ['Jason', 'Molly', 'Tina', 'Jake', 'Amy'],
# 'last_name': ['Miller', 'Jacobson', ".", 'Milner', 'Cooze'],
# 'age': [42, 52, 36, 24, 73],
# 'preTestScore': [4, 24, 31, ".", "."],
# 'postTestScore': ["25,000", "94,000", 57, 62, 70]
# 5.2: Save the dataframe into a csv file as example.csv
# 5.3: Read the example.csv and print the data frame
# 5.4: Read the example.csv without column heading
# 5.5: Read the example.csv and make the index columns 'First Name' and 'Last Name'
# 5.6: Print the data frame in Boolean form (True or False):
# True for Null/NaN values and False for non-null values
# 5.7: Read the dataframe by skipping the first 3 rows and print the data frame
# 5.8: Load a csv file while interpreting "," in strings around numbers as thousands separators.
# Check that the raw 'postTestScore' column uses "," as a thousands separator.
# The comma should be ignored while reading the data. This is not the default behaviour,
# so you need to pass an argument to read_csv that makes sure commas are ignored
# (a sketch solution for 5.8 is appended at the end of this script).
import pandas as pd
# 5.1
df = pd.DataFrame({'first_name': ['Jason', 'Molly', 'Tina', 'Jake', 'Amy'],
'last_name': ['Miller', 'Jacobson', ".", 'Milner', 'Cooze'],
'age': [42, 52, 36, 24, 73],
'preTestScore': [4, 24, 31, ".", "."],
'postTestScore': ["25,000", "94,000", 57, 62, 70]})
# 5.2
df.to_csv("example.csv")
print("*"*20)
# 5.3
df = pd.read_csv("example.csv")
print(df)
print("*"*20)
# 5.4
df_without_header = pd.read_csv("example.csv", header=None)
print(df_without_header)
print("*"*20)
# 5.5
df_with_index = pd.read_csv("example.csv", index_col=[
"first_name", "last_name"])
print(df_with_index)
print("*"*20)
# 5.6
boolean_df = df.isnull().any()
print(boolean_df)
print("*"*20)
# 5.7
df_skip_rows = pd.read_csv("example.csv", skiprows=3)
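# (print for 5.7, following the pattern above)
print(df_skip_rows)
print("*"*20)
# 5.8 (sketch): read_csv's `thousands` argument makes pandas strip the "," used
# as a thousands separator, so "25,000" in postTestScore is parsed as 25000.
df_thousands = pd.read_csv("example.csv", thousands=",")
print(df_thousands)
print("*"*20)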
import pandas as pd
langs_id = [
{
"lang": "Afrikaans",
"dataset_id": "af",
"stopwords_id": "af",
"flagged_words_id": None,
"fasttext_id": "af",
"sentencepiece_id": "af",
"kenlm_id": "af",
},
{
"lang": "Arabic",
"dataset_id": "ar",
"stopwords_id": "ar",
"flagged_words_id": "ar",
"fasttext_id": "ar",
"sentencepiece_id": "ar",
"kenlm_id": "ar",
},
{
"lang": "Egyptian Arabic",
"dataset_id": "arz",
"stopwords_id": None,
"flagged_words_id": None,
"fasttext_id": "arz",
"sentencepiece_id": "arz",
"kenlm_id": "arz",
},
{
"lang": "Assamese",
"dataset_id": "as",
"stopwords_id": None,
"flagged_words_id": None,
"fasttext_id": "as",
"sentencepiece_id": "as",
"kenlm_id": "as",
},
{
"lang": "Bengali",
"dataset_id": "bn",
"stopwords_id": "bn",
"flagged_words_id": None,
"fasttext_id": "bn",
"sentencepiece_id": "bn",
"kenlm_id": "bn",
},
{
"lang": "Catalan",
"dataset_id": "ca",
"stopwords_id": "ca",
"flagged_words_id": "ca",
"fasttext_id": "ca",
"sentencepiece_id": "ca",
"kenlm_id": "ca",
},
{
"lang": "English",
"dataset_id": "en",
"stopwords_id": "en",
"flagged_words_id": "en",
"fasttext_id": "en",
"sentencepiece_id": "en",
"kenlm_id": "en",
},
{
"lang": "Spanish",
"dataset_id": "es",
"stopwords_id": "es",
"flagged_words_id": "es",
"fasttext_id": "es",
"sentencepiece_id": "es",
"kenlm_id": "es",
},
{
"lang": "Basque",
"dataset_id": "eu",
"stopwords_id": "eu",
"flagged_words_id": "eu",
"fasttext_id": "eu",
"sentencepiece_id": "eu",
"kenlm_id": "eu",
},
{
"lang": "French",
"dataset_id": "fr",
"stopwords_id": "fr",
"flagged_words_id": "fr",
"fasttext_id": "fr",
"sentencepiece_id": "fr",
"kenlm_id": "fr",
},
{
"lang": "Gujarati",
"dataset_id": "gu",
"stopwords_id": None,
"flagged_words_id": None,
"fasttext_id": "gu",
"sentencepiece_id": "gu",
"kenlm_id": "gu",
},
{
"lang": "Hindi",
"dataset_id": "hi",
"stopwords_id": "hi",
"flagged_words_id": "hi",
"fasttext_id": "hi",
"sentencepiece_id": "hi",
"kenlm_id": "hi",
},
{
"lang": "Indonesian",
"dataset_id": "id",
"stopwords_id": "id",
"flagged_words_id": "id",
"fasttext_id": "id",
"sentencepiece_id": "id",
"kenlm_id": "id",
},
{
"lang": "Kannada",
"dataset_id": "kn",
"stopwords_id": None,
"flagged_words_id": "kn",
"fasttext_id": "kn",
"sentencepiece_id": "kn",
"kenlm_id": "kn",
},
{
"lang": "Malayalam",
"dataset_id": "ml",
"stopwords_id": None,
"flagged_words_id": "ml",
"fasttext_id": "ml",
"sentencepiece_id": "ml",
"kenlm_id": "ml",
},
{
"lang": "Marathi",
"dataset_id": "mr",
"stopwords_id": "mr",
"flagged_words_id": "mr",
"fasttext_id": "mr",
"sentencepiece_id": "mr",
"kenlm_id": "mr",
},
{
"lang": "Portuguese",
"dataset_id": "pt",
"stopwords_id": "pt",
"flagged_words_id": "pt",
"fasttext_id": "pt",
"sentencepiece_id": "pt",
"kenlm_id": "pt",
},
{
"lang": "Somali",
"dataset_id": "so",
"stopwords_id": "so",
"flagged_words_id": None,
"fasttext_id": "so",
"sentencepiece_id": None,
"kenlm_id": None,
},
{
"lang": "Swahili",
"dataset_id": "sw",
"stopwords_id": "sw",
"flagged_words_id": None,
"fasttext_id": "sw",
"sentencepiece_id": "sw",
"kenlm_id": "sw",
},
{
"lang": "Tamil",
"dataset_id": "ta",
"stopwords_id": None,
"flagged_words_id": None,
"fasttext_id": "ta",
"sentencepiece_id": "ta",
"kenlm_id": "ta",
},
{
"lang": "Telugu",
"dataset_id": "te",
"stopwords_id": None,
"flagged_words_id": "te",
"fasttext_id": "te",
"sentencepiece_id": "te",
"kenlm_id": "te",
},
{
"lang": "Urdu",
"dataset_id": "ur",
"stopwords_id": "ur",
"flagged_words_id": None,
"fasttext_id": "ur",
"sentencepiece_id": "ur",
"kenlm_id": "ur",
},
{
"lang": "Vietnamese",
"dataset_id": "vi",
"stopwords_id": "vi",
"flagged_words_id": "vi",
"fasttext_id": "vi",
"sentencepiece_id": "vi",
"kenlm_id": "vi",
},
{
"lang": "Yoruba",
"dataset_id": "yo",
"stopwords_id": "yo",
"flagged_words_id": None,
"fasttext_id": "yo",
"sentencepiece_id": "yo",
"kenlm_id": "yo",
},
{
"lang": "Chinese",
"dataset_id": "zh",
"stopwords_id": "zh",
"flagged_words_id": "zh",
"fasttext_id": "zh",
"sentencepiece_id": "zh",
"kenlm_id": "zh",
},
]
langs_id = pd.DataFrame(langs_id)
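# Illustrative lookup (how the table is consumed is not shown in this snippet):
#   langs_id.loc[langs_id["dataset_id"] == "ca", "stopwords_id"].iloc[0]  # -> "ca"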
import functools
import time
import pandas as pd
from .datenquellen import Datenquelle
def timer(func):
"""Print the runtime of the decorated function"""
@functools.wraps(func)
def wrapper_timer(*args, **kwargs):
start_time = time.perf_counter() # 1
value = func(*args, **kwargs)
end_time = time.perf_counter() # 2
run_time = end_time - start_time # 3
print(f"Finished {func.__name__!r} in {run_time:.4f} secs")
return value
return wrapper_timer
class CSV(Datenquelle):
"""
"""
_args = None
_kwargs = None
def __init__(self, fname, *args, **kwargs):
super().__init__(fname)
self._args = args
self._kwargs = kwargs
def lesen(self):
self.data = pd.read_csv(self.fname, *self._args, **self._kwargs)
self.__repr__()
def mapping(self, xkateg1, xkateg2):
"""
Given two categorical variables xkateg1 and xkateg2, build a dict that maps each value of xkateg1 to its corresponding value of xkateg2.
:param xkateg1: column name of the key category
:param xkateg2: column name of the value category
:return: dict mapping xkateg1 values to xkateg2 values
"""
list_dicts = self.data[[xkateg1, xkateg2]].drop_duplicates().transpose().to_dict().values()
mapping_res = {}
for xdict in list_dicts:
mapping_res[xdict[xkateg1]] = xdict[xkateg2]
return mapping_res
def __add__(self, csv_obj):
"""
"""
return pd.concat([self.data, csv_obj.data])
import pandas as pd
import json
import requests
import datetime as dt
import timeout_decorator
import ssl
import logging
logger = logging.getLogger("Rotating Log")
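# The helpers below assume `sys_url` is a GBFS auto-discovery feed (gbfs.json):
# its English ('en') feed list is searched for the station_status,
# station_information and system_information sub-feed URLs.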
def get_station_status_url(sys_url):
data = requests.get(sys_url).json()
return [x for x in data['data']['en']['feeds'] if x['name']=='station_status'][0]['url']
def get_station_info_url(sys_url):
data = requests.get(sys_url).json()
return [x for x in data['data']['en']['feeds'] if x['name']=='station_information'][0]['url']
def get_system_info_url(sys_url):
data = requests.get(sys_url).json()
return [x for x in data['data']['en']['feeds'] if x['name']=='system_information'][0]['url']
@timeout_decorator.timeout(30)
def query_system_info(sys_url):
url = get_system_info_url(sys_url)
data = requests.get(url).json()
return data
@timeout_decorator.timeout(30)
def query_station_status(sys_url):
"""
Query station_status.json
"""
url = get_station_status_url(sys_url)
data = requests.get(url).json()
try:
df = pd.DataFrame(data['data']['stations'])
except KeyError:
df = pd.DataFrame(data['stations'])
df = df.drop_duplicates(['station_id','last_reported'])
try:
df['datetime'] = data['last_updated']
df['datetime'] = df['datetime'].map(lambda x: dt.datetime.utcfromtimestamp(x))
except KeyError:
df['datetime'] = dt.datetime.utcnow()
df['datetime'] = df['datetime'].dt.tz_localize('UTC')
df = df[['datetime','num_bikes_available','num_docks_available','is_renting','station_id']]
return df
@timeout_decorator.timeout(30)
def query_station_info(sys_url):
"""
Query station_information.json
"""
url = get_station_info_url(sys_url)
data = requests.get(url).json()
try:
df = pd.DataFrame(data['data']['stations'])
except KeyError:
df = pd.DataFrame(data['stations'])
return df[['name','station_id','lat','lon']]
@timeout_decorator.timeout(30)
def query_free_bikes(sys_url):
"""
Query free_bikes.json
"""
url = get_free_bike_url(sys_url)
data = requests.get(url).json()
try:
df = pd.DataFrame(data['data']['bikes'])
except KeyError:
df = pd.DataFrame(data['bikes'])
import pandas as pd
import os
from os import listdir
from os.path import isfile, join
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import Ridge
from sklearn.linear_model import ElasticNet
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from thundersvm import SVR
from sklearn.svm import LinearSVR
import time
import util
def models(dataset_canarin_seconds):
# - Method 2: predict the calibrated decibel level (db_phon) from the microphone decibels and the Canarin environmental data, using one sample per second.
# - As in the previous case, we use data with a one-second sampling frequency (the same as the microphone and the sound level meter).
# Because of the amount of data, we again restrict ourselves to one month of sampling.
# - Split the dataset
X_train, X_val, y_train, y_val = train_test_split(
dataset_canarin_seconds.drop("db_phon", axis=1), # X = everything except db_phon
dataset_canarin_seconds["db_phon"], # y = db_phon
test_size=1/3, random_state=42 # split parameters
)
# - Linear regression. (multivariate regression)
print("Linear regression")
lrm = LinearRegression()
lrm.fit(X_train, y_train)
# - Print out the error metrics
util.print_error_stats(X_val, y_val, lrm)
# - Inspect the coefficients the model assigns to each feature to understand which ones are most important.
print(pd.Series(lrm.coef_, index=X_train.columns))
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import time
import random
import datetime
from datetime import date
from datetime import timedelta
from dateutil.relativedelta import relativedelta
from google.cloud import bigquery
import os
os.environ["GOOGLE_APPLICATION_CREDENTIALS"]="data/ironia-data-dfc6335a0abe.json"
client = bigquery.Client()
def generate_df(ids,working_dates,period=1,risk=0.05):
pd.options.mode.chained_assignment = None
main_df = pd.DataFrame(columns=["Ironia_id","Name","MDD","DaR","CDaR","RF","VaR", "CVaR", "MAD"])
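# Column meanings, as computed below: MDD = maximum drawdown, DaR = the `risk`
# percentile of the drawdown series, CDaR = mean of the drawdowns at or below
# DaR, RF = recovery factor (last NAV / maximum drawdown), VaR = the `risk`
# percentile of daily NAV returns, CVaR = mean of the returns at or below VaR,
# MAD = mean absolute deviation of daily returns.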
prices_df = pd.DataFrame()
for i in ids:
sql = f'SELECT nav,date,name FROM `ironia-data.Ironia_Ext.precios` WHERE Ironia_id={i[0]} and date BETWEEN "2016-01-01" and "2018-12-31" LIMIT 10000 '
df = pd.read_gbq(sql)
df.dropna(inplace=True)
df["date"] = pd.to_datetime(df["date"])
df.sort_values(by="date",inplace=True)
df.set_index(df["date"],inplace=True)
if len(df) >= len(working_dates): #Number of working days from 2016 to 2018
try:
df = df.loc[working_dates]
df["nav"] = df["nav"].interpolate(method="linear")
full_series = True
except:
full_series = False
if full_series:
prices_df = pd.merge(prices_df,df["nav"],how="outer", left_index=True, right_index=True)
prices_df.rename(columns={'nav':df["name"].values[0] }, inplace=True)
prices_df = prices_df.interpolate(method="linear")
last_date = date.fromisoformat(str(df[-1:].index.values)[2:12])
years_ago = (last_date - relativedelta(years=period)).isoformat()
df = df[years_ago:last_date]
df.drop("date",axis=1,inplace=True)
df["nav return"] = (df["nav"]-df["nav"].shift(1))/df["nav"].shift(1)
df["nav return"][0] = 0
VaR = np.percentile(df["nav return"],risk*100)
CVaR = np.mean(df["nav return"].loc[df["nav return"]<=VaR])
max_acum = df["nav"].cummax()
df["drawdown"] = -(df["nav"]-max_acum)/max_acum
MDD = np.max(df["drawdown"].values)
recovery_factor = df["nav"].iloc[-1]/np.max(df["drawdown"].values)
DaR = np.percentile(df["drawdown"],risk*100)
CDaR = np.mean(df["drawdown"].loc[df["drawdown"]<=DaR])
MAD = df["nav return"].mad()
main_df = main_df.append({"Ironia_id":int(i[0]),"Name":df["name"].values[0],"MDD":MDD,"DaR":DaR,"CDaR":CDaR,"RF":recovery_factor,"VaR":VaR, "CVaR":CVaR, "MAD":MAD}, ignore_index=True)
return [main_df,prices_df]
def get_train_test_prices(ids,working_dates_all,start="2016-01-01",train_span="2018-12-31",test_span="2019-12-31",delta=0,print_every=100):
pd.options.mode.chained_assignment = None
index = pd.date_range(start=start,end=test_span)
prices_df = pd.DataFrame(index=index, columns=["Init_"])
from copy import deepcopy
import datetime as pydatetime
from functools import lru_cache
from glob import iglob
import os
import pickle
import zipfile
import attr
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import numpy as np
import pandas as pd
from .__version__ import __version__
from .wue import water_use_efficiency, WUEError
from .hfdata import (
HFData,
HFSummary,
HFDataSource,
HFDataReadError,
TooFewDataError,
)
from .partition import fvspart_progressive, FVSPSolution
from .util import vapor_press_deficit
from .containers import AllFluxes, WUE
from .constants import MOLECULAR_WEIGHT as MW
EC_TOA5 = {
"filetype": "csv",
"skiprows": 4,
"time_col": 0,
"cols": (2, 3, 4, 5, 6, 7, 8),
"temper_unit": "C",
"unit_convert": dict(q=1e-3, c=1e-6, P=1e3),
"na_values": "NAN",
}
EC_TOB1 = {
"filetype": "tob1",
"time_col": 0,
"cols": (3, 4, 5, 6, 7, 8, 9),
"temper_unit": "C",
"unit_convert": dict(q=1e-3, c=1e-6, P=1e3),
}
EC_GHG1 = {
"filetype": "ghg",
"sep": "\t",
"cols": (11, 12, 13, 7, 8, 9, 10),
"time_col": [5, 6],
"unit_convert": dict(q=1e-3 * MW.vapor, c=1e-3 * MW.co2, P=1e3),
"temper_unit": "C",
"skiprows": 8,
"na_values": "NAN",
"to_datetime_kws": {"format": "%Y-%m-%d %H:%M:%S:%f"},
}
HFD_FORMAT = EC_TOA5
WUE_OPTIONS = {
"ci_mod": "const_ratio",
"ci_mod_param": None,
"leaf_temper": None,
"leaf_temper_corr": 0,
"diff_ratio": 1.6,
}
HFD_OPTIONS = {
"bounds": {"c": (0, np.inf), "q": (0, np.inf)},
"rd_tol": 0.5,
"ad_tol": 1024,
"ustar_tol": 0.1,
"correct_external": True,
}
PART_OPTIONS = dict(adjust_fluxes=True)
_bad_ustar = "ustar = {:.4} <= ustar_tol = {:.4}"
_bad_vpd = "vpd = {:.4} Pa <= 0"
_bad_qflux = "Fq = {:.4} <= 0"
_night_mssg = "Nighttime, fluxes all non-stomatal"
_fp_result_str = (
"===============\n"
"Fluxpart Result\n"
"===============\n"
"fluxpart version = {version}\n"
"date = {date}\n"
"---------------\n"
"dataread = {dataread}\n"
"attempt_partition = {attempt_partition}\n"
"partition_success = {partition_success}\n"
"mssg = {mssg}\n"
"label = {label}\n"
"sunrise = {sunrise}\n"
"sunset = {sunset}\n"
+ AllFluxes().results_str()
+ "\n"
+ HFSummary().results_str()
+ "\n"
+ WUE().results_str()
+ "\n"
+ FVSPSolution().results_str()
)
class Error(Exception):
pass
class FluxpartError(Error):
def __init__(self, message):
self.message = message
def fvspart(
file_or_dir,
time_sorted=False,
interval=None,
hfd_format=None,
hfd_options=None,
meas_wue=None,
wue_options=None,
part_options=None,
label=None,
stdout=True,
verbose=True,
):
"""Partition CO2 & H2O fluxes into stomatal & nonstomatal components.
Provides a full implementation of the flux variance similarity
partitioning algorithm [SS08]_[SAAS+18]_: reads high frequency eddy
covariance data; performs data transformations and data QA/QC;
analyzes water vapor and carbon dioxide fluxes; and partitions the
fluxes into stomatal (transpiration, photosynthesis) and nonstomatal
(evaporation, respiration) components.
The following notation is used in variable naming and documentation
to represent meteorological quantities::
u, v, w = wind velocities
q = water vapor mass concentration
c = carbon dioxide mass concentration
T = air temperature
P = total air pressure
Parameters
----------
For parameters explanation see: :func:`~fluxpart.api.fvs_partition`
Returns
-------
:class:`~fluxpart.fluxpart.FluxpartResult`
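Examples
--------
Illustrative call (not taken from the source): partition a directory of
TOA5 files in 30-minute blocks using the predefined format, e.g.
``fvspart("/path/to/ec_files", hfd_format="EC-TOA5", interval="30min")``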
"""
if hfd_format is None:
hfd_format = deepcopy(HFD_FORMAT)
elif type(hfd_format) is str and hfd_format.upper() == "EC-TOA5":
hfd_format = deepcopy(EC_TOA5)
elif type(hfd_format) is str and hfd_format.upper() == "EC-TOB1":
hfd_format = deepcopy(EC_TOB1)
elif type(hfd_format) is str and hfd_format.upper() == "EC-GHG1":
hfd_format = deepcopy(EC_GHG1)
else:
hfd_format = deepcopy(hfd_format)
_validate_hfd_format(hfd_format)
hfd_options = {**HFD_OPTIONS, **(hfd_options or {})}
part_options = {**PART_OPTIONS, **(part_options or {})}
unit_convert = hfd_format.pop("unit_convert", {})
converters = {k: _converter_func(v, 0.0) for k, v in unit_convert.items()}
temper_unit = hfd_format.pop("temper_unit").upper()
if temper_unit == "C" or temper_unit == "CELSIUS":
converters["T"] = _converter_func(1.0, 273.15)
hfd_format["converters"] = converters
if "daytime" in part_options and type(part_options["daytime"]) is str:
part_options["daytime"] = _lookup(part_options["daytime"], 0, 1, 2)
if stdout:
print("Getting filenames ...")
files = _files(file_or_dir)
if len(files) == 0:
print("No files found!")
return
if time_sorted:
sorted_files = files
else:
if stdout:
print("Reading {} file datetimes ...".format(len(files)))
times = _peektime(files, **hfd_format)
if stdout:
print("Time sorting data files ...")
sorted_files = [f for t, f in sorted(zip(times, files))]
if stdout:
print("Creating data source ...")
reader = HFDataSource(sorted_files, **hfd_format).reader(interval=interval)
results = []
if stdout:
print("Beginning partitioning analyses ...")
while True:
try:
hfdat = HFData(next(reader))
except HFDataReadError as e:
results.append(FVSResult(label=label, mssg=e.args[0]))
continue
except StopIteration:
break
datetime = hfdat.dataframe.index[0]
date = datetime.date()
time = datetime.time()
if stdout:
print("{}: ".format(datetime), end="")
sunrise, sunset = np.nan, np.nan
nighttime = False
if "daytime" in part_options:
if callable(part_options["daytime"]):
sunrise, sunset = part_options["daytime"](date)
else:
sunrise, sunset = part_options["daytime"]
sunrise = pd.to_datetime(sunrise).time()
sunset = pd.to_datetime(sunset).time()
# shift sunrise so we partition if rise occurs during interval
if interval:
sunrise = pd.Timestamp.combine(date, sunrise)
sunrise = (sunrise - pd.Timedelta(interval)).time()
nighttime = time < sunrise or time > sunset
try:
hfdat, hfsum = _set_hfdata(hfdat, **hfd_options)
except (TooFewDataError, FluxpartError) as e:
results.append(
FVSResult(
label=datetime,
mssg=e.args[0],
dataread=True,
attempt_partition=False,
partition_success=False,
sunrise=sunrise,
sunset=sunset,
)
)
if stdout and verbose:
print(e.args[0])
continue
if nighttime and hfsum.cov_w_c >= 0:
fluxes = AllFluxes(
temper_kelvin=hfsum.T, **_set_all_fluxes_nonstomatal(hfsum)
)
mssg = _night_mssg
results.append(
FVSResult(
label=datetime,
attempt_partition=False,
partition_success=True,
mssg=mssg,
dataread=True,
fluxes=fluxes,
hfsummary=hfsum,
sunrise=sunrise,
sunset=sunset,
)
)
if stdout and verbose:
print(mssg)
continue
try:
leaf_wue = _set_leaf_wue(
meas_wue,
wue_options,
part_options,
hfsum,
date,
datetime,
temper_unit,
)
except WUEError as e:
results.append(
FVSResult(
label=datetime,
mssg=e.args[0],
dataread=True,
hfsummary=hfsum,
sunrise=sunrise,
sunset=sunset,
)
)
if stdout and verbose:
print(e.args[0])
continue
fluxes, fvsp = fvspart_progressive(
hfdat["w"].values,
hfdat["q"].values,
hfdat["c"].values,
leaf_wue.wue,
part_options["adjust_fluxes"],
)
if fvsp.valid_partition:
fluxes = AllFluxes(**attr.asdict(fluxes), temper_kelvin=hfsum.T)
else:
fluxes = AllFluxes()
results.append(
FVSResult(
label=datetime,
sunrise=sunrise,
sunset=sunset,
dataread=True,
attempt_partition=True,
fluxes=fluxes,
partition_success=fvsp.valid_partition,
mssg=fvsp.mssg,
fvsp_solution=fvsp,
hfsummary=hfsum,
wue=leaf_wue,
)
)
if stdout:
if fvsp.mssg:
print(fvsp.mssg)
else:
print("OK")
return FluxpartResult(results)
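# A minimal sketch of calling fvspart (illustrative only -- the directory name,
# interval, and option values below are hypothetical, not part of this module):
#
#     result = fvspart(
#         "path/to/toa5_files",
#         hfd_format="EC-TOA5",
#         interval="30min",
#         wue_options={"canopy_ht": 0.6, "meas_ht": 3.0},
#         part_options={"adjust_fluxes": True},
#     )
#     result.save_csv("fvs_results.csv")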
def _set_all_fluxes_nonstomatal(hfsum):
return dict(
Fq=hfsum.cov_w_q,
Fqt=0.0,
Fqe=hfsum.cov_w_q,
Fc=hfsum.cov_w_c,
Fcp=0.0,
Fcr=hfsum.cov_w_c,
)
def _set_only_total_fluxes(hfsum):
return dict(
Fq=hfsum.cov_w_q,
Fqt=np.nan,
Fqe=np.nan,
Fc=hfsum.cov_w_c,
Fcp=np.nan,
Fcr=np.nan,
)
def _set_hfdata(hfdata, bounds, rd_tol, ad_tol, correct_external, ustar_tol):
hfdata.cleanse(bounds, rd_tol, ad_tol)
hfdata.truncate_pow2()
if correct_external:
hfdata.correct_external()
hfsum = hfdata.summarize()
if hfsum.ustar < ustar_tol:
raise FluxpartError(_bad_ustar.format(hfsum.ustar, ustar_tol))
vpd = vapor_press_deficit(hfsum.rho_vapor, hfsum.T)
if vpd <= 0:
raise FluxpartError(_bad_vpd.format(vpd))
if hfsum.cov_w_q <= 0:
raise FluxpartError(_bad_qflux.format(hfsum.cov_w_q))
return hfdata, hfsum
def flux_partition(*args, **kws):
return fvspart(*args, **kws)
class FVSResult(object):
"""FVS partitioning result."""
def __init__(
self,
dataread=False,
attempt_partition=False,
partition_success=False,
mssg=None,
label=None,
sunrise=None,
sunset=None,
fluxes=AllFluxes(),
hfsummary=HFSummary(),
wue=WUE(),
fvsp_solution=FVSPSolution(),
):
"""Fluxpart result.
Parameters
----------
dataread, attempt_partition, partition_success : bool
Indicates success or failure in reading high frequency data,
            attempting and obtaining a valid partitioning solution.
mssg : str
Possibly informative message if `dataread` or `partition_success`
are False
label : optional
Pandas datetime.
fluxes : :class:`~fluxpart.containers.AllFluxes`
fvsp_solution : :class:`~fluxpart.containers.FVSPResult`
wue : :class:`~fluxpart.containers.WUE`
hfsummary : :class:`~fluxpart.hfdata.HFSummary`
"""
self.version = __version__
self.dataread = dataread
self.attempt_partition = attempt_partition
self.partition_success = partition_success
self.mssg = mssg
self.fluxes = fluxes
self.label = label
self.sunrise = sunrise
self.sunset = sunset
self.fvsp_solution = fvsp_solution
self.wue = wue
self.hfsummary = hfsummary
def __str__(self):
fluxpart = attr.asdict(self.fvsp_solution)
wqc_data = fluxpart.pop("wqc_data")
rootsoln = fluxpart.pop("rootsoln")
return _fp_result_str.format(
            date=pydatetime.datetime.now(),
version=self.version,
dataread=self.dataread,
attempt_partition=self.attempt_partition,
partition_success=self.partition_success,
mssg=self.mssg,
label=self.label,
sunrise=self.sunrise,
sunset=self.sunset,
**attr.asdict(self.fluxes),
**attr.asdict(self.hfsummary),
**attr.asdict(self.wue),
**fluxpart,
**wqc_data,
**rootsoln,
)
class FluxpartResult(object):
def __init__(self, fp_results):
if isinstance(fp_results, pd.DataFrame):
self.df = fp_results
return
index = pd.DatetimeIndex(r.label for r in fp_results)
df0 = pd.DataFrame(
(r.fluxes.common_units() for r in fp_results),
index=index,
columns=fp_results[0].fluxes.common_units().keys(),
)
df1 = pd.DataFrame(
(r.hfsummary.common_units() for r in fp_results),
index=index,
columns=fp_results[0].hfsummary.common_units().keys(),
)
df2 = pd.DataFrame(
(r.wue.common_units() for r in fp_results),
index=index,
columns=fp_results[0].wue.common_units().keys(),
)
df3 = pd.DataFrame(
(r.fvsp_solution.common_units() for r in fp_results),
index=index,
columns=fp_results[0].fvsp_solution.common_units().keys(),
)
df4 = pd.DataFrame(
{
"dataread": [r.dataread for r in fp_results],
"attempt_partition": [r.attempt_partition for r in fp_results],
"partition_success": [r.partition_success for r in fp_results],
"mssg": [r.mssg for r in fp_results],
"sunrise": [r.sunrise for r in fp_results],
"sunset": [r.sunset for r in fp_results],
},
index=index,
)
self.df = pd.concat(
[df0, df1, df2, df3, df4],
axis=1,
sort=False,
keys=["fluxes", "hfsummary", "wue", "fvsp_solution", "fluxpart"],
)
self.meta = {
"version": fp_results[0].version,
"date": str(pydatetime.datetime.now()),
}
def __str__(self):
if len(self.df) == 1:
return self.istr(0)
else:
return self.df.__str__()
def __getitem__(self, item):
return self.df[item]
def __getattr__(self, x):
return getattr(self.df, x)
def plot_co2(
self,
start=None,
end=None,
units="mass",
components=(0, 1, 2),
ax=None,
**kws,
):
if ax is None:
ax = plt.gca()
if units == "mass":
cols = ["Fc", "Fcp", "Fcr"]
ylab = r"$\mathrm{CO_2\ Flux\ (mg\ m^{-2}\ s^{-1})}$"
else:
cols = ["Fc_mol", "Fcp_mol", "Fcr_mol"]
ylab = r"$\mathrm{CO_2\ Flux\ (umol\ m^{-2}\ s^{-1})}$"
labels = [
r"$\mathrm{F_c}$",
r"$\mathrm{F_{c_p}}$",
r"$\mathrm{F_{c_r}}$",
]
cols = [cols[j] for j in components]
labels = [labels[j] for j in components]
self.df.loc[start:end, ("fluxes", cols)].plot(ax=ax)
ax.legend(labels)
ax.set_ylabel(ylab)
return ax
def plot_h2o(
self,
start=None,
end=None,
units="mass",
components=(0, 1, 2),
ax=None,
**kws,
):
if ax is None:
ax = plt.gca()
if units == "mass":
cols = ["Fq", "Fqt", "Fqe"]
ylab = r"$\mathrm{H_20\ Flux\ (g\ m^{-2}\ s^{-1})}$"
elif units == "mol":
cols = ["Fq_mol", "Fqt_mol", "Fqe_mol"]
ylab = r"$\mathrm{H_20\ Flux\ (mmol\ m^{-2}\ s^{-1})}$"
else:
cols = ["LE", "LEt", "LEe"]
ylab = r"$\mathrm{LE\ (W\ m^{-2})}$"
labels = [
r"$\mathrm{F_q}$",
r"$\mathrm{F_{q_t}}$",
r"$\mathrm{F_{q_e}}$",
]
cols = [cols[j] for j in components]
labels = [labels[j] for j in components]
self.df.loc[start:end, ("fluxes", cols)].plot(ax=ax)
ax.legend(labels)
ax.set_ylabel(ylab)
return ax
def istr(self, i):
"""Return a string representation of the ith result"""
return _fp_result_str.format(
version=self.meta["version"],
date=self.meta["date"],
label=self.df.index[i],
**self.df.iloc[i]["fluxpart"].to_dict(),
**self.df.iloc[i]["fluxes"].to_dict(),
**self.df.iloc[i]["fvsp_solution"].to_dict(),
**self.df.iloc[i]["hfsummary"].to_dict(),
**self.df.iloc[i]["wue"].to_dict(),
)
def save(self, filename):
self.save_pickle(filename)
def save_csv(self, filename):
self.df.to_csv(filename) #, na_rep="NAN")
def save_pickle(self, filename):
self.df.to_pickle(filename)
def _converter_func(slope, intercept):
"""Return a function for linear transform of data."""
if type(slope) is str:
return slope
def func(val):
return slope * val + intercept
return func
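# Quick illustration (arbitrary value): the returned callable applies
# val -> slope * val + intercept, so the Celsius-to-Kelvin converter used above,
# _converter_func(1.0, 273.15), maps 25.0 to 298.15.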
def _files(file_or_dir):
if type(file_or_dir) is str:
file_or_dir = [file_or_dir]
unsorted_files = []
for path in file_or_dir:
if os.path.isfile(path):
unsorted_files.append(path)
continue
if os.path.isdir(path):
path = os.path.join(path, "*")
unsorted_files += iglob(path)
return unsorted_files
def _set_leaf_wue(
meas_wue, wue_options, part_options, hfsum, date, datetime, temper_unit
):
# TODO: these should work with file objects, not just str name
wue_options = {**WUE_OPTIONS, **(wue_options or {})}
heights, leaf_temper = None, None
canopy_ht = wue_options.pop("canopy_ht", None)
meas_ht = wue_options.pop("meas_ht", None)
if "heights" in wue_options:
heights = wue_options.pop("heights")
if not callable(heights):
heights = _lookup(heights, 0, 1, 2)
if "leaf_temper" in wue_options:
leaf_temper = wue_options.pop("leaf_temper")
if type(leaf_temper) is str:
leaf_temper = _lookup(leaf_temper, 0, 1)
if meas_wue:
if type(meas_wue) is str:
meas_wue = _lookup(meas_wue, 0, 1)
try:
if meas_wue:
if callable(meas_wue):
leaf_wue = WUE(wue=meas_wue(datetime))
else:
leaf_wue = WUE(wue=float(meas_wue))
else:
if heights is not None:
if callable(heights):
canopy_ht, meas_ht = heights(date)
else:
canopy_ht, meas_ht = heights
else:
if callable(canopy_ht):
canopy_ht = canopy_ht(date)
if callable(meas_ht):
meas_ht = meas_ht(date)
leaf_t = None
if leaf_temper is not None:
if callable(leaf_temper):
leaf_t = leaf_temper(datetime)
else:
leaf_t = float(leaf_temper)
if temper_unit == "C" or temper_unit == "CELSIUS":
leaf_t = leaf_t + 273.15
leaf_wue = water_use_efficiency(
hfsum,
canopy_ht=canopy_ht,
meas_ht=meas_ht,
leaf_temper=leaf_t,
**wue_options,
)
return leaf_wue
except WUEError:
raise
def _peektime(files, **kwargs):
if kwargs["filetype"] == "csv" or kwargs["filetype"] == "ghg":
dtcols = kwargs["time_col"]
if type(dtcols) is int:
dtcols = [dtcols]
sep = kwargs.get("delimiter", ",")
sep = kwargs.get("sep", sep)
datetimes = []
to_datetime_kws = kwargs.get("to_datetime_kws", {})
if kwargs["filetype"] == "csv":
for file_ in files:
with open(file_, "rt") as f:
for _ in range(kwargs.get("skiprows", 0)):
f.readline()
row = f.readline().split(sep)
tstamp = " ".join([row[i].strip("'\"") for i in dtcols])
datetimes.append(pd.to_datetime(tstamp, **to_datetime_kws))
elif kwargs["filetype"] == "ghg":
for file_ in files:
with zipfile.ZipFile(file_) as z:
with z.open(os.path.basename(file_)[:-3] + "data", "r") as f:
for _ in range(kwargs.get("skiprows", 0)):
f.readline()
row = f.readline().decode("utf-8").split(sep)
tstamp = " ".join([row[i].strip("'\"") for i in dtcols])
datetimes.append(pd.to_datetime(tstamp, **to_datetime_kws))
else: # "tob1"
source = HFDataSource(files, count=5, **kwargs)
datetimes = [df.index[0] for df in source.reader(interval=None)]
return datetimes
def _validate_hfd_format(hfd_format):
if "cols" not in hfd_format:
raise Error("No value for hfd_format['cols'] given.")
if "filetype" not in hfd_format:
raise Error("No value for hfd_format['filetype'] given.")
if hfd_format["filetype"] not in ("csv", "tob1", "ghg"):
raise Error(f"Unrecognized filetype: {hfd_format['filetype']}")
def _lookup(csv_file, date_icol, icol1, icol2=None, method="ffill"):
"""Create a function for looking up data in csv file.
date_icol, icol1, icol2 : int
column index for the respective data
method : str
Interpolation method used with pandas df.index.get_loc. The
default 'ffill' returns the PREVIOUS values if no exact date
match is found in the lookup.
"""
df = pd.read_csv(csv_file, index_col=date_icol, parse_dates=True)
@lru_cache()
def func(date):
        ix = df.index.get_loc(pd.to_datetime(date), method=method)
        # icol1/icol2 are given w.r.t. the csv file, so shift by one because the
        # date column has become the index
        if icol2 is None:
            return df.iloc[ix, icol1 - 1]
        return df.iloc[ix, icol1 - 1], df.iloc[ix, icol2 - 1]
    return func
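# The lookup csv is assumed to hold the date column followed by the value
# column(s); e.g. a hypothetical "daytime" file (not shipped with this module):
#
#     date,sunrise,sunset
#     2019-07-01,05:32,20:41
#     2019-07-02,05:33,20:40
#
# so that _lookup("daytime.csv", 0, 1, 2)(date) returns (sunrise, sunset).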
# -*- coding: utf-8 -*-
from io import BytesIO
import pandas as pd
import pykintone
from karura.env import get_kintone_env
from karura.core.kintone.application import Application
from karura.core.kintone.kintone_exception import kintoneException
from karura.core.dataframe_extension import FType, FTypeNames, DataFrameExtension
class kintoneRequest():
def __init__(self, env=None):
self.env = env if env is not None else get_kintone_env()
def request_to_dfe(self, request_json):
app_id = request_json["app_id"]
field_settings = request_json["fields"]
view_name = request_json["view"]
# check fields setting
features = []
target = ""
for fs in field_settings:
if field_settings[fs]["usage"] == 1:
target = fs
elif field_settings[fs]["usage"] == 0:
features.append(fs)
if not target:
raise kintoneException("予測対象の項目が指定されていません")
if len(features) == 0:
raise kintoneException("予測に使用する項目が指定されていません")
# confirm view
app = pykintone.login(self.env.domain, self.env.login_id, self.env.password).app(app_id)
view = None
query = ""
if view_name:
views = app.administration().view().get().views
for v in views:
if v.name == view_name:
view = v
break
if view is None:
raise kintoneException("指定された名前のビュー「{}」は、アプリに作成されていません".format(view_name))
# make query
query = view.filter_cond if view.filter_cond else ""
query += " order by " + view.sort if view.sort else ""
# make field setting
fields = features if view is None else [f for f in view.fields if f in features]
fields = [target] + fields
# make dfe
kapp = Application(self.env)
dfe = kapp.load(app_id, query, fields, target)
return dfe
def record_to_df(self, request_json):
app_id = request_json["app_id"]
values = request_json["values"]
kapp = Application(self.env)
fields_d = kapp.get_fields(app_id)
columns = []
data = []
for k in values:
if k not in fields_d:
continue
f = fields_d[k]
columns.append(f.label)
data.append(values[k])
df = pd.DataFrame(data=[data], columns=columns)
return df
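    # Illustrative request payload handled by record_to_df (the app id and field
    # codes here are hypothetical):
    #     {"app_id": 123, "values": {"price": 1200, "category": "book"}}
    # Only keys present in the app's field definitions are kept as columns.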
def download(self, request_json):
app_id = request_json["app_id"]
view_name = request_json["view"]
# confirm view
app = pykintone.login(self.env.domain, self.env.login_id, self.env.password).app(app_id)
view = None
query = ""
fields = []
if view_name:
views = app.administration().view().get().views
for v in views:
if v.name == view_name:
view = v
break
if view is None:
raise kintoneException("指定された名前のビュー「{}」は、アプリに作成されていません".format(view_name))
# make query
query = view.filter_cond if view.filter_cond else ""
query += " order by " + view.sort if view.sort else ""
fields = view.fields
# make dfe
kapp = Application(self.env)
dfe = kapp.load(app_id, query, fields)
return dfe
def file_to_df(self, byte_str):
fileio = BytesIO(byte_str)
columns = []
ftype_names = []
index = 0
for line in fileio:
items = line.decode("utf-8").split("\t")
items = [i.strip() for i in items]
if index == 0:
columns = items
elif index == 1:
ftype_names = items
index += 1
if index == 2:
break
        df = pd.read_csv(fileio, encoding="utf-8", sep="\t")
import warnings
import numpy as np
import pytest
from pandas import (
DataFrame,
MultiIndex,
Series,
date_range,
)
import pandas._testing as tm
from pandas.core.algorithms import safe_sort
class TestPairwise:
# GH 7738
@pytest.mark.parametrize("f", [lambda x: x.cov(), lambda x: x.corr()])
def test_no_flex(self, pairwise_frames, pairwise_target_frame, f):
# DataFrame methods (which do not call flex_binary_moment())
result = f(pairwise_frames)
tm.assert_index_equal(result.index, pairwise_frames.columns)
tm.assert_index_equal(result.columns, pairwise_frames.columns)
expected = f(pairwise_target_frame)
# since we have sorted the results
# we can only compare non-nans
result = result.dropna().values
expected = expected.dropna().values
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
@pytest.mark.parametrize(
"f",
[
lambda x: x.expanding().cov(pairwise=True),
lambda x: x.expanding().corr(pairwise=True),
lambda x: x.rolling(window=3).cov(pairwise=True),
lambda x: x.rolling(window=3).corr(pairwise=True),
lambda x: x.ewm(com=3).cov(pairwise=True),
lambda x: x.ewm(com=3).corr(pairwise=True),
],
)
def test_pairwise_with_self(self, pairwise_frames, pairwise_target_frame, f):
# DataFrame with itself, pairwise=True
# note that we may construct the 1st level of the MI
# in a non-monotonic way, so compare accordingly
result = f(pairwise_frames)
tm.assert_index_equal(
result.index.levels[0], pairwise_frames.index, check_names=False
)
tm.assert_numpy_array_equal(
safe_sort(result.index.levels[1]),
safe_sort(pairwise_frames.columns.unique()),
)
tm.assert_index_equal(result.columns, pairwise_frames.columns)
expected = f(pairwise_target_frame)
# since we have sorted the results
# we can only compare non-nans
result = result.dropna().values
expected = expected.dropna().values
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
@pytest.mark.parametrize(
"f",
[
lambda x: x.expanding().cov(pairwise=False),
lambda x: x.expanding().corr(pairwise=False),
lambda x: x.rolling(window=3).cov(pairwise=False),
lambda x: x.rolling(window=3).corr(pairwise=False),
lambda x: x.ewm(com=3).cov(pairwise=False),
lambda x: x.ewm(com=3).corr(pairwise=False),
],
)
def test_no_pairwise_with_self(self, pairwise_frames, pairwise_target_frame, f):
# DataFrame with itself, pairwise=False
result = f(pairwise_frames)
tm.assert_index_equal(result.index, pairwise_frames.index)
tm.assert_index_equal(result.columns, pairwise_frames.columns)
expected = f(pairwise_target_frame)
# since we have sorted the results
# we can only compare non-nans
result = result.dropna().values
expected = expected.dropna().values
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
@pytest.mark.parametrize(
"f",
[
lambda x, y: x.expanding().cov(y, pairwise=True),
lambda x, y: x.expanding().corr(y, pairwise=True),
lambda x, y: x.rolling(window=3).cov(y, pairwise=True),
lambda x, y: x.rolling(window=3).corr(y, pairwise=True),
lambda x, y: x.ewm(com=3).cov(y, pairwise=True),
lambda x, y: x.ewm(com=3).corr(y, pairwise=True),
],
)
def test_pairwise_with_other(
self, pairwise_frames, pairwise_target_frame, pairwise_other_frame, f
):
# DataFrame with another DataFrame, pairwise=True
result = f(pairwise_frames, pairwise_other_frame)
tm.assert_index_equal(
result.index.levels[0], pairwise_frames.index, check_names=False
)
tm.assert_numpy_array_equal(
safe_sort(result.index.levels[1]),
safe_sort(pairwise_other_frame.columns.unique()),
)
expected = f(pairwise_target_frame, pairwise_other_frame)
# since we have sorted the results
# we can only compare non-nans
result = result.dropna().values
expected = expected.dropna().values
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
@pytest.mark.parametrize(
"f",
[
lambda x, y: x.expanding().cov(y, pairwise=False),
lambda x, y: x.expanding().corr(y, pairwise=False),
lambda x, y: x.rolling(window=3).cov(y, pairwise=False),
lambda x, y: x.rolling(window=3).corr(y, pairwise=False),
lambda x, y: x.ewm(com=3).cov(y, pairwise=False),
lambda x, y: x.ewm(com=3).corr(y, pairwise=False),
],
)
def test_no_pairwise_with_other(self, pairwise_frames, pairwise_other_frame, f):
# DataFrame with another DataFrame, pairwise=False
result = (
f(pairwise_frames, pairwise_other_frame)
if pairwise_frames.columns.is_unique
else None
)
if result is not None:
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
# we can have int and str columns
expected_index = pairwise_frames.index.union(pairwise_other_frame.index)
expected_columns = pairwise_frames.columns.union(
pairwise_other_frame.columns
)
tm.assert_index_equal(result.index, expected_index)
tm.assert_index_equal(result.columns, expected_columns)
else:
with pytest.raises(ValueError, match="'arg1' columns are not unique"):
f(pairwise_frames, pairwise_other_frame)
with pytest.raises(ValueError, match="'arg2' columns are not unique"):
f(pairwise_other_frame, pairwise_frames)
@pytest.mark.parametrize(
"f",
[
lambda x, y: x.expanding().cov(y),
lambda x, y: x.expanding().corr(y),
lambda x, y: x.rolling(window=3).cov(y),
lambda x, y: x.rolling(window=3).corr(y),
lambda x, y: x.ewm(com=3).cov(y),
lambda x, y: x.ewm(com=3).corr(y),
],
)
def test_pairwise_with_series(self, pairwise_frames, pairwise_target_frame, f):
# DataFrame with a Series
result = f(pairwise_frames, Series([1, 1, 3, 8]))
tm.assert_index_equal(result.index, pairwise_frames.index)
tm.assert_index_equal(result.columns, pairwise_frames.columns)
expected = f(pairwise_target_frame, Series([1, 1, 3, 8]))
# since we have sorted the results
# we can only compare non-nans
result = result.dropna().values
expected = expected.dropna().values
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
result = f(Series([1, 1, 3, 8]), pairwise_frames)
tm.assert_index_equal(result.index, pairwise_frames.index)
tm.assert_index_equal(result.columns, pairwise_frames.columns)
expected = f(Series([1, 1, 3, 8]), pairwise_target_frame)
# since we have sorted the results
# we can only compare non-nans
result = result.dropna().values
expected = expected.dropna().values
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
def test_corr_freq_memory_error(self):
# GH 31789
s = Series(range(5), index=date_range("2020", periods=5))
result = s.rolling("12H").corr(s)
expected = Series([np.nan] * 5, index=date_range("2020", periods=5))
tm.assert_series_equal(result, expected)
def test_cov_mulittindex(self):
# GH 34440
columns = MultiIndex.from_product([list("ab"), list("xy"), list("AB")])
index = range(3)
df = DataFrame(np.arange(24).reshape(3, 8), index=index, columns=columns)
result = df.ewm(alpha=0.1).cov()
index = MultiIndex.from_product([range(3), list("ab"), list("xy"), list("AB")])
columns = MultiIndex.from_product([list("ab"), list("xy"), list("AB")])
expected = DataFrame(
np.vstack(
(
np.full((8, 8), np.NaN),
np.full((8, 8), 32.000000),
np.full((8, 8), 63.881919),
)
),
index=index,
columns=columns,
)
tm.assert_frame_equal(result, expected)
def test_multindex_columns_pairwise_func(self):
# GH 21157
columns = MultiIndex.from_arrays([["M", "N"], ["P", "Q"]], names=["a", "b"])
df = DataFrame(np.ones((5, 2)), columns=columns)
result = df.rolling(3).corr()
expected = DataFrame(
np.nan,
index=MultiIndex.from_arrays(
[np.repeat(np.arange(5), 2), ["M", "N"] * 5, ["P", "Q"] * 5],
names=[None, "a", "b"],
),
columns=columns,
)
        tm.assert_frame_equal(result, expected)
import numpy as np
import pandas as pd
from json import JSONDecoder, JSONDecodeError # for reading the JSON data files
import re # for regular expressions
import os # for os related operations
# Hyperparameters
num_classes = 1
num_features = 14 # number of 'color' channels for the input 1D 'image'
time_steps = 60 # number of pixels for the input 1D 'image'
# all features
feature_names = ['TOTUSJH', 'TOTBSQ', 'TOTPOT', 'TOTUSJZ', 'ABSNJZH', 'SAVNCPP', 'USFLUX', 'TOTFZ', 'MEANPOT', 'EPSZ',
'MEANSHR', 'SHRGT45', 'MEANGAM', 'MEANGBT', 'MEANGBZ', 'MEANGBH', 'MEANJZH', 'TOTFY', 'MEANJZD',
'MEANALP', 'TOTFX', 'EPSY', 'EPSX', 'R_VALUE', 'XR_MAX']
# relevant features chosen by Fisher ranking score and by manually looking at the histograms in positive and negative classes.
# I choose those features with relatively high Fisher ranking score & those whose histograms look different in
# positive and negative classes.
# I further drop features according to their physical definitions. When some features' definitions are related to each
# other, I first double-check their correlation by looking at their scatter plot. If their correlation is confirmed visually,
# I drop n features from the correlated group when there are n confirmed correlations.
relevant_features_0 = ['TOTUSJH','TOTBSQ','TOTPOT','TOTUSJZ','ABSNJZH','SAVNCPP','USFLUX','TOTFZ',
'EPSZ','MEANSHR','SHRGT45','MEANGAM','MEANGBT','MEANGBZ','MEANGBH','MEANJZD','R_VALUE']
# By observing the histograms of relevant features, their histograms can be grouped into four categories.
# right skewed with extreme outliers, right skewed without extreme outliers, left skewed with extreme outliers, non skewed
right_skewed_features = ['TOTUSJH', 'TOTBSQ', 'TOTPOT', 'TOTUSJZ', 'ABSNJZH', 'SAVNCPP', 'USFLUX', 'EPSZ', 'MEANSHR', 'MEANGAM', 'MEANGBH', 'MEANJZD']
right_skewed_features_with_ol = ['TOTBSQ', 'TOTPOT', 'TOTUSJZ', 'SAVNCPP', 'USFLUX', 'MEANSHR', 'MEANGAM', 'MEANGBH', 'MEANJZD']
right_skewed_features_without_ol = ['TOTUSJH', 'ABSNJZH', 'EPSZ']
left_skewed_features_with_ol = ['TOTFZ']
non_skewed_features = ['MEANGBT', 'R_VALUE']
# I further decide that TOTFZ is correlated with EPSZ and TOTBSQ. Furthermore, since TOTFZ cannot be well scaled yet,
# I decide to drop it for now. Note that TOTFZ is the only feature in the list `left_skewed_features_with_ol`. In the end,
# I select 14 features for fitting the data; their names are stored in the list called `selected_features`.
selected_features = right_skewed_features + non_skewed_features
print('{} are selected for training.'.format(len(selected_features)))
print('selected features include \n',selected_features)
# get the indice for features
indice_right_skewed_with_ol = []
indice_right_skewed_without_ol = []
indice_non_skewed = []
for i in range(0,len(selected_features)):
if selected_features[i] in right_skewed_features_with_ol:
indice_right_skewed_with_ol.append(i)
elif selected_features[i] in right_skewed_features_without_ol:
indice_right_skewed_without_ol.append(i)
elif selected_features[i] in non_skewed_features:
indice_non_skewed.append(i)
scale_params_right_skewed = pd.read_csv('scale_params_right_skewed.csv')
scale_params_right_skewed.set_index('Unnamed: 0', inplace=True)
scale_params_non_skewed = pd.read_csv('scale_params_non_skewed.csv')
scale_params_non_skewed.set_index('Unnamed: 0', inplace=True)
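# Minimal sketch (hypothetical helper, not part of the original pipeline): given a
# raw sample array of shape (time_steps, len(feature_names)), keep only the 14
# selected features, in the order listed in `selected_features`.
selected_idx = [feature_names.index(f) for f in selected_features]
def select_feature_columns(sample):
    """Restrict a (time_steps, 25) sample to the selected feature columns."""
    return sample[:, selected_idx]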
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import math
from sklearn.preprocessing import MinMaxScaler
from sklearn.cluster import DBSCAN
class ClusterBound:
def __init__(self, x1, y1, w, h):
self.x1 = x1
self.x2 = x1 + w
self.y1 = y1
self.y2 = y1 + h
def contains(self, xp, yp):
return self.x1 <= xp <= self.x2 and self.y1 <= yp <= self.y2
class ManualClusterModel():
def __init__(self, cluster_bounds):
self.cluster_bounds = cluster_bounds
def fit(self, X):
def find_cluster(x, cluster_bounds):
for i, c in enumerate(cluster_bounds):
if c.contains(x[0], x[1]):
return i
return -1
self.labels_ = X.apply(lambda x: find_cluster(x, self.cluster_bounds), axis=1)
MirageCalloutClusteringModel = ManualClusterModel([
ClusterBound(162, 169, 64, 65), # van
ClusterBound(227, 173, 32, 41), # b plat
ClusterBound(259, 173, 89, 40), # b front apt
ClusterBound(112, 231, 49, 93), # bench
ClusterBound(162, 214, 167, 41), # b default out of site
ClusterBound(203, 254, 68, 75), # b site
ClusterBound(170, 395, 32, 90), # kitchen door
ClusterBound(207, 396, 133, 90), # kitchen
ClusterBound(342, 234, 54, 46), # side cat
ClusterBound(342, 280, 160, 45), # cat site
ClusterBound(430, 328, 28, 119), # underpass
ClusterBound(463, 409, 218, 38), # cat
ClusterBound(396, 435, 32, 62), # window
ClusterBound(433, 446, 60, 59), # bottom mid
ClusterBound(495, 448, 59, 56), # mid mid
ClusterBound(556, 447, 131, 56), # top mid
ClusterBound(682, 313, 69, 124), # top top mid
ClusterBound(712, 440, 39, 59), # boxes
ClusterBound(383, 571, 84, 79), # jungle
ClusterBound(482, 508, 65, 91), # connector
ClusterBound(573, 504, 179, 28), # mid chair
ClusterBound(469, 601, 66, 54), # connector by stairs
ClusterBound(538, 601, 29, 69), # stairs
ClusterBound(643, 696, 42, 86), # palace deck/shadow
ClusterBound(382, 498, 45, 71), # mid window hidden
ClusterBound(648, 783, 50, 40), # front palace
ClusterBound(441, 827, 43, 49), # ticket booth
ClusterBound(319, 772, 149, 56), # ct
ClusterBound(164, 332, 175, 60), # b market side
ClusterBound(692, 627, 127, 57), # A ramp
ClusterBound(568, 646, 30, 20), # sandwich
ClusterBound(617, 624, 37, 29), # tetris
ClusterBound(480, 741, 42, 47), # triple box
ClusterBound(579, 791, 51, 35), # firebox
ClusterBound(521, 737, 93, 51), # front a site
ClusterBound(479, 671, 158, 65), # open a site
ClusterBound(463, 329, 52, 79) # b short
])
#Convert map coordinates to image coordinates, from <NAME>'s analysis
def pointx_to_resolutionx(xinput,startX=-3217,endX=1912,resX=1024):
sizeX = endX - startX
if startX < 0:
xinput += startX * (-1.0)
else:
xinput += startX
    xoutput = float((xinput / abs(sizeX)) * resX)
return xoutput
def pointy_to_resolutiony(yinput,startY=-3401,endY=1682,resY=1024):
sizeY=endY-startY
if startY < 0:
yinput += startY *(-1.0)
else:
yinput += startY
    youtput = float((yinput / abs(sizeY)) * resY)
return resY-youtput
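# Example (illustrative): converting an in-game Mirage coordinate to the 1024x1024
# radar-image pixel space used by the ClusterBound boxes above.
#     pointx_to_resolutionx(-1000.0)  # ~442.6
#     pointy_to_resolutiony(-500.0)   # ~439.6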
def cluster_positions(firefight_df, cluster_map, verbose=False, scale=True):
"""
Clusters the dataframe spatially into common positions by type of position, map, and team. Clusters DMG_VIC and DMG_ATT together.
Input:
cluster_df: result of DataLoader.load_firefight_df, with columns ['file_round', 'seconds', 'pos_x', 'pos_y', 'hp_dmg']
eps_map: the eps to use for DBSCAN for each pos_type
Output:
the input cluster_df, with new columns ['pos_cluster']
"""
min_max_scaler = MinMaxScaler()
cluster_df = firefight_df.copy()
if scale:
cluster_df[["pos_x", "pos_y"]] = min_max_scaler.fit_transform(cluster_df[["pos_x", "pos_y"]])
cluster_df['pos_cluster'] = None
for map_name in cluster_df['map'].unique():
for team in cluster_df['att_side'].unique():
# Cluster nade positions
for pos_type in [t for t in cluster_df['pos_type'].unique() if t not in ['DMG_VIC', 'DMG_ATT']]:
mask = (cluster_df['map'] == map_name) & (cluster_df['pos_type'] == pos_type) & (cluster_df['att_side'] == team)
group = cluster_df[mask]
# https://medium.com/@tarammullin/dbscan-parameter-estimation-ff8330e3a3bd
cluster_model = cluster_map[pos_type]
#cluster_model = DBSCAN(eps=0.05, min_samples=min_samples)
                pts = pd.concat([group['pos_x'], group['pos_y']], axis=1)
import os
import shutil
import pandas as pd
from adcpipeline import PipelineBase
data = pd.DataFrame(data=[[1, 2, 3], [4, 5, 6]])
import os
import pandas as pd
filepath="C:/Users/ajink/Duties_made_easy"
print("The file path is : ")
os.chdir(filepath)
file=pd.read_excel("DataFile.xlsx",sheet_name="Sheet1",na_values=" ")
#print(file)
####################### Accessing the Required Data ########################
def deleterow(i):
for j in range(i):
teacherfile.drop(teacherfile.index[j], inplace=True)
teacherfile=pd.read_excel("DataFile.xlsx",sheet_name="old Jr. Sup. order Nov. 2019", na_values=" ")
#deleterow(3)
to_delete=[0,1,2]
teacherfile.drop(teacherfile.index[to_delete], inplace=True)
colname=teacherfile.loc[3,:]
colname[7]="Favorable Dates"
teacherfile.columns=colname
teacherfile=teacherfile.drop('Jr. Sup. Sign', axis=1)
teacherfile=teacherfile.drop(index=3,axis=0)
teacherfile=teacherfile.set_index("Sr. No.",drop=False)
reqfile=teacherfile.loc[1.0:116.0,["Name of the faculty","Total duties","Favorable Dates"]]
#print(reqfile)
############## Getting the Exam Dates
date=file.loc[:,['Day/Date','Year','Time','Total Jr. Sup. Req.']]
weekdays=['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday']
for i in range(len(weekdays)):
weekdays[i]=weekdays[i].title()
#print(weekdays)
datesofexam=[]
dateswithshift=[]
examdate=[]
dayduty=[]
for i in range(len(date['Day/Date'])):
j=date.loc[i][0]
if str(j) == 'nan':
continue
else:
k=j
j=j.split(" ")[0].title()
if j in weekdays:
shift=int(date.iloc[i][2][0])
if shift == 1:
l=k.split(" ")[1].strip() + 'm'
elif shift == 2:
l=k.split(" ")[1].strip() + 'e'
dat=k.split(" ")[1].strip()
dayduty.append((l,date.iloc[i][3]))
dateswithshift.append(l)
datesofexam.append(dat)
con=date.iloc[i][0]+date.iloc[i][2]
examdate.append(con)
else:
continue
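# Illustrative: a "Monday 23/12/2019" row whose Time string starts with "1"
# (shift 1, assumed morning) yields the dateswithshift entry "23/12/2019m",
# the datesofexam entry "23/12/2019", and the dayduty entry
# ("23/12/2019m", <Total Jr. Sup. Req.>); a shift-2 row gets the "e" suffix instead.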
############################# Processing of Data
class Teacher():
def __init__(self,name ,noOfDuties):
self.name = name
self.noOfDuties = noOfDuties
self.alloteddate=['12/12/2020']
def putdata(self):
print("Teacher Name : ",self.name, "\nTeacher Dutfavdt.iloc[cou][16]favdt.iloc[cou][16]ies : ",self.noOfDuties)
supreq=[]
for i in range(0,len(date['Total Jr. Sup. Req.']),1):
if type(date['Total Jr. Sup. Req.'][i]) is int:
supreq.append(date['Total Jr. Sup. Req.'][i])
thr=[]
for z in range(1,len(reqfile['Name of the faculty'])+1):
thr.append(Teacher(reqfile.loc[z][0], reqfile.loc[z][1]))
toblocks=[]
maxi=max(supreq)
for jrsup in range(1,max(supreq)+1,1):
toblocks.append(jrsup)
day=[]
for x in range(0,len(dateswithshift),1):
day.append([dateswithshift[x],[]])
forteacher=[]
sorteddates=[]
teacherlist=[]
teacherwithduties=[]
reqfile["Favorable Dates"].fillna("0",inplace=True)
for u in range(1,len(reqfile['Name of the faculty'])+1,1):
if reqfile["Favorable Dates"][u] != str(0):
forteacher.append([reqfile['Name of the faculty'][u],[]])
sorteddates.append([reqfile['Name of the faculty'][u],[]])
teacherlist.append(reqfile['Name of the faculty'][u].strip('.'))
teacherwithduties.append((reqfile['Name of the faculty'][u], reqfile["Total duties"][u], reqfile["Favorable Dates"][u]))
#================================= Let's assign the dates
mylist=list(dict.fromkeys(datesofexam))
datesofexam=mylist
fav=[]
favdt=pd.read_excel("DataFile.xlsx",sheet_name="Sheet3", na_values=" ")
favdt['Favorable Dates'].fillna(0,inplace=True)
for i in range(0,len(favdt),1):
if favdt['Favorable Dates'][i] != 0:
date=favdt['Favorable Dates'][i].split("/")
dayi=date[2]
month=date[1]
year=date[0]
date=dayi+"/"+month+"/"+year
nam=favdt['Unnamed: 1'][i]
if nam in teacherlist:
place=teacherlist.index(nam)
dutiesreq=teacherwithduties[place][1]
fav.append((favdt['Unnamed: 1'][i],date,dutiesreq))
else:
continue
def teach(ch):
if ch in teacherlist:
for ijk in range(len(fav)):
if str(fav[ijk][0]) == str(ch):
return [fav[ijk][0], str(fav[ijk][1]), fav[ijk][2]]
else:
return 0
"""
def givestart(pos,p): #pos = position of favdt in list and p = half of dutiesreq to teacher
lb=0 #lb = lower bound of index of datesofexam
ub=len(datesofexam)-1 #ub = upper bound
if (pos >= p) and (ub-pos)>=p:
return pos-p
elif pos+1 <= p:
return lb
else:
return ub
"""
def check(mylist,date,name,dutyreq):
dutyalloted=0
if (date in dateswithshift):
index=dateswithshift.index(date)
count=len(day[index][1])
if name in teacherlist:
pos=teacherlist.index(name)
dutyalloted=len(forteacher[pos][1])
# print("/nline 163: count=",count,"index = ",index)
if count < dayduty[index][1] and count>=0 :
if dutyalloted<dutyreq:
day[index][1].append(name)
if name in teacherlist:
forteacher[teacherlist.index(name)][1].append(day[index][0])
sorteddates[teacherlist.index(name)][1].append(day[index][0])
else:
return 0
def checkavl(date1,date2):
if (date1 in dateswithshift):
index=dateswithshift.index(date1)
counti=len(day[index][1])
if counti < dayduty[index][1]:
return date1
if (date2 in dateswithshift):
index=dateswithshift.index(date2)
counti=len(day[index][1])
if counti < dayduty[index][1]:
return date2
else:
return -1
def allote(teacher):
if teacher == None:
return 0
name=teacher[0]
fvdt=teacher[1]
dutiesreq=teacher[2]
pos=datesofexam.index(fvdt)
i=j=pos
date1=fvdt+'m'
date2=fvdt+'e'
getdate=checkavl(date1, date2)
if getdate != -1:
check(dateswithshift, getdate,name,dutiesreq)
#print("\n pos = ",pos)
chk=0
while chk <= dutiesreq:
i=i-1
j=j+1
if i <0 and j > len(datesofexam):
break
if i >= 0:
if name in teacherlist:
chk=len(forteacher[teacherlist.index(name)][1])
# print("\n\tchk = ",chk,"dutiesreq = ",dutiesreq)
if chk > dutiesreq:
break
fvdt=datesofexam[i]
date1=fvdt+'m'
date2=fvdt+'e'
getdate=checkavl(date1,date2)
if getdate != -1:
check(dateswithshift, getdate,name,dutiesreq)
if j< len(datesofexam):
if name in teacherlist:
chk=len(forteacher[teacherlist.index(name)][1])
# print("\n\tchk = ",chk,"dutiesreq = ",dutiesreq)
if chk > dutiesreq:
break
# print("\n\tj= ",j)
fvdt=datesofexam[j]
date1=fvdt+'m'
date2=fvdt+'e'
getdate=checkavl(date1,date2)
if getdate != -1:
check(dateswithshift, getdate,name,dutiesreq)
#===================== The allocation of teachers starts from here
import random
allotedteacher=[]
i=0
"""
choiceteach=random.choice(teacherlist)
li=teach(choiceteach)
allote(li)
allotedteacher.append(choiceteach)
"""
ak=0
lengthlt=[]
for shady in range(len(dateswithshift)):
ak=ak+dayduty[shady][1]
terminate=0
while terminate < ak:
terminate=0
gin=0
for gabbi in range(len(dateswithshift)) :
gin=len(day[gabbi][1])
terminate=terminate+gin
# print("Terminat", terminate)
choiceteach=random.choice(teacherlist)
li=teach(choiceteach)
if choiceteach in allotedteacher:
continue
elif li == 0:
# print("Got a successful Result")
allotedteacher.append(choiceteach)
teacherlist.remove(choiceteach)
i=i+1
else:
allote(li)
"""
if li[0] in teacherlist:
p=teacherlist.index(li[0])
reqdut=teacherwithduties[p][1]
forteacher[p][1]"""
allotedteacher.append(choiceteach)
i=i+1
#================================Getting equal duties for teachers
minimum=0
def extraallote(teacher='xyz'):
lengthlt.clear()
# print("line 302 : I am in while loop")
for i in range(len(teacherlist)):
length=len(forteacher[i][1])
lengthlt.append(length)
highest=max(lengthlt)
minimum=min(lengthlt)
if highest == minimum:
return 0
# print("minimum = ",minimum)
if highest in lengthlt:
position=lengthlt.index(highest)
date=forteacher[position][1][highest-1]
# print("for maximum : position = ", position,
# "highest - 1 = " ,highest-1,"and date = ",date)
forteacher[position][1].remove(date)
sorteddates[position][1].remove(date)
if teacher !='xyz':
position=teacherlist.index(teacher)
forteacher[position][1].append(date)
sorteddates[position][1].append(date)
else:
position=lengthlt.index(minimum)
forteacher[position][1].append(date)
sorteddates[position][1].append(date)
return minimum
#print("line 302 : I am in while loop")
x=0
while x == 0:
x = extraallote()
"""Teachers greater than 6 duties """
rearr=[]
underalloted={}
underduty={}
"""
for i in range(len(teacherlist)):
if len(forteacher[i][1]) == fav[i][2]:
if fav[i][2] >6:
rearr.append((fav[i][0],fav[i][1],fav[i][2]))
elif (len(forteacher[i][1]) < fav[i][2]) and (fav[i][2] < 6):
if len(forteacher[i][1]) <6:
diff=fav[i][2]-len(forteacher[i][1])
underalloted.update({fav[i][0] : [fav[i][1],fav[i][2],diff]})
elif len(forteacher[i][1]) < fav[i][2]:
if len(forteacher[i][1]) <6:
diff=fav[i][2]-len(forteacher[i][1])
underduty.update({fav[i][0] : [fav[i][1],fav[i][2],diff]})
"""
maximum=max(lengthlt)
count=0
for i in underalloted:
count=count+underalloted[i][2]
for i in underalloted:
for j in range(underalloted[i][2]):
extraallote(i)
count=count-1
for i in underduty:
count=underduty[i][1]
pre=underduty[i][1] - underduty[i][2]
count=int((count/2) + 1)
count=count-pre
for j in range(count):
extraallote(i)
#================================ Arranging the Dates alloted in ascending order
def sortkar(data):
sortedlist=[]
year=year1=[]
month=[]
day=[]
yearws=[]
new=[]
yrmonthday=[]
p=-1
for k in range(len(data)):
i=data[k].split('/')
j=i[2][:len(i[2])-1]
o=i[2][:len(i[2])]
l=i[1]
m=i[0]
sep1=[]
year.append(int(j))
month.append(int(l))
day.append(int(m))
yearws.append(str(o))
# print(i,' ',day,' ',month,' ',year)
year1=year.copy()
month1=month.copy()
day1=day.copy()
while len(year) != 0:
counts=year.count(min(year))
yrmonthday.append([min(year),[]])
p=p+1
while counts !=0:
minimm=min(year)
index=year.index(minimm)
sep1.append((year[index],month[index],day[index]))
yrmonthday[p][1].append(month[index])
day.remove(day[index])
month.remove(month[index])
year.remove(year[index])
counts=counts-1
for i in range(p+1):
yrmonthday[i][1]=list(set(yrmonthday[i][1]))
for i in range(len(yrmonthday)):
for j in range(len(yrmonthday[i][1])):
element=yrmonthday[i][1][j]
yrmonthday[i][1][j]=[element,[]]
for i in range(len(year1)):
for j in range(len(yrmonthday)):
if yrmonthday[j][0]==year1[i] :
for k in range(len(yrmonthday[j][1])):
# print("Very long : ",yrmonthday[j][1][k],month1)
if yrmonthday[j][1][k][0] == month1[i]:
yrmonthday[j][1][k][1].append(day1[i])
for i in range(len(yrmonthday)):
for j in range(len(yrmonthday[i][1])):
yrmonthday[i][1][j][1].sort()
# print("\nline 459 : ",yrmonthday)
sortedlist.clear()
for i in range(len(yrmonthday)):
for j in range(len(yrmonthday[i][1])):
for k in range(len(yrmonthday[i][1][j][1])):
# print("\nline 465 : i = ",i,"\tj = ",j,"\tk = ",k)
# [[2019, [[12, [5, 7, 9, 23, 26, 28, 31]]]], [2020, [[1, [2]]]]]
a=str(yrmonthday[i][1][j][1][k]) + "/" + str(yrmonthday[i][1][j][0]) + "/" + str(yrmonthday[i][0])
sortedlist.append(a)
return sortedlist
# print("sep 1 = ",sep1,"\nyrmonthday = ",yrmonthday,"\nyear1 = ",year1,"\nyear = ",year)
""" if counts == len(year):
mmonth=min(month)
monthcounts=month.count(mmonth)
if monthcounts == len(month):
while len(day) != 0:
mday=min(day)
dayin=day.index(mday)
sortedlist.append(str(day[dayin]) +"/"+ str(month[dayin]) +"/"+ str(yearws[dayin]))
day.remove(day[dayin])
month.remove(month[dayin])
year.remove(year[dayin])
# p=year.index(maximm)
# sortedlist.append(str(day[p]) +"/"+ str(month[p]) +"/"+ str(year[p]))
return(sortedlist)
"""
newlist=[]
for i in range(len(forteacher)):
newlist.clear()
sorteddates[i][1] = sortkar(forteacher[i][1])
for j in range(len(forteacher[i][1])):
word = len(forteacher[i][1][j]) - int(1)
newlist.append((forteacher[i][1][j][0:len(forteacher[i][1][j])-1]))
#===============================Making the file suitable for DataFrame
for i in range(len(day)):
while len(day[i][1]) < maxi:
day[i][1].append(" ")
df=pd.DataFrame(dict(day))
df=df.T
df.columns=[n+1 for n in range(39)]
df1 = pd.DataFrame(forteacher)
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 28 21:24:44 2020
@author: omars
"""
#%% Libraries
import pickle
from datetime import datetime, timedelta
import pandas as pd
import numpy as np
from params import (target_col, date_col, region_col, training_cutoff,
df_path, default_path, nmin, restriction_dict, region_exceptions_dict)
import os
import warnings
warnings.filterwarnings("ignore")
#%% Helper Functions
def save_model(model,
filename):
file_pi = open(filename, 'wb')
pickle.dump(model, file_pi)
def load_model(filename):
filehandler = open(filename, 'rb')
return(pickle.load(filehandler))
def load_data(file=df_path,
target=target_col,
date=date_col,
region=region_col,
training_cutoff=training_cutoff,
validation_cutoff=None,
nmin=nmin,
restriction_dict=restriction_dict[region_col],
region_exceptions=region_exceptions_dict[region_col],
default_path=default_path,
add_countries=False):
if file is None:
df = get_public_data(default_path)
else:
df = pd.read_csv(file)
df.columns = map(str.lower, df.columns)
# delete excepctions
if not (region_exceptions is None):
df = df[~df[region].isin(region_exceptions)].copy()
df = df[df[target] >= nmin[region]]
df.sort_values(by=[region, date], inplace=True)
try:
df["cases_nom"] = df["cases"] / df["population"]
df["deaths_nom"] = df["deaths"] / df["population"]
except KeyError:
pass
df["cases_pct3"] = df.groupby(region)["cases"].pct_change(3).values
df["cases_pct5"] = df.groupby(region)["cases"].pct_change(5).values
try:
df[date] = df[date].apply(lambda x: datetime.strptime(x, '%Y-%m-%d'))
except:
df[date] = df[date].apply(lambda x: datetime.strptime(x, '%m/%d/%Y'))
df = df.sort_values(by=[region, date])
if (region == "state") & add_countries:
data = pd.read_csv('https://covid.ourworldindata.org/data/owid-covid-data.csv')
data.rename(columns={'location': region_col, 'total_cases': 'cases', 'total_tests': 'people_tested', 'total_deaths': 'deaths'}, inplace=True)
data = data.loc[:, [region_col, 'date', 'cases', 'deaths', 'people_tested']]
#data = data[~data.cases.isna()]
data['cases'] = data.groupby('state')['cases'].fillna(method='ffill')
data['deaths'] = data.groupby('state')['deaths'].fillna(method='ffill')
data[date] = pd.to_datetime(df[date])
df.rename(columns={'state': region_col}, inplace=True)
df = df.append(data)
# restrict to a subset of obervations
if not (restriction_dict is None):
masks = []
for col, values in restriction_dict.items():
try:
masks.append(df[col].isin(values))
except:
pass
if masks:
mask_ = masks.pop(0)
for other_mask in masks:
mask_ = (mask_ | other_mask)
df = df[mask_].copy()
df_train = df[df[date] <= training_cutoff]
print("Training set contains {} {}.".format(df[region].nunique(), region))
if validation_cutoff is None:
df_test = df[df[date] > training_cutoff]
df_val = df.copy()
else:
df_test = df[[a and b for a, b in zip(df[date] > training_cutoff, df[date] <= validation_cutoff)]]
df_val = df[df[date] <= validation_cutoff]
return(df, df_val, df_train, df_test)
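# Example call (illustrative; the csv path and cutoff value are hypothetical):
#     df, df_val, df_train, df_test = load_data(
#         file="data/us_states.csv",
#         training_cutoff="2020-06-15",
#         validation_cutoff=None,
#     )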
def dict_to_df(output,
df_validation,
region_col=region_col,
date_col=date_col,
target_col=target_col):
models = list(output.keys())
regions = list(set(df_validation[region_col]))
dates = list(set(df_validation[date_col]))
predictions_rows = []
for region in regions:
for date in dates:
prediction = [region, date]
for model in models:
if region in output[model].keys():
try:
prediction.append(output[model][region].loc[date])
except:
prediction.append(np.nan)
else:
prediction.append(np.nan)
predictions_rows.append(prediction)
df_predictions = pd.DataFrame(predictions_rows, columns=[region_col, date_col] + models)
df_agg = df_predictions.merge(df_validation.loc[:, [region_col, date_col, target_col]], how='left', on=[region_col, date_col])
return df_agg
def mape(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_true))
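# Worked example: for y_true = [100, 200] and y_pred = [110, 190], the absolute
# percentage errors are 10/100 = 0.10 and 10/200 = 0.05, so the MAPE is 0.075.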
def get_mapes(df,
models,
region_col='state',
target_col='cases'):
results = []
for region in set(df[region_col]):
df_sub = df[df[region_col] == region]
results.append([region] + [mape(df_sub[target_col], df_sub[model]) for model in models])
results.append(['Average'] + [mape(df[target_col], df[model]) for model in models])
return(pd.DataFrame(results, columns=[region_col] + ['MAPE_' + model for model in models]))
def get_public_data(path=df_path):
# Import the latest data using the raw data urls
meas_url = 'https://raw.githubusercontent.com/COVID19StatePolicy/SocialDistancing/master/data/USstatesCov19distancingpolicy.csv'
case_url = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_US.csv'
deaths_url = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_US.csv'
mob_url = 'https://www.gstatic.com/covid19/mobility/Global_Mobility_Report.csv'
measures = pd.read_csv(meas_url,
encoding="ISO-8859-1")
cases = pd.read_csv(case_url,
encoding='utf-8')
deaths = pd.read_csv(deaths_url,
encoding='utf-8')
mobility = pd.read_csv(mob_url,
encoding='utf-8')
#<NAME> University daily reports
last_available_date = (datetime.today() - timedelta(1)).strftime('%Y-%m-%d')
dates = pd.date_range(start='2020-04-12', end=datetime.today() - timedelta(3)).strftime('%m-%d-%Y').tolist()
daily_df = pd.DataFrame()
for date in dates:
daily_url = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports_us/' + date + '.csv'
this_daily = pd.read_csv(daily_url,encoding='utf-8')
this_daily['date'] = date # fill the date column with the date from the file name
daily_df = daily_df.append(this_daily)
daily_df.drop(['Country_Region', 'Last_Update', 'Lat', 'Long_', 'UID', 'ISO3'], axis=1, inplace=True)
daily_df.columns = daily_df.columns.str.replace(
'Province_State', 'state').str.replace(
'Confirmed','cases').str.replace(
'Deaths', 'deaths')
daily_df['date'] = pd.to_datetime(daily_df['date'], format="%m-%d-%Y")
# Make sure cases and deaths have the same number of rows
try:
assert cases.shape[0] == deaths.shape[0]
except AssertionError:
print("Different number of rows in cases and deaths dataframes")
print(cases.shape)
print(deaths.shape)
# keep only US mobility data
mobility_us = mobility[mobility.country_region_code == 'US']
# make a new df with no missing values in state column, where sub_region_1=states, sub_region_2 = counties
mobility_counties = mobility_us[mobility_us['sub_region_1'].notna()]
mobility_states = mobility_counties[mobility_counties['sub_region_2'].isna()]
# drop columns with countries and counties; Show list of unique states in final df
mobility_states.drop(
['country_region_code', 'country_region', 'sub_region_2', 'iso_3166_2_code', 'census_fips_code'], axis=1,
inplace=True)
# Check that we have the right number of states
mob_states = sorted(mobility_states.sub_region_1.unique())
try:
assert len(mob_states) == 51
except AssertionError:
print("Number of states in mobility data is not 51")
print(len(mob_states))
print(mob_states)
measures[['DateIssued', 'DateEnacted', 'DateExpiry', 'DateEased', 'DateEnded']] = measures[
['DateIssued', 'DateEnacted', 'DateExpiry', 'DateEased', 'DateEnded']].apply(pd.to_datetime, format="%Y%m%d")
# Remove suffix from mobility column names
mobility_states.columns = [col.replace('_percent_change_from_baseline', '') for col in mobility_states.columns]
# check for any missing or misspelled states before joining cases, measures and mobility
mset = set(mobility_states.loc[:, 'sub_region_1'])
cset = set(cases.loc[:, 'Province_State'])
pset = set(measures.loc[:, 'StateName'])
dset = set(daily_df.loc[:, 'state'])
daily_diff = dset - cset
    # Remove state rows that are not in cases df
daily_df = daily_df[~daily_df.state.isin(list(daily_diff))]
#check for the same number of states in daily_df and cases df
try:
assert len(daily_df.state.unique()) == len(cset)
except AssertionError:
print("Number of states in daily_df and in cases df is not the same")
print(len(daily_df.state.unique()))
print(len(cset))
# Select columns from measures df to merge with cases and deaths dfs
measures = measures[~measures.StateFIPS.isnull()] # drop rows with empty StateFIPS values
# Select columns in measures for modeling
meas_sel = measures[['StateName', 'StatePolicy', 'DateEnacted', 'DateEnded']]
# drop columns not used in models
cases.drop(['iso2', 'iso3', 'Country_Region', 'Combined_Key', 'UID', 'code3', 'FIPS', 'Lat', 'Long_'], axis=1,
inplace=True)
deaths.drop(['iso2', 'iso3', 'Country_Region', 'Combined_Key', 'UID', 'code3', 'FIPS', 'Lat', 'Long_'], axis=1,
inplace=True)
# Reshape cases and deaths df from wide to tall format
c_melt = cases.melt(id_vars=['Province_State', 'Admin2'], var_name='date', value_name='cases')
d_melt = deaths.melt(id_vars=['Province_State', 'Admin2', 'Population'], var_name='date', value_name='deaths')
# merge cases and deaths df on state and date columns
case_death_df = pd.merge(c_melt, d_melt, how='left', on=['Province_State', 'Admin2', 'date'])
# convert date colum from str to date
case_death_df['date'] = pd.to_datetime(case_death_df['date'], format="%m/%d/%y")
# Drop rows with Population = 0 (correction, out-of-state, unassigned)
tmp = case_death_df[case_death_df['Population'] != 0]
# get total state Population column after grouping each state on an arbitrary date
pop = tmp.loc[tmp.date == '2020-05-08'].groupby(['Province_State'], as_index=False)[['Population']].sum()
# Group cases and death data by state
cd_state = case_death_df.groupby(['Province_State', 'date'], as_index=False)[['cases', 'deaths']].sum()
# Merge Population column
cdp_state = pd.merge(cd_state, pop, how='left', left_on=['Province_State'], right_on=['Province_State'])
#Add measures categorical columns
# Add columns with 0s for each measure to the main df
dfzeros = pd.DataFrame(np.zeros((len(cdp_state), len(meas_sel.StatePolicy.unique()))),
columns=list(meas_sel.StatePolicy.unique())).astype(int)
tseries = pd.concat([cdp_state, dfzeros], axis=1)
tseries.columns = tseries.columns.str.replace('Province_State', 'state').str.replace('Admin2', 'county')
# Loop over states and measures. Plug 1s in the rows when measures were enacted, leave the rest as 0s
for state in meas_sel.StateName.unique():
for i, meas in enumerate(meas_sel.StatePolicy.unique()):
# select rows by state and measure
mask1 = (meas_sel.StateName == state) & (meas_sel.StatePolicy == meas)
if not meas_sel[mask1].empty:
# date policy enacted
start = meas_sel.loc[mask1, "DateEnacted"].values[0]
# date policy ended
end = meas_sel.loc[mask1, "DateEnded"].values[0]
else:
# print(state+ " is missing " + meas)
continue
if pd.notnull(start) & pd.notnull(end):
mask2 = (tseries.state == state) & (tseries.date >= start) & (tseries.date <= end)
elif pd.notnull(start):
mask2 = (tseries.state == state) & (tseries.date >= start)
else:
continue
# set measure values to 1 after date was enacted by state
tseries.loc[mask2, meas] = 1
#Merge mobility and columns from daily_df reports
tseries['date'] = tseries['date'].dt.strftime('%Y-%m-%d')
    df = pd.merge(tseries, mobility_states, how='left',
                  left_on=['state', 'date'], right_on=['sub_region_1', 'date'])
"""
Make dataset pipeline
"""
import pandas as pd
import numpy as np
df = | pd.read_csv("../data/processed/dns.csv") | pandas.read_csv |
# author: <NAME> (Group 24)
# contributors: <NAME>, <NAME>, <NAME>
# date: 2021-11-25
# last updated: 2021-11-27
'''This script fits and evaluates classification models for the Vancouver crime data.
Usage: modelling.py --input_path=<input_path> --out_path=<out_path>
Options:
--input_path=<input_path> Path to directory where the data are stored
--out_path=<out_path> Path to directory where the results are stored
'''
import dataframe_image as dfi
from sklearn.metrics import classification_report
import warnings
import pickle
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, roc_auc_score, average_precision_score, balanced_accuracy_score
from sklearn.metrics import make_scorer
from sklearn.svm import SVC, SVR
from sklearn.preprocessing import (
OneHotEncoder,
OrdinalEncoder,
PolynomialFeatures,
StandardScaler,
)
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.model_selection import (
GridSearchCV,
RandomizedSearchCV,
ShuffleSplit,
cross_val_score,
cross_validate,
train_test_split,
)
from sklearn.linear_model import LogisticRegression, Ridge, RidgeCV, RidgeClassifier
from sklearn.impute import SimpleImputer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.compose import ColumnTransformer, make_column_transformer
import random
import altair as alt
import pandas as pd
import numpy as np
from docopt import docopt
import os
from sklearn.model_selection import train_test_split
alt.data_transformers.enable('data_server')
alt.renderers.enable('mimetype')
opt = docopt(__doc__)
def main(input_path, out_path):
if not os.path.exists(out_path):
        os.makedirs(out_path)
# using the objects that are in the folder
X_train = pd.read_csv(
input_path + '/training_feature.csv', index_col="index")
y_train = pd.read_csv(
input_path + '/training_target.csv', index_col="index").loc[:, "TYPE"]
X_test = pd.read_csv(
input_path + '/test_feature.csv', index_col="index")
y_test = pd.read_csv(input_path + '/test_target.csv',
index_col="index").loc[:, "TYPE"]
file = open(input_path + '/preprocessor.p', 'rb')
preprocessor = pickle.load(file)
file.close()
file = open(input_path + '/models.p', 'rb')
models = pickle.load(file)
file.close()
# Cross validation
# Exports the result into png file
results_cv = cv_models(models, X_train, y_train,
preprocessor, cv=5)
filename = 'models_results_cv.png'
outfile = open(out_path + "/" + filename, 'wb')
dfi.export(results_cv, outfile, table_conversion='matplotlib')
# Hyperparameter tuning of the best model
# Creates a pipeline of the best results
best_results = best_LR_model(X_train, y_train, preprocessor)
best_result_df = best_results["scores"]
pipe_best = make_pipeline(preprocessor, best_results['best_model'])
filename = 'best_LR_model.png'
outfile = open(out_path + "/" + filename, 'wb')
dfi.export(best_result_df, outfile,
table_conversion="matplotlib")
# Save pipe_best data
try:
filename = 'pipe_best.p'
outfile = open(out_path + "/" + filename, 'wb')
pickle.dump(pipe_best, outfile)
outfile.close()
print("")
print(f"Best pipe is loaded successfully at %s" %
(out_path + filename))
except Exception as error:
print(f"Error message: %s" % error)
print("Error while saving pipe")
    # use a local name that does not shadow sklearn's imported classification_report
    report_df = print_scores(
        pipe_best, X_train, y_train, X_test, y_test, preprocessor)['report']
    filename = 'classification_report.png'
    outfile = open(out_path + "/" + filename, 'wb')
    dfi.export(report_df.T, outfile, table_conversion='matplotlib')
# it is saved inside the function, just need to call
print_confusion_matrix(pipe_best, X_train, y_train,
X_test, y_test, out_path)
# Adopted from lecture notes of DSCI 571 and DSCI 573
def mean_std_cross_val_scores(model, X_train, y_train, **kwargs):
"""
Returns mean and std of cross validation
Parameters
----------
model :
scikit-learn model
X_train : numpy array or pandas DataFrame
X in the training data
y_train :
y in the training data
Returns
----------
pandas Series with mean scores from cross_validation
"""
scores = cross_validate(model, X_train, y_train, **kwargs)
mean_scores = pd.DataFrame(scores).mean()
std_scores = pd.DataFrame(scores).std()
out_col = []
for i in range(len(mean_scores)):
out_col.append((f"%0.3f (+/- %0.3f)" %
(mean_scores[i], std_scores[i])))
return pd.Series(data=out_col, index=mean_scores.index)
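# Illustrative output of mean_std_cross_val_scores (hypothetical numbers): with the f1 scorer and
# return_train_score=True, the returned Series maps each cross_validate key to one formatted string,
# e.g. fit_time -> "0.112 (+/- 0.004)", test_f1 -> "0.713 (+/- 0.010)", train_f1 -> "0.802 (+/- 0.003)".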
def cv_models(models, X_train, y_train, preprocessor, cv=5):
"""Returns CV f1 scores
Parameters
----------
models : list
A list of sklearn classifiers
X_train : numpy ndarray
The feature matrix
y_train : numpy ndarray
The target labels
cv : int, optional
Number of folds, default 5
Returns
-------
pandas DataFrame
The results of cross validation for the given models
"""
# X_train = pd.read_csv(
# '../data/processed/training_feature.csv', index_col="index")
# y_train = pd.read_csv(
# '../data/processed/training_target.csv', index_col="index").loc[:, "TYPE"]
#file = open('../data/processed/preprocessor.p', 'rb')
#preprocessor = pickle.load(file)
#file = open('../data/processed/models.p', 'rb')
#models = pickle.load(file)
print("")
print("Start cross validation.")
f1_scorer = make_scorer(f1_score, average='micro')
scoring_metrics = {
"f1": f1_scorer,
}
results = {}
for name, model in models.items():
print(f"running %s" % name)
pipe = make_pipeline(preprocessor, model)
results[name] = mean_std_cross_val_scores(
pipe, X_train, y_train, cv=cv, return_train_score=True, scoring=scoring_metrics
)
results_df = pd.DataFrame(results)
print("")
print("Completed cross validation of different models.")
return results_df
def best_LR_model(X_train, y_train, preprocessor):
"""
Finds the best LR model based on C and weight class, based on f1 scorer
Parameters
----------
models : list
A list of sklearn classifiers
X_train : numpy ndarray
The feature matrix
Returns
-------
dictionary
dictionary with scores and best model with optimized hyperparameters
"""
# X_train = pd.read_csv(
# '../data/processed/training_feature.csv', index_col="index")
# y_train = pd.read_csv(
# '../data/processed/training_target.csv', index_col="index").loc[:, "TYPE"]
# X_test = pd.read_csv(
# '../data/processed/test_feature.csv', index_col="index")
# y_test = pd.read_csv('../data/processed/test_target.csv',
# index_col="index").loc[:, "TYPE"]
#file = open('../data/processed/preprocessor.p', 'rb')
#preprocessor = pickle.load(file)
print("")
print("Start hyperparameter tuning")
pipe = make_pipeline(preprocessor,
LogisticRegression(max_iter=2000,
multi_class='ovr',))
f1_scorer = make_scorer(f1_score, average='micro')
scoring_metrics = {
"f1": f1_scorer,
}
param_grid = {
"logisticregression__C": [0.01, 0.1, 1, 10, 100],
"logisticregression__class_weight": [None, "balanced"]
}
search = RandomizedSearchCV(
pipe,
param_grid,
verbose=1,
n_jobs=6,
n_iter=10,
return_train_score=True,
scoring=make_scorer(f1_score, average='micro'),
random_state=123,
)
search.fit(X_train, y_train)
search_df = pd.DataFrame(search.cv_results_).loc[ | pd.DataFrame(search.cv_results_) | pandas.DataFrame |
from typing import List, Optional, Union
import numpy as np
import pandas as pd
from trackml.score import score_event
from .type_alias import TQubo, TDimodSample, TXplet, XpletType, TDoublet
from .utils import truth_to_xplets, track_to_xplets, diff_rows
class DataWrapper:
"""
Wraps a hits and a truth file and exposes useful functions to compute scores, check xplet validity and more.
"""
def __init__(self, hits: pd.DataFrame, truth: pd.DataFrame):
"""
Create a wrapper. Hits and Truth should match the TrackML challenge schema.
See `TrackML data <https://www.kaggle.com/c/trackml-particle-identification/data>`_ on Kaggle for more info.
Note that indexes and all will be handled here, so you can just use `pd.read_csv` to load the
files.
"""
self.hits = hits
self.truth = truth
# add proper indexing
for df in [self.hits, self.truth]:
df['idx'] = df.hit_id.values
df.set_index('idx', inplace=True)
# add radius information
hits['r'] = np.linalg.norm(hits[['x', 'y']].values.T, axis=0)
# keep a lookup of real doublets: '{hit_id_1}_{hit_id_2}' -> [hit_id_1, hit_id_2]
df = hits.join(truth, lsuffix='_')
self._doublets = truth_to_xplets(hits, df[df.weight > 0], x=2)
self._unfocused = truth_to_xplets(hits, df[df.weight == 0], x=2)
self._lookup = dict(
[(self._get_dkey(*d), XpletType.REAL) for d in self._doublets] +
[(self._get_dkey(*d), XpletType.REAL_UNFOCUSED) for d in self._unfocused]
)
def _get_dkey(self, h1, h2):
return f'{h1}_{h2}'
def get_unfocused_doublets(self) -> List[TDoublet]:
return self._unfocused
def get_real_doublets(self, with_unfocused=False) -> List[TDoublet]:
"""Return the list of real doublets"""
if with_unfocused:
return self._doublets + self._unfocused
return self._doublets
# ==== doublets and subtrack checking
def is_real_doublet(self, doublet: TDoublet) -> XpletType:
"""Test whether a doublet is real, i.e. part of a real track."""
key = self._get_dkey(*doublet)
return self._lookup.get(key, XpletType.FAKE)
def is_real_xplet(self, xplet: TXplet) -> XpletType:
"""Test whether an xplet is real, i.e. a sub-track of a real track."""
doublets = track_to_xplets(xplet, x=2)
if len(doublets) == 0:
raise Exception(f'Got a subtrack with no doublets in it "{xplet}"')
xplet_type = set(self.is_real_doublet(s) for s in doublets)
return XpletType.FAKE if len(xplet_type) > 1 else xplet_type.pop()
# =============== QUBO and energy checking
def sample_qubo(self, Q: TQubo) -> TDimodSample:
"""
        Compute the ideal solution for a given QUBO. Here, ideal means correct, but it doesn't guarantee that
the energy is minimal.
"""
sample = dict()
for (k1, k2), v in Q.items():
if k1 == k2:
subtrack = list(map(int, k1.split('_')))
sample[k1] = int(self.is_real_xplet(subtrack) != XpletType.FAKE)
return sample
def compute_energy(self, Q: TQubo, sample: Optional[TDimodSample] = None) -> float:
"""Compute the energy of a given sample. If sample is None, the ideal sample is used (see :py:meth:~`sample_qubo`). """
if sample is None:
sample = self.sample_qubo(Q)
en = 0
for (k1, k2), v in Q.items():
if sample[k1] != 0 and sample[k2] != 0:
en += v
return en
# =============== scoring
def get_score_numbers(self, doublets: Union[List, np.array, pd.DataFrame]) -> [float, float, float]:
"""
:param doublets: a set of doublets
:return: the number of real, fake and missing doublets
"""
if isinstance(doublets, pd.DataFrame): doublets = doublets.values
doublets_found, _, unfocused_found = diff_rows(doublets, self._unfocused)
missing, fakes, real = diff_rows(self._doublets, doublets_found)
return len(real), len(fakes), len(missing)
def compute_score(self, doublets: Union[List, np.array, pd.DataFrame]) -> [float, float, List[List]]:
"""
        Precision and recall are defined as follows:
        * precision (purity): how many of the proposed doublets are correct? `len(real ∈ doublets) / len(doublets)`
        * recall (efficiency): how well does the solution cover the truth? `len(real ∈ doublets) / len(truth)`
:param doublets: a set of doublets
:return: the precision, the recall and the list of missing doublets. p and r are between 0 and 1.
"""
#unique_doublets = set()
#for d in doublets:
# unique_doublets.add(d)
#doublets = list(d)
if isinstance(doublets, pd.DataFrame): doublets = doublets.values
doublets_found, _, unfocused_found = diff_rows(doublets, self._unfocused)
missing, fakes, real = diff_rows(self._doublets, doublets_found)
return len(real) / len(doublets_found), \
len(real) / len(self._doublets), \
missing
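    # Worked example for compute_score (hypothetical numbers): if the truth holds 4 focused
    # doublets and we propose 5 doublets of which 3 are real (and none merely unfocused), then
    # precision = 3/5 = 0.60, recall = 3/4 = 0.75, and `missing` holds the 1 real doublet that
    # was never proposed.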
def add_missing_doublets(self, doublets: Union[np.array, pd.DataFrame]) -> pd.DataFrame:
"""
:param doublets: a list of doublets
:return: a list of doublets with 100% recall
"""
if isinstance(doublets, pd.DataFrame):
doublets = doublets.values
ip, ir, missing = self.compute_score(doublets)
print(f'got {len(doublets)} doublets.')
print(f' Input precision (%): {ip * 100:.4f}, recall (%): {ir * 100:.4f}')
if len(missing) == 0:
# nothing to do
return doublets
else:
ret = pd.DataFrame(np.vstack((doublets, missing)), columns=['start', 'end'])
p, _, _ = self.compute_score(ret.values)
print(f' New precision (%): {p * 100:.4f}')
return ret
def compute_trackml_score(self, final_tracks: List[TXplet], submission=None) -> float:
"""
:param final_tracks: a list of xplets representing tracks
:param submission: (optional) a TrackML submission, see :py:meth:~`create_submission`
:return: the trackml score (between 0 and 1)
"""
if submission is None:
submission = self.create_submission(final_tracks)
return score_event(self.truth, submission)
def create_submission(self, tracks: List[TXplet], event_id=1000) -> pd.DataFrame:
"""Encode a solution into a dataframe following the structure of a trackml submission."""
hit_ids = self.hits.hit_id.values
n_rows = len(hit_ids)
sub_data = np.column_stack(([event_id] * n_rows, hit_ids, np.zeros(n_rows)))
submission = pd.DataFrame(
data=sub_data, columns=["event_id", "hit_id", "track_id"], index=hit_ids, dtype=int)
for idx, track in enumerate(tracks):
submission.loc[track, 'track_id'] = idx + 1
return submission
# =============== class utils
@classmethod
def from_path(cls, path):
"""
Create a DataWrapper by reading the hits and the truth from a path.
:path: the path + event id, in the format `/path/to/directory/eventXXXXX`
"""
path = path.replace('-hits.csv', '')
return cls(hits= | pd.read_csv(path + '-hits.csv') | pandas.read_csv |
from functools import partial
import logging
from os.path import join, exists
import pandas as pd
from sklearn.model_selection import KFold, train_test_split
from sklearn.preprocessing import PowerTransformer, QuantileTransformer
from .tabular_torch_dataset import TorchTabularTextDataset
from .data_utils import (
CategoricalFeatures,
agg_text_columns_func,
convert_to_func,
get_matching_cols,
load_num_feats,
load_cat_and_num_feats,
normalize_numerical_feats,
)
logger = logging.getLogger(__name__)
def load_data_into_folds(data_csv_path,
num_splits,
validation_ratio,
text_cols,
tokenizer,
label_col,
label_list=None,
categorical_cols=None,
numerical_cols=None,
sep_text_token_str=' ',
categorical_encode_type='ohe',
numerical_transformer_method='quantile_normal',
empty_text_values=None,
replace_empty_text=None,
max_token_length=None,
debug=False
):
"""
Function to load tabular and text data from a specified folder into folds
Loads train, test and/or validation text and tabular data from specified
csv path into num_splits of train, val and test for Kfold cross validation.
    Performs categorical and numerical data preprocessing if specified. `data_csv_path` is the path to the csv file containing the data.
Args:
data_csv_path (str): The path to the csv containing the data
num_splits (int): The number of cross validation folds to split the data into.
validation_ratio (float): A float between 0 and 1 representing the percent of the data to hold as a consistent validation set.
text_cols (:obj:`list` of :obj:`str`): The column names in the dataset that contain text
from which we want to load
tokenizer (:obj:`transformers.tokenization_utils.PreTrainedTokenizer`):
HuggingFace tokenizer used to tokenize the input texts as specifed by text_cols
label_col (str): The column name of the label, for classification the column should have
int values from 0 to n_classes-1 as the label for each class.
For regression the column can have any numerical value
label_list (:obj:`list` of :obj:`str`, optional): Used for classification;
the names of the classes indexed by the values in label_col.
categorical_cols (:obj:`list` of :obj:`str`, optional): The column names in the dataset that
contain categorical features. The features can be already prepared numerically, or
could be preprocessed by the method specified by categorical_encode_type
numerical_cols (:obj:`list` of :obj:`str`, optional): The column names in the dataset that contain numerical features.
These columns should contain only numeric values.
sep_text_token_str (str, optional): The string token that is used to separate between the
different text columns for a given data example. For Bert for example,
this could be the [SEP] token.
categorical_encode_type (str, optional): Given categorical_cols, this specifies
            what method we want to use to preprocess our categorical features.
choices: [ 'ohe', 'binary', None]
see encode_features.CategoricalFeatures for more details
numerical_transformer_method (str, optional): Given numerical_cols, this specifies
what method we want to use for normalizing our numerical data.
choices: ['yeo_johnson', 'box_cox', 'quantile_normal', None]
see https://scikit-learn.org/stable/auto_examples/preprocessing/plot_all_scaling.html
for more details
empty_text_values (:obj:`list` of :obj:`str`, optional): specifies what texts should be considered as
missing which would be replaced by replace_empty_text
replace_empty_text (str, optional): The value of the string that will replace the texts
that match with those in empty_text_values. If this argument is None then
the text that match with empty_text_values will be skipped
max_token_length (int, optional): The token length to pad or truncate to on the
input text
debug (bool, optional): Whether or not to load a smaller debug version of the dataset
Returns:
:obj:`tuple` of `list` of `tabular_torch_dataset.TorchTextDataset`:
This tuple contains three lists representing the splits of
training, validation and testing sets. The length of the lists is
equal to the number of folds specified by `num_splits`
"""
assert 0 <= validation_ratio <= 1, 'validation ratio needs to be between 0 and 1'
all_data_df = | pd.read_csv(data_csv_path) | pandas.read_csv |
'''Wrapper for the Twitter API'''
import requests
from typing import Union, List, Dict
from tqdm.auto import tqdm
import pandas as pd
import time
import numpy as np
import logging
from json import JSONDecodeError
logger = logging.getLogger(__name__)
class Twitter:
'''A wrapper for the Twitter API.
Args:
twitter_bearer_token (str):
The bearer token from the Twitter API.
'''
tweet_lookup_url: str = 'https://api.twitter.com/2/tweets'
def __init__(self, twitter_bearer_token: str):
self.api_key = twitter_bearer_token
self.headers = dict(Authorization=f'Bearer {self.api_key}')
self.expansions = ['attachments.poll_ids',
'attachments.media_keys',
'author_id',
'entities.mentions.username',
'geo.place_id',
'in_reply_to_user_id',
'referenced_tweets.id',
'referenced_tweets.id.author_id']
self.tweet_fields = ['attachments',
'author_id',
'conversation_id',
'created_at',
'entities',
'geo',
'id',
'in_reply_to_user_id',
'lang',
'public_metrics',
'possibly_sensitive',
'referenced_tweets',
'reply_settings',
'source',
'text',
'withheld']
self.user_fields = ['created_at',
'description',
'entities',
'id',
'location',
'name',
'pinned_tweet_id',
'profile_image_url',
'protected',
'public_metrics',
'url',
'username',
'verified',
'withheld']
self.media_fields = ['duration_ms',
'height',
'media_key',
'preview_image_url',
'type',
'url',
'width',
'public_metrics']
self.place_fields = ['contained_within',
'country',
'country_code',
'full_name',
'geo',
'id',
'name',
'place_type']
self.poll_fields = ['duration_minutes',
'end_datetime',
'id',
'options',
'voting_status']
def rehydrate_tweets(self,
tweet_ids: List[Union[str, int]]
) -> Dict[str, pd.DataFrame]:
'''Rehydrates the tweets for the given tweet IDs.
Args:
tweet_ids (list of either str or int):
The tweet IDs to rehydrate.
Returns:
dict:
A dictionary with keys 'tweets', 'users', 'media',
'polls' and 'places', where the values are the
associated Pandas DataFrame objects.
'''
# Ensure that the tweet IDs are strings
tweet_ids = [str(tweet_id) for tweet_id in tweet_ids]
# Set up the params for the GET request
get_params = {'expansions': ','.join(self.expansions),
'media.fields': ','.join(self.media_fields),
'place.fields': ','.join(self.place_fields),
'poll.fields': ','.join(self.poll_fields),
'tweet.fields': ','.join(self.tweet_fields),
'user.fields': ','.join(self.user_fields)}
# Split `tweet_ids` into batches of at most 100, as this is the
# maximum number allowed by the API
num_batches = len(tweet_ids) // 100
if len(tweet_ids) % 100 != 0:
num_batches += 1
batches = np.array_split(tweet_ids, num_batches)
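        # Example of the batching above: 250 tweet IDs give num_batches = 3, and np.array_split
        # returns batches of sizes 84, 83 and 83, each within the 100-ID limit of a single request.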
# Initialise dataframes
tweet_df = pd.DataFrame()
user_df = pd.DataFrame()
media_df = pd.DataFrame()
poll_df = pd.DataFrame()
place_df = pd.DataFrame()
# Initialise progress bar
if len(batches) > 1:
pbar = tqdm(total=len(tweet_ids), desc='Rehydrating')
# Loop over all the batches
for batch in batches:
# Add the batch tweet IDs to the batch
get_params['ids'] = ','.join(batch)
# Perform the GET request
try:
response = requests.get(self.tweet_lookup_url,
params=get_params,
headers=self.headers)
except requests.exceptions.RequestException as e:
logger.error(f'[{e}] Error in rehydrating tweets.\nThe '
f'parameters used were {get_params}.')
continue
# If we have reached the API limit then wait a bit and try again
while response.status_code in [429, 503]:
logger.debug('Request limit reached. Waiting...')
time.sleep(1)
try:
response = requests.get(self.tweet_lookup_url,
params=get_params,
headers=self.headers)
except requests.exceptions.RequestException as e:
logger.error(f'[{e}] Error in rehydrating tweets.\nThe '
f'parameters used were {get_params}.')
continue
# If we are not authorised then continue to the next batch
if response.status_code == 401:
continue
# If the GET request failed then continue to the next batch
elif response.status_code != 200:
msg = f'[{response.status_code}] {response.text}'
logger.error(msg)
continue
# Convert the response to a dict
try:
data_dict = response.json()
except JSONDecodeError as e:
logger.error(f'[{e}] Error in unpacking tweets.\nThe '
f'parameters used were {get_params}.')
continue
# If the query returned errors then continue to the next batch
if 'data' not in data_dict and 'errors' in data_dict:
error = data_dict['errors'][0]
logger.error(error['detail'])
continue
# Tweet dataframe
if 'data' in data_dict:
df = (pd.json_normalize(data_dict['data'])
.rename(columns=dict(id='tweet_id')))
tweet_df = (pd.concat((tweet_df, df))
.drop_duplicates(subset='tweet_id')
.astype(dict(tweet_id=int))
.reset_index(drop=True))
# User dataframe
if 'includes' in data_dict and 'users' in data_dict['includes']:
users = data_dict['includes']['users']
df = (pd.json_normalize(users)
.rename(columns=dict(id='user_id')))
user_df = (pd.concat((user_df, df))
.drop_duplicates(subset='user_id')
.astype(dict(user_id=int))
.reset_index(drop=True))
# Media dataframe
if 'includes' in data_dict and 'media' in data_dict['includes']:
media = data_dict['includes']['media']
df = pd.json_normalize(media)
media_df = (pd.concat((media_df, df))
.drop_duplicates(subset='media_key')
.reset_index(drop=True))
# Poll dataframe
if 'includes' in data_dict and 'polls' in data_dict['includes']:
polls = data_dict['includes']['polls']
df = (pd.json_normalize(polls)
.rename(columns=dict(id='poll_id')))
poll_df = (pd.concat((poll_df, df))
.drop_duplicates(subset='poll_id')
.astype(dict(poll_id=int))
.reset_index(drop=True))
# Places dataframe
if 'includes' in data_dict and 'places' in data_dict['includes']:
places = data_dict['includes']['places']
df = ( | pd.json_normalize(places) | pandas.json_normalize |
import sys
import time
import datetime
import logging
from sqlalchemy.exc import IntegrityError
import Utils.configuration_file_service as config_service
import Utils.DB_utils as dbUtil
import pandas as pd
import tushare as ts
from sqlalchemy import Column, String, Float, MetaData, Table, create_engine, INT
token = config_service.getProperty(section_name=config_service.TOKEN_SECTION_NAME,
property_name=config_service.TS_TOKEN_NAME)
pro = ts.pro_api(token)
def getTableMeta(year: int, metadata: MetaData) -> Table:
"""
get corresponding table meta data.
:param year: year of the data
:return: a Table object representing the table structure
"""
return Table(
dbUtil.getTableName(year, "cashflow"), metadata,
Column("id", INT, primary_key=True),
Column("ts_code", String(10)), # 股票代码
Column("ann_date", String(8)),
Column("f_ann_date", String(8)),
Column("end_date", String(8)),
Column("comp_type", String(3)),
Column("report_type", String(3)),
Column("net_profit", Float), # 净利润
Column("finan_exp", Float), # 财务费用
Column("c_fr_sale_sg", Float), # 销售商品、提供劳务收到的现金
Column("recp_tax_rends", Float), # 收到的税费返还
Column("n_depos_incr_fi", Float), # 客户存款和同业存放款项净增加额
Column("n_incr_loans_cb", Float), # 向中央银行借款净增加额
Column("n_inc_borr_oth_fi", Float), # 向其他金融机构拆入资金净增加额
Column("prem_fr_orig_contr", Float), # 收到原保险合同保费取得的现金
Column("n_incr_insured_dep", Float), # 保户储金净增加额
Column("n_reinsur_prem", Float), # 收到再保业务现金净额
Column("n_incr_disp_tfa", Float), # 处置交易性金融资产净增加额
Column("ifc_cash_incr", Float), # 收取利息和手续费净增加额
Column("n_incr_disp_faas", Float), # 处置可供出售金融资产净增加额
Column("n_incr_loans_oth_bank", Float), # 拆入资金净增加额
Column("n_cap_incr_repur", Float), # 回购业务资金净增加额
Column("c_fr_oth_operate_a", Float), # 收到其他与经营活动有关的现金
Column("c_inf_fr_operate_a", Float), # 经营活动现金流入小计
Column("c_paid_goods_s", Float), # 购买商品、接受劳务支付的现金
Column("c_paid_to_for_empl", Float), # 支付给职工以及为职工支付的现金
Column("c_paid_for_taxes", Float), # 支付的各项税费
Column("n_incr_clt_loan_adv", Float), # 客户贷款及垫款净增加额
Column("n_incr_dep_cbob", Float), # 存放央行和同业款项净增加额
Column("c_pay_claims_orig_inco", Float), # 支付原保险合同赔付款项的现金
Column("pay_handling_chrg", Float), # 支付手续费的现金
Column("pay_comm_insur_plcy", Float), # 支付保单红利的现金
Column("oth_cash_pay_oper_act", Float), # 支付其他与经营活动有关的现金
Column("st_cash_out_act", Float), # 经营活动现金流出小计
Column("n_cashflow_act", Float), # 经营活动产生的现金流量净额
Column("oth_recp_ral_inv_act", Float), # 收到其他与投资活动有关的现金
Column("c_disp_withdrwl_invest", Float), # 收回投资收到的现金
Column("c_recp_return_invest", Float), # 取得投资收益收到的现金
Column("n_recp_disp_fiolta", Float), # 处置固定资产、无形资产和其他长期资产收回的现金净额
Column("n_recp_disp_sobu", Float), # 处置子公司及其他营业单位收到的现金净额
Column("stot_inflows_inv_act", Float), # 投资活动现金流入小计
Column("c_pay_acq_const_fiolta", Float), # 购建固定资产、无形资产和其他长期资产支付的现金
Column("c_paid_invest", Float), # 投资支付的现金
Column("n_disp_subs_oth_biz", Float), # 取得子公司及其他营业单位支付的现金净额
Column("oth_pay_ral_inv_act", Float), # 支付其他与投资活动有关的现金
Column("n_incr_pledge_loan", Float), # 质押贷款净增加额
Column("stot_out_inv_act", Float), # 投资活动现金流出小计
Column("n_cashflow_inv_act", Float), # 投资活动产生的现金流量净额
Column("c_recp_borrow", Float), # 取得借款收到的现金
Column("proc_issue_bonds", Float), # 发行债券收到的现金
Column("oth_cash_recp_ral_fnc_act", Float), # 收到其他与筹资活动有关的现金
Column("stot_cash_in_fnc_act", Float), # 筹资活动现金流入小计
Column("free_cashflow", Float), # 企业自由现金流量
Column("c_prepay_amt_borr", Float), # 偿还债务支付的现金
Column("c_pay_dist_dpcp_int_exp", Float), # 分配股利、利润或偿付利息支付的现金
Column("incl_dvd_profit_paid_sc_ms", Float), # 其中:子公司支付给少数股东的股利、利润
Column("oth_cashpay_ral_fnc_act", Float), # 支付其他与筹资活动有关的现金
Column("stot_cashout_fnc_act", Float), # 筹资活动现金流出小计
Column("n_cash_flows_fnc_act", Float), # 筹资活动产生的现金流量净额
Column("eff_fx_flu_cash", Float), # 汇率变动对现金的影响
Column("n_incr_cash_cash_equ", Float), # 现金及现金等价物净增加额
Column("c_cash_equ_beg_period", Float), # 期初现金及现金等价物余额
Column("c_cash_equ_end_period", Float), # 期末现金及现金等价物余额
Column("c_recp_cap_contrib", Float), # 吸收投资收到的现金
Column("incl_cash_rec_saims", Float), # 其中:子公司吸收少数股东投资收到的现金
Column("uncon_invest_loss", Float), # 未确认投资损失
Column("prov_depr_assets", Float), # 加:资产减值准备
Column("depr_fa_coga_dpba", Float), # 固定资产折旧、油气资产折耗、生产性生物资产折旧
Column("amort_intang_assets", Float), # 无形资产摊销
Column("lt_amort_deferred_exp", Float), # 长期待摊费用摊销
Column("decr_deferred_exp", Float), # 待摊费用减少
Column("incr_acc_exp", Float), # 预提费用增加
Column("loss_disp_fiolta", Float), # 处置固定、无形资产和其他长期资产的损失
Column("loss_scr_fa", Float), # 固定资产报废损失
Column("loss_fv_chg", Float), # 公允价值变动损失
Column("invest_loss", Float), # 投资损失
Column("decr_def_inc_tax_assets", Float), # 递延所得税资产减少
Column("incr_def_inc_tax_liab", Float), # 递延所得税负债增加
Column("decr_inventories", Float), # 存货的减少
Column("decr_oper_payable", Float), # 经营性应收项目的减少
Column("incr_oper_payable", Float), # 经营性应付项目的增加
Column("others", Float), # 其他
Column("im_net_cashflow_oper_act", Float), # 经营活动产生的现金流量净额(间接法)
Column("conv_debt_into_cap", Float), # 债务转为资本
Column("conv_copbonds_due_within_1y", Float), # 一年内到期的可转换公司债券
Column("fa_fnc_leases", Float), # 融资租入固定资产
Column("end_bal_cash", Float), # 现金的期末余额
Column("beg_bal_cash", Float), # 减:现金的期初余额
Column("end_bal_cash_equ", Float), # 加:现金等价物的期末余额
Column("beg_bal_cash_equ", Float), # 减:现金等价物的期初余额
Column("im_n_incr_cash_equ", Float), # 现金及现金等价物净增加额(间接法)
Column("update_flag", Float) # 更新标识
)
def get_ts_code(engine):
"""查询ts_code"""
return pd.read_sql('select ts_code from stock_basic', engine)
def get_ts_code_and_list_date(engine):
"""查询ts_code"""
return pd.read_sql('select ts_code,list_date from stock_basic', engine)
def update_bulk_cashflow_by_period_and_ts_code(base_name, engine, pro, codes, start_date, end_date, retry_count=3,
pause=2):
coverage = dbUtil.getTableRange(base_name="", start_date=start_date, end_date=end_date)
for i in coverage:
for rownum in range(0, len(codes)):
logger.debug("started processing data for " + codes.iloc[rownum]['ts_code'] + " for period " + i)
if (int(codes.iloc[rownum]['list_date'][0:4]) <= int(i[1:5]) or int(
codes.iloc[rownum]['list_date'][0:4]) <= int(i[6:10])):
try:
to_insert = pro.cashflow_vip(ts_code=codes.iloc[rownum]['ts_code'], start_date=i[1:5] + '0101',
end_date=i[6:10] + '1231')
logger.debug("start inserting data into DB")
to_insert.to_sql(base_name + i, engine, if_exists='append', index=False)
logger.debug("end inserting data into DB")
except Exception as e:
logger.error(e)
logger.error(
"error processing data for range " + str(i) + " for code " + codes.iloc[rownum]['ts_code'])
def update_bulk_cashflow_by_ts_code_and_insert_by_year(base_name, engine, pro, codes, sharding_column, failed_count=0,
failed_tolerent=3):
failed = []
for code in codes['ts_code']:
logger.debug("started processing data for " + code)
try:
to_insert = pro.cashflow_vip(ts_code=code)
logger.debug("start inserting data into DB")
distinct_years = set(to_insert[sharding_column].str[0:4])
for year in distinct_years:
year_section = to_insert[to_insert[sharding_column].str[0:4] == year]
if (year == None):
year = 9999
year_section = to_insert[pd.isna(to_insert[sharding_column]) == True]
year_section.to_sql(dbUtil.getTableName(int(year), base_name=base_name), engine, if_exists='append',
index=False)
logger.debug("end inserting data into DB")
except Exception as e:
failed.append(code)
logger.error(e)
logger.error("error processing data for code " + code)
if (failed_count < failed_tolerent):
logger.warning("retrying now.")
failed_count = failed_count + 1
update_bulk_cashflow_by_ts_code_and_insert_by_year(base_name=base_name, engine=engine, pro=pro,
codes= | pd.DataFrame(failed, columns=['ts_code']) | pandas.DataFrame |
"""
inspiration from R Package - PerformanceAnalytics
"""
from collections import OrderedDict
import pandas as pd
import numpy as np
from tia.analysis.util import per_series
PER_YEAR_MAP = {
'BA': 1.,
'BAS': 1.,
'A': 1.,
'AS': 1.,
'BQ': 4.,
'BQS': 4.,
'Q': 4.,
'QS': 4.,
'D': 365.,
'B': 252.,
'BMS': 12.,
'BM': 12.,
'MS': 12.,
'M': 12.,
'W': 52.,
}
def guess_freq(index):
# admittedly weak way of doing this...This needs to be abolished
if isinstance(index, (pd.Series, pd.DataFrame)):
index = index.index
if hasattr(index, 'freqstr') and index.freqstr:
return index.freqstr[0]
elif len(index) < 3:
raise Exception('cannot guess frequency with less than 3 items')
else:
lb = min(7, len(index))
idx_zip = lambda: list(zip(index[-lb:-1], index[-(lb-1):]))
diff = min([t2 - t1 for t1, t2, in idx_zip()])
if diff.days <= 1:
if 5 in index.dayofweek or 6 in index.dayofweek:
return 'D'
else:
return 'B'
elif diff.days == 7:
return 'W'
else:
diff = min([t2.month - t1.month for t1, t2, in idx_zip()])
if diff == 1:
return 'M'
diff = min([t2.year - t1.year for t1, t2, in idx_zip()])
if diff == 1:
return 'A'
strs = ','.join([i.strftime('%Y-%m-%d') for i in index[-lb:]])
raise Exception('unable to determine frequency, last %s dates %s' % (lb, strs))
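# Hypothetical usage of guess_freq (illustrative, not from the original source):
#   guess_freq(pd.date_range('2020-01-06', periods=10, freq='B'))  # -> 'B', resolved via index.freqstr
#   guess_freq(pd.DatetimeIndex(['2020-01-03', '2020-01-04', '2020-01-05', '2020-01-06']))  # -> 'D'
# The second index carries no freq, so the day-difference heuristic runs; it spans a weekend,
# hence 'D' rather than 'B'.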
def periodicity(freq_or_frame):
"""
resolve the number of periods per year
"""
if hasattr(freq_or_frame, 'rule_code'):
rc = freq_or_frame.rule_code
rc = rc.split('-')[0]
factor = PER_YEAR_MAP.get(rc, None)
if factor is not None:
return factor / abs(freq_or_frame.n)
else:
raise Exception('Failed to determine periodicity. No factor mapping for %s' % freq_or_frame)
elif isinstance(freq_or_frame, str):
factor = PER_YEAR_MAP.get(freq_or_frame, None)
if factor is not None:
return factor
else:
raise Exception('Failed to determine periodicity. No factor mapping for %s' % freq_or_frame)
elif isinstance(freq_or_frame, (pd.Series, pd.DataFrame, pd.TimeSeries)):
freq = freq_or_frame.index.freq
if not freq:
freq = pd.infer_freq(freq_or_frame.index)
if freq:
return periodicity(freq)
else:
# Attempt to resolve it
import warnings
freq = guess_freq(freq_or_frame.index)
warnings.warn('frequency not set. guessed it to be %s' % freq)
return periodicity(freq)
else:
return periodicity(freq)
else:
raise ValueError("periodicity expects DataFrame, Series, or rule_code property")
periods_in_year = periodicity
def _resolve_periods_in_year(scale, frame):
""" Convert the scale to an annualzation factor. If scale is None then attempt to resolve from frame. If scale is a scalar then
use it. If scale is a string then use it to lookup the annual factor
"""
if scale is None:
return periodicity(frame)
elif isinstance(scale, str):
return periodicity(scale)
elif np.isscalar(scale):
return scale
else:
raise ValueError("scale must be None, scalar, or string, not %s" % type(scale))
def excess_returns(returns, bm=0):
"""
Return the excess amount of returns above the given benchmark bm
"""
return returns - bm
def returns(prices, method='simple', periods=1, fill_method='pad', limit=None, freq=None):
"""
compute the returns for the specified prices.
method: [simple,compound,log], compound is log
"""
if method not in ('simple', 'compound', 'log'):
raise ValueError("Invalid method type. Valid values are ('simple', 'compound')")
if method == 'simple':
return prices.pct_change(periods=periods, fill_method=fill_method, limit=limit, freq=freq)
else:
if freq is not None:
raise NotImplementedError("TODO: implement this logic if needed")
if isinstance(prices, pd.Series):
if fill_method is None:
data = prices
else:
data = prices.fillna(method=fill_method, limit=limit)
data = np.log(data / data.shift(periods=periods))
mask = pd.isnull(prices.values)
np.putmask(data.values, mask, np.nan)
return data
else:
return pd.DataFrame(
{name: returns(col, method, periods, fill_method, limit, freq) for name, col in prices.items()},
columns=prices.columns,
index=prices.index)
def returns_cumulative(returns, geometric=True, expanding=False):
""" return the cumulative return
Parameters
----------
returns : DataFrame or Series
geometric : bool, default is True
If True, geometrically link returns
expanding : bool default is False
If True, return expanding series/frame of returns
If False, return the final value(s)
"""
if expanding:
if geometric:
return (1. + returns).cumprod() - 1.
else:
return returns.cumsum()
else:
if geometric:
return (1. + returns).prod() - 1.
else:
return returns.sum()
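# Worked example: for periodic returns of +10% then -5%, geometric linking gives
# (1.10 * 0.95) - 1 = 0.045 (4.5% cumulative), while geometric=False simply sums to 0.05.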
def rolling_returns_cumulative(returns, window, min_periods=1, geometric=True):
""" return the rolling cumulative returns
Parameters
----------
returns : DataFrame or Series
window : number of observations
min_periods : minimum number of observations in a window
geometric : link the returns geometrically
"""
if geometric:
rc = lambda x: (1. + x[np.isfinite(x)]).prod() - 1.
else:
rc = lambda x: (x[np.isfinite(x)]).sum()
return pd.rolling_apply(returns, window, rc, min_periods=min_periods)
def returns_annualized(returns, geometric=True, scale=None, expanding=False):
""" return the annualized cumulative returns
Parameters
----------
returns : DataFrame or Series
geometric : link the returns geometrically
scale: None or scalar or string (ie 12 for months in year),
If None, attempt to resolve from returns
If scalar, then use this as the annualization factor
If string, then pass this to periodicity function to resolve annualization factor
expanding: bool, default is False
If True, return expanding series/frames.
If False, return final result.
"""
scale = _resolve_periods_in_year(scale, returns)
if expanding:
if geometric:
n = pd.expanding_count(returns)
return ((1. + returns).cumprod() ** (scale / n)) - 1.
else:
return pd.expanding_mean(returns) * scale
else:
if geometric:
n = returns.count()
return ((1. + returns).prod() ** (scale / n)) - 1.
else:
return returns.mean() * scale
def drawdowns(returns, geometric=True):
"""
compute the drawdown series for the period return series
return: periodic return Series or DataFrame
"""
wealth = 1. + returns_cumulative(returns, geometric=geometric, expanding=True)
values = wealth.values
if values.ndim == 2:
ncols = values.shape[-1]
values = np.vstack(([1.] * ncols, values))
maxwealth = pd.expanding_max(values)[1:]
dds = wealth / maxwealth - 1.
dds[dds > 0] = 0 # Can happen if first returns are positive
return dds
elif values.ndim == 1:
values = np.hstack(([1.], values))
maxwealth = | pd.expanding_max(values) | pandas.expanding_max |
from __future__ import division
from unittest import TestCase
from nose_parameterized import parameterized
from numpy.testing import assert_allclose, assert_almost_equal
import numpy as np
import pandas as pd
import pandas.util.testing as pdt
from .. import timeseries
from .. import utils
DECIMAL_PLACES = 8
class TestDrawdown(TestCase):
drawdown_list = np.array(
[100, 90, 75]
) / 10.
dt = pd.date_range('2000-1-3', periods=3, freq='D')
drawdown_serie = pd.Series(drawdown_list, index=dt)
@parameterized.expand([
(drawdown_serie,)
])
def test_get_max_drawdown_begins_first_day(self, px):
rets = px.pct_change()
drawdowns = timeseries.gen_drawdown_table(rets, top=1)
self.assertEqual(drawdowns.loc[0, 'net drawdown in %'], 25)
drawdown_list = np.array(
[100, 110, 120, 150, 180, 200, 100, 120,
160, 180, 200, 300, 400, 500, 600, 800,
900, 1000, 650, 600]
) / 10.
dt = pd.date_range('2000-1-3', periods=20, freq='D')
drawdown_serie = pd.Series(drawdown_list, index=dt)
@parameterized.expand([
(drawdown_serie,
pd.Timestamp('2000-01-08'),
pd.Timestamp('2000-01-09'),
pd.Timestamp('2000-01-13'),
50,
pd.Timestamp('2000-01-20'),
pd.Timestamp('2000-01-22'),
None,
40
)
])
def test_gen_drawdown_table_relative(
self, px,
first_expected_peak, first_expected_valley,
first_expected_recovery, first_net_drawdown,
second_expected_peak, second_expected_valley,
second_expected_recovery, second_net_drawdown
):
rets = px.pct_change()
drawdowns = timeseries.gen_drawdown_table(rets, top=2)
self.assertEqual(np.round(drawdowns.loc[0, 'net drawdown in %']),
first_net_drawdown)
self.assertEqual(drawdowns.loc[0, 'peak date'],
first_expected_peak)
self.assertEqual(drawdowns.loc[0, 'valley date'],
first_expected_valley)
self.assertEqual(drawdowns.loc[0, 'recovery date'],
first_expected_recovery)
self.assertEqual(np.round(drawdowns.loc[1, 'net drawdown in %']),
second_net_drawdown)
self.assertEqual(drawdowns.loc[1, 'peak date'],
second_expected_peak)
self.assertEqual(drawdowns.loc[1, 'valley date'],
second_expected_valley)
self.assertTrue(pd.isnull(drawdowns.loc[1, 'recovery date']))
px_list_1 = np.array(
[100, 120, 100, 80, 70, 110, 180, 150]) / 100. # Simple
px_list_2 = np.array(
[100, 120, 100, 80, 70, 80, 90, 90]) / 100. # Ends in drawdown
dt = pd.date_range('2000-1-3', periods=8, freq='D')
@parameterized.expand([
(pd.Series(px_list_1,
index=dt),
pd.Timestamp('2000-1-4'),
pd.Timestamp('2000-1-7'),
pd.Timestamp('2000-1-9')),
(pd.Series(px_list_2,
index=dt),
pd.Timestamp('2000-1-4'),
pd.Timestamp('2000-1-7'),
None)
])
def test_get_max_drawdown(
self, px, expected_peak, expected_valley, expected_recovery):
rets = px.pct_change().iloc[1:]
peak, valley, recovery = timeseries.get_max_drawdown(rets)
# Need to use isnull because the result can be NaN, NaT, etc.
self.assertTrue(
pd.isnull(peak)) if expected_peak is None else self.assertEqual(
peak,
expected_peak)
self.assertTrue(
pd.isnull(valley)) if expected_valley is None else \
self.assertEqual(
valley,
expected_valley)
self.assertTrue(
pd.isnull(recovery)) if expected_recovery is None else \
self.assertEqual(
recovery,
expected_recovery)
@parameterized.expand([
(pd.Series(px_list_2,
index=dt),
pd.Timestamp('2000-1-4'),
pd.Timestamp('2000-1-7'),
None,
None),
(pd.Series(px_list_1,
index=dt),
pd.Timestamp('2000-1-4'),
pd.Timestamp('2000-1-7'),
pd.Timestamp('2000-1-9'),
4)
])
def test_gen_drawdown_table(self, px, expected_peak,
expected_valley, expected_recovery,
expected_duration):
rets = px.pct_change().iloc[1:]
drawdowns = timeseries.gen_drawdown_table(rets, top=1)
self.assertTrue(
pd.isnull(
drawdowns.loc[
0,
'peak date'])) if expected_peak is None \
else self.assertEqual(drawdowns.loc[0, 'peak date'],
expected_peak)
self.assertTrue(
pd.isnull(
drawdowns.loc[0, 'valley date'])) \
if expected_valley is None else self.assertEqual(
drawdowns.loc[0, 'valley date'],
expected_valley)
self.assertTrue(
pd.isnull(
drawdowns.loc[0, 'recovery date'])) \
if expected_recovery is None else self.assertEqual(
drawdowns.loc[0, 'recovery date'],
expected_recovery)
self.assertTrue(
pd.isnull(drawdowns.loc[0, 'duration'])) \
if expected_duration is None else self.assertEqual(
drawdowns.loc[0, 'duration'], expected_duration)
def test_drawdown_overlaps(self):
# Add test to show that drawdowns don't overlap
# Bug #145 observed for FB stock on the period 2014-10-24 - 2015-03-19
# Reproduced on SPY data (cached) but need a large number of drawdowns
spy_rets = utils.get_symbol_rets('SPY',
start='1997-01-01',
end='2004-12-31')
spy_drawdowns = timeseries.gen_drawdown_table(
spy_rets,
top=20).sort_values(by='peak date')
# Compare the recovery date of each drawdown with the peak of the next
# Last pair might contain a NaT if drawdown didn't finish, so ignore it
pairs = list(zip(spy_drawdowns['recovery date'],
spy_drawdowns['peak date'].shift(-1)))[:-1]
for recovery, peak in pairs:
self.assertLessEqual(recovery, peak)
@parameterized.expand([
(pd.Series(px_list_1 - 1, index=dt), -0.44000000000000011)
])
def test_max_drawdown(self, returns, expected):
self.assertEqual(timeseries.max_drawdown(returns), expected)
@parameterized.expand([
(pd.Series(px_list_1 - 1, index=dt), -0.44000000000000011)
])
def test_max_drawdown_underwater(self, underwater, expected):
self.assertEqual(timeseries.max_drawdown(underwater), expected)
@parameterized.expand([
(pd.Series(px_list_1,
index=dt),
1,
[(pd.Timestamp('2000-01-03 00:00:00'),
pd.Timestamp('2000-01-03 00:00:00'),
pd.Timestamp('2000-01-03 00:00:00'))])
])
def test_top_drawdowns(self, returns, top, expected):
self.assertEqual(
timeseries.get_top_drawdowns(
returns,
top=top),
expected)
class TestCumReturns(TestCase):
dt = pd.date_range('2000-1-3', periods=3, freq='D')
@parameterized.expand([
(pd.Series([.1, -.05, .1], index=dt),
pd.Series([1.1, 1.1 * .95, 1.1 * .95 * 1.1], index=dt), 1.),
(pd.Series([np.nan, -.05, .1], index=dt),
pd.Series([1., 1. * .95, 1. * .95 * 1.1], index=dt), 1.),
])
def test_expected_result(self, input, expected, starting_value):
output = timeseries.cum_returns(input, starting_value=starting_value)
pdt.assert_series_equal(output, expected)
class TestVariance(TestCase):
@parameterized.expand([
(1e7, 0.5, 1, 1, -10000000.0)
])
def test_var_cov_var_normal(self, P, c, mu, sigma, expected):
self.assertEqual(
timeseries.var_cov_var_normal(
P,
c,
mu,
sigma),
expected)
class TestNormalize(TestCase):
dt = pd.date_range('2000-1-3', periods=8, freq='D')
px_list = [1.0, 1.2, 1.0, 0.8, 0.7, 0.8, 0.8, 0.8]
@parameterized.expand([
(pd.Series(np.array(px_list) * 100, index=dt),
| pd.Series(px_list, index=dt) | pandas.Series |
from skyfield.api import load
import numpy as np
import math
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from skyfield.api import utc
from scipy.optimize import brentq  # root finding (Brent's method)
from datetime import timedelta, datetime
import pytz
# Custom helper functions
from definitions import *
from whereIsData import *
# convert binary to elemental representation
b_to_q = lambda x: [from_binary_to_element_symbols[x[i*2:i*2+2]] for i in [0,1,2]]
b_to_q_el = lambda x: [from_binary_to_element_ix[x[i*2:i*2+2]] for i in [0,1,2]]
b_to_ching = lambda x: [b for b in x]
# index elemental composition of the I Ching - binary becomes a 'triplet' with one of the four elements (air - fire - water - earth)
iching_ix = [b_to_q_el(str(x['binary']))for x in iching]
# binary position of the I Ching - binary becomes a string
iching_binary = [b_to_q(str(x['binary']))for x in iching]
# binary position of the I Ching - binary becomes an array of [1 and 0]
iching_binary_full = [b_to_ching(str(x['binary']))for x in iching]
def get_color_map():
water = "#0E61B0"
air = "#C29F17"
earth = "#55A349"
fire = "#C9280C"
return [air, fire, water, earth]
def __test_hex_binary_to_element():
print ([x for x in '10000'])
print( from_binary_to_element_symbols['00'] )
print ( b_to_q('000000'))
print ( b_to_q_el('101010'))
print ( b_to_ching('111000'))
def neutron_stream_pos(planet_position):
""" returns mandala position (base 64) given planet position"""
return ( (planet_position + (2*line_width - 1*color_width - 2*tone_width) ) / (2*math.pi) * 64) % 64
def map_on_hexagram(df, include=[]):
""" maps df planet positions onto position onto a hexagram and line """
# convert dataframe to numpy array
neutron_stream = df.to_numpy()
hexagram_bin = np.floor(neutron_stream) # rounded up downwards
# map bin number onto 'hexagram' (neutron stream is sequential order, hexagram is King Wen Sequence)
strong = np.array(iching_map)
flat = hexagram_bin.astype(int).flatten()
previous_shape = neutron_stream.shape
mapped = strong[flat]
hexagram = mapped.reshape(previous_shape)
hexagram_fraction = neutron_stream - hexagram_bin
line = hexagram_fraction // (1/6) + 1 # count in which 6th this neutrino stream falls in
line_fraction = (hexagram_fraction - (line - 1)*1/6 ) / (1/6)
color = line_fraction // (1/6) + 1
color_fraction = (line_fraction - (color -1) * 1/6) / (1/6)
tone = color_fraction // (1/6) + 1
return_info = [hexagram]
if 'lines' in include:
return_info += [line.astype(int)]
return return_info
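# Worked example of the mapping above (hypothetical stream value): a neutron-stream position of
# 10.30 falls in bin 10, so the hexagram is iching_map[10] (its King Wen number), and the leftover
# fraction 0.30 sits in the second sixth of that hexagram, i.e. line 2.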
def get_elemental_ching_lines_map(df_planets):
# input = ephemeris planets for a certain time period
# map 'neutron' stream, aka influences of the probability field (the planets in the solar system physical space)
# position to index on a wheel
df_planets['earth'] = df_planets['sun'] - math.pi
df_angles = neutron_stream_pos(df_planets)
# index on a wheel to specific binary I-Ching Sequence - King Wen Version
z = map_on_hexagram(df_angles, include=['lines'])
hexagrams = z[0]
lines = z[1]
# many_2_b = np.array(iching_binary) # strong
many_2 = np.array(iching_binary_full) # strong
one_2 = hexagrams.astype(int).flatten() - 1 # flat
# binary el
#el_b = many_2_b[one_2]
# normal el (0 -> 3)
el = many_2[one_2]
finish = el.reshape((df_angles.shape[0], df_angles.shape[1]*6))
return finish.astype(int), lines
def get_elemental_ching_map(df_planets):
# input = ephemeris planets for a certain time period
# map 'neutron' stream, aka influences of the probability field (the planets in the solar system physical space)
# position to index on a wheel
df_planets['earth'] = df_planets['sun'] - math.pi
df_angles = neutron_stream_pos(df_planets)
# index on a wheel to specific binary I-Ching Sequence - King Wen Version
z = map_on_hexagram(df_angles)
# many_2_b = np.array(iching_binary) # strong
many_2 = np.array(iching_binary_full) # strong
one_2 = z.astype(int).flatten() - 1 # flat
# binary el
#el_b = many_2_b[one_2]
# normal el (0 -> 3)
el = many_2[one_2]
finish = el.reshape((df_angles.shape[0], df_angles.shape[1]*6))
return finish.astype(int)
def get_elemental_map(df_planets):
# map 'neutron' stream, aka influences of the probability field (the planets in the solar system physical space)
# position to index on a wheel
df_planets['earth'] = df_planets['sun'] - math.pi
df_angles = neutron_stream_pos(df_planets)
# index on a wheel to specific binary I-Ching Sequence - King Wen Version
z = map_on_hexagram(df_angles)
many_2_b = np.array(iching_binary) # strong
many_2 = np.array(iching_ix) # strong
one_2 = z.astype(int).flatten() - 1 # flat
# binary el
el_b = many_2_b[one_2]
# normal el (0 -> 3)
el = many_2[one_2]
finish = el.reshape((df_angles.shape[0], df_angles.shape[1]*3))
return finish.astype(int)
def __test_neutron_stream_and_mapping(df_planets):
# map 'neutron' stream, aka influences of the probability field (the planets in the solar system physical space)
df_angles = neutron_stream_pos(df_planets.iloc[:, 1:6])
z = map_on_hexagram(df_angles)
print (z)
many_2_b = np.array(iching_binary) # strong
many_2 = np.array(iching_ix) # strong
one_2 = z.astype(int).flatten() - 1 # flat
print (many_2_b[63])
# binary el
el_b = many_2_b[one_2]
# normal el (0 -> 3)
el = many_2[one_2]
print (el)
def get_crypto_planet_data(size, mapping_type='elemental'):
""" Returns bitstamp data with size = # of ticks (in minutes for this dataset)
params:
size: # of ticks (seconds in this case)
mapping_type:
- 'elemental' (3-compound elements)
            - 'elemental_ching' (I-Ching plain binary)
            - 'element_ching_lines' (I-Ching plain binary + lines dummified [lines 1-6 -> 6 columns with 0 or 1])
"""
# get planetary positions
_planets, ts = get_planetary_ephemeris()
color_map = get_color_map()
df_c = pd.read_csv('bitstampUSD_1-min_data_2012-01-01_to_2020-09-14.csv', parse_dates=True)
# make data timestamp
df_c['date'] = pd.to_datetime(df_c['Timestamp'], unit='s')
# cast down to hourly data
groupkey = pd.to_datetime(df_c[-size:].date.dt.strftime('%Y-%m-%d %H'))
df_hourly = df_c[-size:].groupby(groupkey).agg({'Close':'last','Volume_(BTC)':'sum'})
df_hourly.head()
first_date = df_hourly.iloc[0].name
print ( first_date )
# generate ephemerial elements
h = first_date.hour
hours_in_trading_code = len(df_hourly) # stock exchange count of number differences
t_time_array = ts.utc(first_date.year, first_date.month, first_date.day, range(h,h+hours_in_trading_code), 0) # -3000 BC to 3000 BC, increments in hours
# generate empheremis for time period
df_crypto_planets = generate_planets(_planets, ts, t_time_array) # can take a while
# selected desired planets for attribution
r = ['earth','moon','mercury','venus','sun', 'mars', 'jupiter','saturn', 'uranus','neptune']
r.reverse()
# create elemental data map
if mapping_type == 'elemental':
data_tmp = get_elemental_map(df_crypto_planets.loc[:,r])
elif mapping_type == 'elemental_ching':
data_tmp = get_elemental_ching_map(df_crypto_planets.loc[:,r])
elif mapping_type == 'element_ching_lines':
data_tmp, lines = get_elemental_ching_lines_map(df_crypto_planets.loc[:,r])
# return data_tmp, lines
# plot data map
fig, ax = plt.subplots(figsize=(5,5))
sns.heatmap(data_tmp.transpose(), ax=ax, cmap=color_map, cbar=False)
fig, ax = plt.subplots(figsize=(5,5))
sns.heatmap(lines.transpose(), ax=ax, cmap=color_map, cbar=False)
if mapping_type == 'elemental' or mapping_type == 'elemental_ching':
# create the training dataset [Close, Solar System Time]
df_solar = pd.DataFrame(data_tmp)
df_solar.index = df_hourly.index
df_dataset = pd.concat([df_hourly[['Close']], df_solar], axis=1)
return df_dataset
elif mapping_type == 'element_ching_lines':
# create the training dataset [Close, Solar System Time]
df_solar = pd.DataFrame(data_tmp)
df_solar.index = df_hourly.index
df_lines = pd.DataFrame(lines, columns = [str(x) for x in range(10)] ).astype(str)
df_lines = pd.get_dummies(df_lines)
df_lines.index = df_hourly.index
df_dataset = | pd.concat([df_hourly[['Close']], df_solar, df_lines], axis=1) | pandas.concat |
"""
This is an upgraded version of Ceshine's LGBM starter script, simply adding more
average features and weekly average features to it.
"""
from datetime import date, timedelta
import pandas as pd
import numpy as np
from sklearn.metrics import mean_squared_error
import lightgbm as lgb
import pickle
print("Loading pickles...")
df_2017 = pickle.load(open('../input/processed/df_2017.pickle', 'rb'))
promo_2017 = pickle.load(open('../input/processed/promo_2017.pickle', 'rb'))
items = pickle.load(open('../input/processed/items.pickle', 'rb'))
df_test = pickle.load(open('../input/processed/df_test.pickle', 'rb'))
stores_items = pd.DataFrame(index=df_2017.index)
test_ids = df_test[['id']]
items = items.reindex(stores_items.index.get_level_values(1))
items_class = pd.get_dummies(items["class"], prefix="class", drop_first=True)
items_class.reset_index(drop=True, inplace=True)
def get_timespan(df, dt, minus, periods, freq='D'):
date_index = [c for c in pd.date_range(dt - timedelta(days=minus), periods=periods, freq=freq)
if c in df.columns]
return df[date_index]
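# Illustrative call (hypothetical date): get_timespan(df_2017, date(2017, 5, 31), 7, 7) selects the
# columns for the 7 calendar days 2017-05-24 through 2017-05-30 that actually exist in df_2017.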
def prepare_dataset(t2017, is_train=True):
X = pd.DataFrame({
"day_1_2017": get_timespan(df_2017, t2017, 1, 1).values.ravel(),
"mean_3_2017": get_timespan(df_2017, t2017, 3, 3).mean(axis=1).values,
"mean_7_2017": get_timespan(df_2017, t2017, 7, 7).mean(axis=1).values,
"mean_14_2017": get_timespan(df_2017, t2017, 14, 14).mean(axis=1).values,
"mean_30_2017": get_timespan(df_2017, t2017, 30, 30).mean(axis=1).values,
"mean_60_2017": get_timespan(df_2017, t2017, 60, 60).mean(axis=1).values,
"mean_140_2017": get_timespan(df_2017, t2017, 140, 140).mean(axis=1).values,
"promo_14_2017": get_timespan(promo_2017, t2017, 14, 14).sum(axis=1).values,
"promo_60_2017": get_timespan(promo_2017, t2017, 60, 60).sum(axis=1).values,
"promo_140_2017": get_timespan(promo_2017, t2017, 140, 140).sum(axis=1).values,
"mean_365_2017": get_timespan(df_2017, t2017, 365 - 8, 16).mean(axis=1).values, # yearly trend
})
for i in range(7):
X['mean_4_dow{}_2017'.format(i)] = get_timespan(df_2017, t2017, 28-i, 4, freq='7D').mean(axis=1).values
X['mean_20_dow{}_2017'.format(i)] = get_timespan(df_2017, t2017, 140-i, 20, freq='7D').mean(axis=1).values
for i in range(16):
X["promo_{}".format(i)] = promo_2017[
t2017 + timedelta(days=i)].values.astype(np.uint8)
#X = pd.concat([X, items_class], axis=1)
if is_train:
y = df_2017[
| pd.date_range(t2017, periods=16) | pandas.date_range |
import torch,os,torchvision
import torch.nn as nn
import torch.nn.functional as F
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader, Dataset
from torchvision import datasets, models, transforms
from PIL import Image
from sklearn.model_selection import StratifiedShuffleSplit
import os
import logging
import time
class config:
DATA_DIR='/Public/YongkunLiu/Datasets'
    # Since this is test data, the reb/split variants are all the same, so any one of them will do
    PATH_ISET=DATA_DIR+'/BEAUTY1.0_VOCLikeComb/VOC2007/ImageSets/Split0'
    PATH_IMG=DATA_DIR+'/BEAUTY1.0_VOCLikeComb/VOC2007/JPEGImages'
    IMG_SIZE = 224 # VGG takes 224x224 inputs, so all images are resized to a uniform size
    BATCH_SIZE= 64 # this batch size needs about 4.6-5 GB of GPU memory; lower it if that is too much, or raise it to 512 if you have more than 10 GB
    IMG_MEAN = [0.485, 0.456, 0.406] # ImageNet statistics
IMG_STD = [0.229, 0.224, 0.225]
LOG_DIR='./log'
CUDA=torch.cuda.is_available()
ctx=torch.device("cuda" if CUDA else "cpu")
labels = ['Residential','Public', 'Industrial', 'Commercial']
PATH_PARAMS='/Public/YongkunLiu/beauty_cnn_work_dir/pth'
PATH_PR='/Public/YongkunLiu/beauty_cnn_work_dir/pr'
if not os.path.exists(PATH_PR):
os.makedirs(PATH_PR)
def get_model_file_name():
list_t=os.listdir(config.PATH_PARAMS)
list_t=[b for b in list_t if b[-3:]=='pth']
list_t.sort()
return list_t
def get_test_DataSet():
PATH_ISET=config.DATA_DIR+'/BEAUTY1.0_VOCLikeComb/VOC2007/ImageSets/Split0'
test_dict_t={}
test_dict={'id':[],'label':[]}
list_t1=os.listdir(PATH_ISET)
for file_list in list_t1:
if(file_list.split('_')[-1]=='test.txt'and file_list.split('_')[0]=='Districts'):
with open(PATH_ISET+'/'+file_list) as f:
t1=f.readlines()
t1=[t2.split('\n')[0] for t2 in t1]
test_dict_t[file_list.split('_')[1]]=t1
for t1 in test_dict_t:
test_dict['id']+=test_dict_t[t1]
for t2 in test_dict_t[t1]:
test_dict['label']+=[t1]
df = | pd.DataFrame(test_dict,columns=['id', 'label']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# This IPython notebook mainly records my approach and process for solving the Kaggle Titanic problem
# (1)
import pandas as pd # data analysis
import numpy as np # scientific computing
from pandas import Series,DataFrame
pd.set_option('display.max_columns', None)
#data_train = pd.read_csv("Train.csv")
data_train = pd.read_csv("../data/train.csv")
print(data_train.columns)
#data_train[data_train.Cabin.notnull()]['Survived'].value_counts()
# (2)
print(data_train.info())
# (3)
print(data_train.describe())
# # (4)
# import sys
# reload(sys)
# sys.setdefaultencoding( "utf-8" )
# import matplotlib.pyplot as plt
# fig = plt.figure()
# fig.set(alpha=0.2)  # set the alpha (transparency) parameter for the chart colours
#
# plt.subplot2grid((2,3),(0,0))             # lay out several small plots inside one large figure
# data_train.Survived.value_counts().plot(kind='bar')# plots a bar graph of those who survived vs those who did not.
# plt.title(u"Survival outcome (1 = survived)") # puts a title on our graph
# plt.ylabel(u"Number of people")
#
# plt.subplot2grid((2,3),(0,1))
# data_train.Pclass.value_counts().plot(kind="bar")
# plt.ylabel(u"Number of people")
# plt.title(u"Passenger class distribution")
#
# plt.subplot2grid((2,3),(0,2))
# plt.scatter(data_train.Survived, data_train.Age)
# plt.ylabel(u"年龄") # sets the y axis lable
# plt.grid(b=True, which='major', axis='y') # formats the grid line style of our graphs
# plt.title(u"按年龄看获救分布 (1为获救)")
#
# plt.subplot2grid((2,3),(1,0), colspan=2)
# data_train.Age[data_train.Pclass == 1].plot(kind='kde')   # plots a kernel density estimate of the ages of the 1st class passengers
# data_train.Age[data_train.Pclass == 2].plot(kind='kde')
# data_train.Age[data_train.Pclass == 3].plot(kind='kde')
# plt.xlabel(u"年龄")# plots an axis lable
# plt.ylabel(u"密度")
# plt.title(u"各等级的乘客年龄分布")
# plt.legend((u'头等舱', u'2等舱',u'3等舱'),loc='best') # sets our legend for our graph.
#
# plt.subplot2grid((2,3),(1,2))
# data_train.Embarked.value_counts().plot(kind='bar')
# plt.title(u"各登船口岸上船人数")
# plt.ylabel(u"人数")
# plt.show()
#
# # (5)
# #Look at survival by passenger class
# fig = plt.figure()
# fig.set(alpha=0.2)  # set the figure's alpha (transparency) parameter
#
# Survived_0 = data_train.Pclass[data_train.Survived == 0].value_counts()
# Survived_1 = data_train.Pclass[data_train.Survived == 1].value_counts()
# df=pd.DataFrame({u'Survived':Survived_1, u'Did not survive':Survived_0})
# df.plot(kind='bar', stacked=True)
# plt.title(u"Survival by passenger class")
# plt.xlabel(u"Passenger class")
# plt.ylabel(u"Number of people")
#
# plt.show()
#
# # (6)
# #Look at survival by embarkation port
# fig = plt.figure()
# fig.set(alpha=0.2)  # set the figure's alpha (transparency) parameter
#
# Survived_0 = data_train.Embarked[data_train.Survived == 0].value_counts()
# Survived_1 = data_train.Embarked[data_train.Survived == 1].value_counts()
# df=pd.DataFrame({u'Survived':Survived_1, u'Did not survive':Survived_0})
# df.plot(kind='bar', stacked=True)
# plt.title(u"Survival by embarkation port")
# plt.xlabel(u"Embarkation port")
# plt.ylabel(u"Number of people")
#
# plt.show()
#
# # (7)
# #Look at survival by sex
# fig = plt.figure()
# fig.set(alpha=0.2)  # set the figure's alpha (transparency) parameter
#
# Survived_m = data_train.Survived[data_train.Sex == 'male'].value_counts()
# Survived_f = data_train.Survived[data_train.Sex == 'female'].value_counts()
# df=pd.DataFrame({u'Male':Survived_m, u'Female':Survived_f})
# df.plot(kind='bar', stacked=True)
# plt.title(u"Survival by sex")
# plt.xlabel(u"Sex")
# plt.ylabel(u"Number of people")
# plt.show()
#
# (9)
g = data_train.groupby(['SibSp','Survived'])
df = pd.DataFrame(g.count()['PassengerId'])
df
# (10)
g = data_train.groupby(['Parch','Survived'])
df = pd.DataFrame(g.count()['PassengerId'])
df
# (11)
#Ticket is the ticket number and should be unique, so it has little bearing on the outcome; we leave it out of the feature set
#Cabin has values for only 204 passengers, so let's first look at its distribution
data_train.Cabin.value_counts()
# # (12)
# #Cabin's value counts are very scattered; most Cabin values appear only once, so as a category it may not be an effective feature
# #Let's instead look at how having vs. missing a Cabin value relates to the survival distribution
# fig = plt.figure()
# fig.set(alpha=0.2)  # set the figure's alpha (transparency) parameter
#
# Survived_cabin = data_train.Survived[pd.notnull(data_train.Cabin)].value_counts()
# Survived_nocabin = data_train.Survived[pd.isnull(data_train.Cabin)].value_counts()
# df=pd.DataFrame({u'Has Cabin':Survived_cabin, u'No Cabin':Survived_nocabin}).transpose()
# df.plot(kind='bar', stacked=True)
# plt.title(u"Survival by presence of a Cabin value")
# plt.xlabel(u"Cabin recorded or not")
# plt.ylabel(u"Number of people")
# plt.show()
#Passengers with a Cabin record seem to have a slightly higher survival rate, so let's split this into two classes (Cabin present / Cabin missing) and add it to the categorical features later
# (13)
from sklearn.ensemble import RandomForestRegressor
### Use a RandomForestRegressor to fill in the missing Age values
def set_missing_ages(df):
# Take the existing numeric features and feed them into the Random Forest Regressor
age_df = df[['Age', 'Fare', 'Parch', 'SibSp', 'Pclass']]
# Split the passengers into those with known and unknown ages
known_age = age_df[age_df.Age.notnull()].values
unknown_age = age_df[age_df.Age.isnull()].values
# y is the target age
y = known_age[:, 0]
# X holds the feature values
X = known_age[:, 1:]
# Fit a RandomForestRegressor
rfr = RandomForestRegressor(random_state=0, n_estimators=2000, n_jobs=-1)
rfr.fit(X, y)
# Use the fitted model to predict the unknown ages
predictedAges = rfr.predict(unknown_age[:, 1::])
# Fill the original missing values with the predictions
df.loc[(df.Age.isnull()), 'Age'] = predictedAges
return df, rfr
def set_Cabin_type(df):
df.loc[(df.Cabin.notnull()), 'Cabin'] = "Yes"
df.loc[(df.Cabin.isnull()), 'Cabin'] = "No"
return df
data_train, rfr = set_missing_ages(data_train)
data_train = set_Cabin_type(data_train)
data_train
# (14)
# Logistic regression requires all input features to be numeric,
# so we first discretize / factorize the categorical features.
# Take Cabin as an example: it is a single attribute whose values can be ['yes','no'],
# and we flatten it into two attributes, 'Cabin_yes' and 'Cabin_no'.
# Rows where Cabin was 'yes' get 1 under 'Cabin_yes' and 0 under 'Cabin_no';
# rows where Cabin was 'no' get 0 under 'Cabin_yes' and 1 under 'Cabin_no'.
# We use pandas' get_dummies to do this and concatenate the result onto the original data_train, as shown below.
dummies_Cabin = pd.get_dummies(data_train['Cabin'], prefix= 'Cabin')
dummies_Embarked = pd.get_dummies(data_train['Embarked'], prefix= 'Embarked')
dummies_Sex = pd.get_dummies(data_train['Sex'], prefix= 'Sex')
dummies_Pclass = pd.get_dummies(data_train['Pclass'], prefix= 'Pclass')
df = pd.concat([data_train, dummies_Cabin, dummies_Embarked, dummies_Sex, dummies_Pclass], axis=1)
df.drop(['Pclass', 'Name', 'Sex', 'Ticket', 'Cabin', 'Embarked'], axis=1, inplace=True)
df
# (15)
# Next we do some more preprocessing, such as scaling: features with large ranges are scaled into [-1, 1],
# which speeds up the convergence of logistic regression.
import sklearn.preprocessing as preprocessing
scaler = preprocessing.StandardScaler()
age_scale_param = scaler.fit(df['Age'])
df['Age_scaled'] = scaler.fit_transform(df['Age'], age_scale_param)
fare_scale_param = scaler.fit(df['Fare'])
df['Fare_scaled'] = scaler.fit_transform(df['Fare'], fare_scale_param)
df
# (16)
# Extract the feature columns we need, convert them to a numpy array, and model them with scikit-learn's LogisticRegression
from sklearn import linear_model
train_df = df.filter(regex='Survived|Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass_.*')
train_np = train_df.values
# y is the Survival outcome
y = train_np[:, 0]
# X holds the feature values
X = train_np[:, 1:]
# Fit the LogisticRegression model
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
clf.fit(X, y)
print(clf)
# (17)
X.shape
# (18)
# data_test = pd.read_csv("D:\\project\\peixun\\ai_course_project_px\\1_intro\\4_anli_project_titanic\\Kaggle_Titanic_Chinese\\Kaggle_Titanic-master\\test.csv")
# data_test.loc[ (data_test.Fare.isnull()), 'Fare' ] = 0
# # Next we apply the same feature transformations to test_data as we did to train_data
# # First, fill in the missing ages using the same RandomForestRegressor model
# tmp_df = data_test[['Age','Fare', 'Parch', 'SibSp', 'Pclass']]
# null_age = tmp_df[data_test.Age.isnull()].values
# # Predict the ages from the feature values X and fill them in
# X = null_age[:, 1:]
# predictedAges = rfr.predict(X)
# data_test.loc[ (data_test.Age.isnull()), 'Age' ] = predictedAges
#
# data_test = set_Cabin_type(data_test)
# dummies_Cabin = pd.get_dummies(data_test['Cabin'], prefix= 'Cabin')
# dummies_Embarked = pd.get_dummies(data_test['Embarked'], prefix= 'Embarked')
# dummies_Sex = pd.get_dummies(data_test['Sex'], prefix= 'Sex')
# dummies_Pclass = pd.get_dummies(data_test['Pclass'], prefix= 'Pclass')
#
#
# df_test = pd.concat([data_test, dummies_Cabin, dummies_Embarked, dummies_Sex, dummies_Pclass], axis=1)
# df_test.drop(['Pclass', 'Name', 'Sex', 'Ticket', 'Cabin', 'Embarked'], axis=1, inplace=True)
# df_test['Age_scaled'] = scaler.fit_transform(df_test['Age'], age_scale_param)
# df_test['Fare_scaled'] = scaler.fit_transform(df_test['Fare'], fare_scale_param)
# df_test
#
# # (19)
# test = df_test.filter(regex='Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass_.*')
# predictions = clf.predict(test)
# result = pd.DataFrame({'PassengerId':data_test['PassengerId'].as_matrix(), 'Survived':predictions.astype(np.int32)})
# result.to_csv("logistic_regression_predictions.csv", index=False)
#
# # (20) (3)
# pd.read_csv("logistic_regression_predictions.csv")
# (21)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import learning_curve
# Use sklearn's learning_curve to get training_score and cv_score, then draw the learning curve with matplotlib
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None, n_jobs=1,
train_sizes=np.linspace(.05, 1., 20), verbose=0, plot=True):
"""
Plot the learning curve of the data on a given model.
Parameters
----------
estimator : the classifier you are using.
title : the title of the plot.
X : input features, as a numpy array
y : input target vector
ylim : tuple (ymin, ymax) setting the lowest and highest points of the y axis
cv : number of folds for cross-validation; one fold is used as the cv set and the remaining n-1 for training (default 3)
n_jobs : number of parallel jobs (default 1)
"""
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes, verbose=verbose)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
if plot:
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel(u"训练样本数")
plt.ylabel(u"得分")
plt.gca().invert_yaxis()
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std,
alpha=0.1, color="b")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std,
alpha=0.1, color="r")
plt.plot(train_sizes, train_scores_mean, 'o-', color="b", label=u"Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="r", label=u"Cross-validation score")
plt.legend(loc="best")
plt.draw()
plt.gca().invert_yaxis()
plt.show()
midpoint = ((train_scores_mean[-1] + train_scores_std[-1]) + (test_scores_mean[-1] - test_scores_std[-1])) / 2
diff = (train_scores_mean[-1] + train_scores_std[-1]) - (test_scores_mean[-1] - test_scores_std[-1])
return midpoint, diff
plot_learning_curve(clf, u"Learning curve", X, y)
# (22)
pd.DataFrame({"columns":list(train_df.columns)[1:], "coef":list(clf.coef_.T)})
# (23)
from sklearn import model_selection
# Take a quick look at the cross-validation scores
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
all_data = df.filter(regex='Survived|Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass_.*')
X = all_data.values[:,1:]
y = all_data.values[:,0]
print(model_selection.cross_val_score(clf, X, y, cv=5))
# Split the data
split_train, split_cv = model_selection.train_test_split(df, test_size=0.3, random_state=0)
train_df = split_train.filter(regex='Survived|Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass_.*')
# Build the model
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
clf.fit(train_df.values[:,1:], train_df.values[:,0])
# Predict on the cross-validation data
cv_df = split_cv.filter(regex='Survived|Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass_.*')
predictions = clf.predict(cv_df.values[:,1:])
#split_cv[ predictions != cv_df.values[:,0] ].drop()
# (24)
# Pull out the mispredicted cases and inspect the original dataframe rows
#split_cv['PredictResult'] = predictions
origin_data_train = | pd.read_csv("D:\\project\\peixun\\ai_course_project_px\\1_intro\\4_anli_project_titanic\\Kaggle_Titanic_Chinese\\Kaggle_Titanic-master\\train.csv") | pandas.read_csv |
import pandas as pd
import numpy as np
import pickle
import shap
from lightgbm import LGBMClassifier
def get_new_prediction(bus_line, hour, month, day, bus_carrying_cap, city, temp, pressure, bus_age, total_rain):
'''
This function calculates new predictions for a given bus line, hour, month, day, bus carrying capacity, bus age
(years), city, temperature (degrees celsius), pressure (kPa) and rain (mm). Assumes that a file named
final_fitted.pickle is in the results/ml_model directory.
This is solely for use in the interactive report so the user can dynamically generate a graph
as needed by querying results from the model. Arguments are fed to this function via
user-selected input in the report.
Parameters:
bus_line: A str that represents one of the bus lines in the Greater Vancouver area.
hour: An integer 0-23 representing a particular hour of the day.
month: An integer 1-12 representing a particular month of the year.
day: A str (Mon, Tue, Wed, Thu, Fri, Sat, Sun) that represents a particular day
of the week.
bus_carrying_cap: An integer representing the carrying capacity of a bus.
city: A str representing the city of interest.
temp: A float representing the temperature in degrees celsius.
pressure: A float representing the atmospheric pressure in kPa
bus_age: An integer representing the bus age in years.
total_rain: A float representing the total rain in mm.
Returns:
dict
A dictionary with keys shap, predicted, and column_names containing the
SHAP scores (numpy array), predicted 0/1
scores (numpy array), and column names used in the model fit (list).
'''
shuttles = ["23", "31", "42", "68", "103", "105", "109", "131", "132", "146",
"147", "148", "157", "169", "170", "171", "172", "173", "174", "175", "180", "181",
"182", "184", "185", "186", "187", "189", "215", "227", "251", "252", "256", "262",
"280", "281", "282", "310", "322", "360", "361", "362", "363", "370", "371", "372",
"373", "412", "413", "414", "416", "560", "561", "562", "563", "564", "609", "614",
"616", "617", "618", "619", "719", "722", "733", "741", "743", "744", "745", "746", "748", "749"]
# The values that are held constant: just use the means/modes
new_data = pd.DataFrame({
'hour': pd.Series(hour, dtype='int'),
'day_of_week': pd.Series(day, dtype='str'),
'bus_age': pd.Series(bus_age, dtype='float'),
'bus_carry_capacity': pd.Series(bus_carrying_cap if bus_carrying_cap != "NA" else np.nan, dtype='float'),
'line_no': pd.Series(bus_line, dtype='str'),
'city': | pd.Series(city, dtype='str') | pandas.Series |
from __future__ import division
from datetime import timedelta
from functools import partial
import itertools
from nose.tools import assert_true
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
import platform
if platform.system() != 'Windows':
from zipline.pipeline.loaders.blaze.estimates import (
BlazeNextEstimatesLoader,
BlazeNextSplitAdjustedEstimatesLoader,
BlazePreviousEstimatesLoader,
BlazePreviousSplitAdjustedEstimatesLoader,
)
from zipline.pipeline.loaders.earnings_estimates import (
INVALID_NUM_QTRS_MESSAGE,
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal, assert_raises_regex
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import platform
import unittest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date,
sids,
tuples,
end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples,
columns=[SID_FIELD_NAME,
'estimate',
'knowledge_date'])
df = df.pivot_table(columns=SID_FIELD_NAME,
values='estimate',
index='knowledge_date')
df = df.reindex(
pd.date_range(start_date, end_date)
)
# Index name is lost during reindex.
df.index = df.index.rename('knowledge_date')
df['at_date'] = end_date.tz_localize('utc')
df = df.set_index(['at_date', df.index.tz_localize('utc')]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
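# Illustrative note (added for clarity, not part of the original tests): e.g.
#   create_expected_df_for_factor_compute(
#       pd.Timestamp('2015-01-05'), [0, 1],
#       [(0, 100., pd.Timestamp('2015-01-05')),
#        (1, 200., pd.Timestamp('2015-01-07'))],
#       pd.Timestamp('2015-01-09'))
# returns a frame indexed by (at_date, knowledge_date) with one column per sid,
# where each sid's estimate is forward-filled from its knowledge date onward.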
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp('2014-12-28')
END_DATE = pd.Timestamp('2015-02-04')
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError('make_loader')
@classmethod
def make_events(cls):
raise NotImplementedError('make_events')
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days, self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
's' + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(cls.events, {column.name: val for
column, val in
cls.columns.items()})
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: 'event_date',
MultipleColumnsEstimates.fiscal_quarter: 'fiscal_quarter',
MultipleColumnsEstimates.fiscal_year: 'fiscal_year',
MultipleColumnsEstimates.estimate1: 'estimate1',
MultipleColumnsEstimates.estimate2: 'estimate2'
}
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate1': [1., 2.],
'estimate2': [3., 4.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def make_expected_out(cls):
raise NotImplementedError('make_expected_out')
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp('2015-01-15', tz='utc'),
end_date=pd.Timestamp('2015-01-15', tz='utc'),
)
assert_frame_equal(results, self.expected_out)
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-10'),
'estimate1': 1.,
'estimate2': 3.,
FISCAL_QUARTER_FIELD_NAME: 1.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
(( | pd.Timestamp('2015-01-15', tz='utc') | pandas.Timestamp |
"""A package of tools for simulating the US Electoral College.
...
"""
__version__ = "0.5.0"
__all__ = ["Election", "OutcomeList"]
import logging
from typing import Optional, Union
from pandas import DataFrame, Series
from numpy.random import default_rng
from ecsim.loaders import *
from ecsim.apportionment import *
from ecsim.outcomes import Outcome, OutcomeList
# TODO: improve logging
logger = logging.getLogger(__name__)
logger.setLevel(level=logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(level=logging.WARNING)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)
_RANDOM_SEED = 713
SENATORS_PER_STATE: int = 2
"""The number of senators each state receives as specified in the US Constitution."""
DC_STATEHOOD: bool = False
"""Whether or not the District of Columbia is a state.
The seat of the US Government, the District of Columbia (DC), is not a state, nor
is it a part of any state. Until the ratification of the 23rd amendment, this meant
that it did not receive any electors in the vote for the president. With the adoption
of the 23rd Amendment in 1961, DC is granted as many electors as it would have if it
were a state, but no more than the least populous state.
"""
census_data: DataFrame = load_census_data()
"""The (historical) US population data gathered with each decennial census."""
class Election:
"""Contains the data and method for simulating historical US elections.
Attributes
----------
year : int
The year of the presidential election to simulate.
union : DataFrame
The historical election data used for the simulation.
"""
__slots__ = [
"year",
"representatives",
"apportionment_method",
"union",
"_logger",
"_rng",
]
def __init__(
self,
year: int,
representatives: int = 435,
apportionment_method: ApportionmentMethod = huntington_hill,
) -> None:
"""Initializes an election by loading Census data and election data.
Parameters
----------
year
A presidential election year, must be a multiple of 4.
representatives
The total number of representatives in the House, by default 435,
and must be at least equal to the number of states.
apportionment_method
The specific apportionment method to be used when apportioning
representatives.
"""
self._logger = logging.getLogger(f"{__name__}.{self.__class__.__name__}")
self._rng = default_rng(_RANDOM_SEED)
self.apportionment_method = apportionment_method
if year % 4 != 0:
raise ValueError(f"{year} is not an election year!")
self.year = year
self._logger.debug(f"Loading election data for {self.year}")
self.union = load_election_data(self.year)
number_states = len(self.union) - (
1 if "District of Columbia" in self.union.index and not DC_STATEHOOD else 0
)
if representatives < number_states:
raise ValueError("There are not enough representatives!")
self.representatives = self.apportion_representatives(representatives)
def get_electors(self, state: Optional[str] = None) -> Union[int, Series]:
"""Returns the number of electors for each state.
Parameters
----------
state : str, optional
If present, computes the state's electors from its number of representatives
and the number of senators each state receives. Implements special logic
for the District of Columbia, which is not a state but does receive electors.
Returns
-------
Union[int, Series]
If no state is given, then a pandas.Series is returned containing the number
of electors each state receives.
"""
if state is not None:
if state == "District of Columbia" and not DC_STATEHOOD:
# TODO: move this comment to a Note in the docstring
# this is not technically correct, but it is functionally so,
# i.e. if the population of DC were so much less than that of the least populous
# state that it would be awarded 1 representative where the least populous
# state would receive 2, then this equation would give too many electors
# to DC, but this scenario seems implausible
return (
self.representatives["Representatives"].min() + SENATORS_PER_STATE
)
else:
return (
self.representatives["Representatives"][state] + SENATORS_PER_STATE
)
electors = | Series(0, index=self.union.index, name="Electors", dtype=int) | pandas.Series |
import csv
import pandas as pd
import numpy as np
import itertools
from sklearn import metrics
from sklearn.metrics import confusion_matrix, accuracy_score, roc_curve, auc
import matplotlib.pyplot as plt
import json
import multiprocessing
import os
from tqdm import tqdm
from pathlib import Path
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
# tf.compat.v1.disable_eager_execution()
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
import logging
mpl_logger = logging.getLogger('matplotlib')
mpl_logger.setLevel(logging.WARNING)
with open(Path(__file__).parent/"morph.json", "r") as f:
identities = json.load(f)
# Positives
positives = pd.DataFrame()
for value in tqdm(identities.values(),desc="Positives"):
positives = positives.append(pd.DataFrame(itertools.combinations(value, 2), columns=["file_x", "file_y"]), ignore_index=True)
positives["decision"] = "Yes"
print(positives)
# --------------------------
# Negatives
samples_list = list(identities.values())
negatives = | pd.DataFrame() | pandas.DataFrame |
import sys
import pytz
import hashlib
import numpy as np
import pandas as pd
from datetime import datetime
def edit_form_link(link_text='Submit edits'):
"""Return HTML for link to form for edits"""
return f'<a href="https://docs.google.com/forms/d/e/1FAIpQLScw8EUGIOtUj994IYEM1W7PfBGV0anXjEmz_YKiKJc4fm-tTg/viewform">{link_text}</a>'
def add_google_analytics(input_html):
"""
Return HTML with Google Analytics block added
"""
ga_block = """
<!-- Global site tag (gtag.js) - Google Analytics -->
<script async src="https://www.googletagmanager.com/gtag/js?id=UA-173043454-1"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
gtag('js', new Date());
gtag('config', 'UA-173043454-1');
</script>
"""
output_html = input_html.replace('<!-- replace with google analytics -->', ga_block)
return output_html
def add_geojson(shape_gdf, field_name, field_value, input_html):
"""
Add a GeoJSON feature as a Javascript variable to an HTML string
This variable will be used to calculate the bounds of the map
"""
shape_row = shape_gdf[shape_gdf[field_name] == field_value].copy()
shape_geo = shape_row.geometry.iloc[0]
geo_bounds = shape_geo.boundary[0].xy
output_string = '[['
for idx, value in enumerate(geo_bounds[0]):
if idx > 0:
output_string += ','
output_string += '['
x = geo_bounds[0][idx]
output_string += '{}'.format(x)
y = geo_bounds[1][idx]
output_string += ', {}'.format(y)
output_string += ']\n'
output_string += ']]'
output_html = input_html.replace('REPLACE_WITH_XY', output_string)
return output_html
def dc_coordinates():
"""Return coordinates for a DC-wide map"""
dc_longitude = -77.016243706276569
dc_latitude = 38.894858329321485
dc_zoom_level = 10.3
return dc_longitude, dc_latitude, dc_zoom_level
def anc_names(anc_id):
"""
Return formatted ANC names
"""
ancs = pd.read_csv('data/ancs.csv')
anc_upper = 'ANC' + anc_id
anc_lower = anc_upper.lower()
anc_neighborhoods = ancs[ancs['anc_id'] == anc_id]['neighborhoods'].values[0]
return anc_upper, anc_lower, anc_neighborhoods
def assemble_divo():
"""
Return DataFrame with one row per SMD and various stats about each SMD's ranking
divo = district-votes
"""
results = pd.read_csv('data/results.csv')
districts = pd.read_csv('data/districts.csv')
votes_per_smd = pd.DataFrame(results.groupby('smd_id').votes.sum()).reset_index()
# Calculate number of SMDs in each Ward and ANC
smds_per_ward = pd.DataFrame(districts.groupby('ward').size(), columns=['smds_in_ward']).reset_index()
smds_per_anc = pd.DataFrame(districts.groupby('anc_id').size(), columns=['smds_in_anc']).reset_index()
divo = pd.merge(districts, votes_per_smd, how='inner', on='smd_id')
divo = pd.merge(divo, smds_per_ward, how='inner', on='ward')
divo = pd.merge(divo, smds_per_anc, how='inner', on='anc_id')
divo['smds_in_dc'] = len(districts)
# Rank each SMD by the number of votes recorded for ANC races within that SMD
# method = min: assigns the lowest rank when multiple rows are tied
divo['rank_dc'] = divo['votes'].rank(method='min', ascending=False)
divo['rank_ward'] = divo.groupby('ward').votes.rank(method='min', ascending=False)
divo['rank_anc'] = divo.groupby('anc_id').votes.rank(method='min', ascending=False)
# Create strings showing the ranking of each SMD within its ANC, Ward, and DC-wide
divo['string_dc'] = divo.apply(
lambda row: f"{make_ordinal(row['rank_dc'])} out of {row['smds_in_dc']} SMDs", axis=1)
divo['string_ward'] = divo.apply(
lambda row: f"{make_ordinal(row['rank_ward'])} out of {row['smds_in_ward']} SMDs", axis=1)
divo['string_anc'] = divo.apply(
lambda row: f"{make_ordinal(row['rank_anc'])} out of {row['smds_in_anc']} SMDs", axis=1)
average_votes_in_dc = divo.votes.mean()
average_votes_by_ward = divo.groupby('ward').votes.mean()
average_votes_by_anc = divo.groupby('anc_id').votes.mean()
return divo
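# Illustrative note (added for clarity): with method='min', tied vote counts share
# the best (lowest) rank, e.g. votes of [90, 120, 120, 80] rank as [3, 1, 1, 4]
# when ascending=False.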
def list_commissioners(status=None, date_point=None):
"""
Return dataframe with list of commissioners by status
Options:
status=None (all statuses returned) -- default
status='former'
status='current'
status='future'
date_point=None -- all statuses calculated from current DC time (default)
date_point=(some other datetime) -- all statuses calculated from that datetime
"""
commissioners = pd.read_csv('data/commissioners.csv')
if not date_point:
tz = pytz.timezone('America/New_York')
date_point = datetime.now(tz)
commissioners['start_date'] = pd.to_datetime(commissioners['start_date']).dt.tz_localize(tz='America/New_York')
commissioners['end_date'] = | pd.to_datetime(commissioners['end_date']) | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 18 16:25:41 2020
@author: ludovic.spaeth
"""
import matplotlib
matplotlib.rcParams['pdf.fonttype'] = 42
import pandas as pd
import numpy as np
import seaborn as sn
from matplotlib import pyplot as plt
from scipy import stats
import os
alpha=0.5
swarm = True
sharex, sharey = True, True
cap=0.2
fig, ax = plt.subplots(1,2, figsize=(12,4))
#First the synaptic properties
file = 'D:/000_PAPER/00_ANSWER_TO_REVIEWERS/RandomForestanalysis/OuputWithFolds/RF_Output_With_Folds_Training_Segregated_SORTED_LABELS.xlsx'
rdnFile = 'D:/000_PAPER/00_ANSWER_TO_REVIEWERS/RandomForestanalysis/OuputWithFolds/RF_Output_With_Folds_Training_Segregated_RANDOM_LABELS.xlsx'
df = pd.read_excel(file,header=0)
rdnDf = pd.read_excel(rdnFile, header=0)
#result = df.groupby('Random Forest').mean()[['Random Forest (CTRL)','Random Forest (EC)',
# 'Random Forest (ENR)','Random Forest (ES)',
# 'Random Forest (LC)','Random Forest (LS)']]
sortedResult = df.groupby('Condition').mean()[['Random Forest (CTRL)','Random Forest (EC)',
'Random Forest (LTR)','Random Forest (STR)','Random Forest (ES)',
'Random Forest (LC)','Random Forest (LS)']]
rndmResult = rdnDf.groupby('Condition').mean()[['Random Forest (CTRL)','Random Forest (EC)',
'Random Forest (LTR)','Random Forest (STR)','Random Forest (ES)',
'Random Forest (LC)','Random Forest (LS)']]
sortedResult = sortedResult.reindex(index=['EC', 'LTR','STR', 'ES', 'LC', 'LS', 'CTRL'], columns=['Random Forest (EC)','Random Forest (LTR)','Random Forest (STR)',
'Random Forest (ES)','Random Forest (LC)',
'Random Forest (LS)','Random Forest (CTRL)'])
rndmResult = rndmResult.reindex(index=['EC', 'LTR','STR', 'ES', 'LC', 'LS', 'CTRL'], columns=['Random Forest (EC)','Random Forest (LTR)','Random Forest (STR)',
'Random Forest (ES)','Random Forest (LC)',
'Random Forest (LS)','Random Forest (CTRL)'])
#Show heatmaps
sn.heatmap(sortedResult,annot=True, cmap='magma_r', vmax=0.5, ax=ax[0])
ax[0].set_title('Sorted Labels (synaptic)')
sn.heatmap(rndmResult, annot=True, cmap='magma_r', vmax=0.5, ax=ax[1])
ax[1].set_title('Random Labels (synaptic)')
ACCURACIES, RDM_ACCURACIES = [],[]
#Determine accuracy for sorted labels
for condition in np.unique(df['Condition'].values):
subDf = df.loc[df['Condition']==condition]
avg = subDf.groupby('Fold').mean()
accuracyInCondition = avg['Random Forest ({})'.format(condition)].values
ACCURACIES.append(accuracyInCondition)
#Determine accuracy for randomized labels
for condition in np.unique(df['Condition'].values):
subDf = rdnDf.loc[rdnDf['Condition']==condition]
avg = subDf.groupby('Fold').mean()
accuracyInCondition = avg['Random Forest ({})'.format(condition)].values
RDM_ACCURACIES.append(accuracyInCondition)
sortedLabelsAccuracy = pd.DataFrame(ACCURACIES, index=np.unique(df['Condition'].values)).T.mean(axis=1)
randomLabelAccuracy = pd.DataFrame(RDM_ACCURACIES, index=np.unique(rdnDf['Condition'].values)).T.mean(axis=1)
barPlotdf = | pd.concat([sortedLabelsAccuracy,randomLabelAccuracy],axis=1) | pandas.concat |
from datetime import datetime, timedelta
from io import StringIO
import re
import sys
import numpy as np
import pytest
from pandas._libs.tslib import iNaT
from pandas.compat import PYPY
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.core.dtypes.common import (
is_datetime64_dtype,
is_datetime64tz_dtype,
is_object_dtype,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
from pandas import (
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
PeriodIndex,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
)
from pandas.core.accessor import PandasDelegate
from pandas.core.arrays import DatetimeArray, PandasArray, TimedeltaArray
from pandas.core.base import NoNewAttributesMixin, PandasObject
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
import pandas.util.testing as tm
class CheckStringMixin:
def test_string_methods_dont_fail(self):
repr(self.container)
str(self.container)
bytes(self.container)
def test_tricky_container(self):
if not hasattr(self, "unicode_container"):
pytest.skip("Need unicode_container to test with this")
repr(self.unicode_container)
str(self.unicode_container)
class CheckImmutable:
mutable_regex = re.compile("does not support mutable operations")
def check_mutable_error(self, *args, **kwargs):
# Pass whatever function you normally would to pytest.raises
# (after the Exception kind).
with pytest.raises(TypeError):
self.mutable_regex(*args, **kwargs)
def test_no_mutable_funcs(self):
def setitem():
self.container[0] = 5
self.check_mutable_error(setitem)
def setslice():
self.container[1:2] = 3
self.check_mutable_error(setslice)
def delitem():
del self.container[0]
self.check_mutable_error(delitem)
def delslice():
del self.container[0:3]
self.check_mutable_error(delslice)
mutable_methods = getattr(self, "mutable_methods", [])
for meth in mutable_methods:
self.check_mutable_error(getattr(self.container, meth))
def test_slicing_maintains_type(self):
result = self.container[1:2]
expected = self.lst[1:2]
self.check_result(result, expected)
def check_result(self, result, expected, klass=None):
klass = klass or self.klass
assert isinstance(result, klass)
assert result == expected
class TestPandasDelegate:
class Delegator:
_properties = ["foo"]
_methods = ["bar"]
def _set_foo(self, value):
self.foo = value
def _get_foo(self):
return self.foo
foo = property(_get_foo, _set_foo, doc="foo property")
def bar(self, *args, **kwargs):
""" a test bar method """
pass
class Delegate(PandasDelegate, PandasObject):
def __init__(self, obj):
self.obj = obj
def setup_method(self, method):
pass
def test_invalid_delegation(self):
# these show that in order for the delegation to work
# the _delegate_* methods need to be overridden to not raise
# a TypeError
self.Delegate._add_delegate_accessors(
delegate=self.Delegator,
accessors=self.Delegator._properties,
typ="property",
)
self.Delegate._add_delegate_accessors(
delegate=self.Delegator, accessors=self.Delegator._methods, typ="method"
)
delegate = self.Delegate(self.Delegator())
with pytest.raises(TypeError):
delegate.foo
with pytest.raises(TypeError):
delegate.foo = 5
with pytest.raises(TypeError):
delegate.foo()
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(self):
# Delegate does not implement memory_usage.
# Check that we fall back to in-built `__sizeof__`
# GH 12924
delegate = self.Delegate(self.Delegator())
sys.getsizeof(delegate)
class Ops:
def _allow_na_ops(self, obj):
"""Whether to skip test cases including NaN"""
if (isinstance(obj, Index) and obj.is_boolean()) or not obj._can_hold_na:
# don't test boolean / integer dtypes
return False
return True
def setup_method(self, method):
self.bool_index = tm.makeBoolIndex(10, name="a")
self.int_index = tm.makeIntIndex(10, name="a")
self.float_index = tm.makeFloatIndex(10, name="a")
self.dt_index = tm.makeDateIndex(10, name="a")
self.dt_tz_index = tm.makeDateIndex(10, name="a").tz_localize(tz="US/Eastern")
self.period_index = tm.makePeriodIndex(10, name="a")
self.string_index = tm.makeStringIndex(10, name="a")
self.unicode_index = tm.makeUnicodeIndex(10, name="a")
arr = np.random.randn(10)
self.bool_series = Series(arr, index=self.bool_index, name="a")
self.int_series = Series(arr, index=self.int_index, name="a")
self.float_series = Series(arr, index=self.float_index, name="a")
self.dt_series = Series(arr, index=self.dt_index, name="a")
self.dt_tz_series = self.dt_tz_index.to_series(keep_tz=True)
self.period_series = Series(arr, index=self.period_index, name="a")
self.string_series = Series(arr, index=self.string_index, name="a")
self.unicode_series = Series(arr, index=self.unicode_index, name="a")
types = ["bool", "int", "float", "dt", "dt_tz", "period", "string", "unicode"]
self.indexes = [getattr(self, "{}_index".format(t)) for t in types]
self.series = [getattr(self, "{}_series".format(t)) for t in types]
# To test narrow dtypes, we use narrower *data* elements, not *index* elements
index = self.int_index
self.float32_series = Series(arr.astype(np.float32), index=index, name="a")
arr_int = np.random.choice(10, size=10, replace=False)
self.int8_series = Series(arr_int.astype(np.int8), index=index, name="a")
self.int16_series = Series(arr_int.astype(np.int16), index=index, name="a")
self.int32_series = Series(arr_int.astype(np.int32), index=index, name="a")
self.uint8_series = Series(arr_int.astype(np.uint8), index=index, name="a")
self.uint16_series = Series(arr_int.astype(np.uint16), index=index, name="a")
self.uint32_series = Series(arr_int.astype(np.uint32), index=index, name="a")
nrw_types = ["float32", "int8", "int16", "int32", "uint8", "uint16", "uint32"]
self.narrow_series = [getattr(self, "{}_series".format(t)) for t in nrw_types]
self.objs = self.indexes + self.series + self.narrow_series
def check_ops_properties(self, props, filter=None, ignore_failures=False):
for op in props:
for o in self.is_valid_objs:
# if a filter, skip if it doesn't match
if filter is not None:
filt = o.index if isinstance(o, Series) else o
if not filter(filt):
continue
try:
if isinstance(o, Series):
expected = Series(getattr(o.index, op), index=o.index, name="a")
else:
expected = getattr(o, op)
except (AttributeError):
if ignore_failures:
continue
result = getattr(o, op)
# these could be series, arrays or scalars
if isinstance(result, Series) and isinstance(expected, Series):
tm.assert_series_equal(result, expected)
elif isinstance(result, Index) and isinstance(expected, Index):
tm.assert_index_equal(result, expected)
elif isinstance(result, np.ndarray) and isinstance(
expected, np.ndarray
):
tm.assert_numpy_array_equal(result, expected)
else:
assert result == expected
# freq raises AttributeError on an Int64Index because its not
# defined we mostly care about Series here anyhow
if not ignore_failures:
for o in self.not_valid_objs:
# an object that is datetimelike will raise a TypeError,
# otherwise an AttributeError
err = AttributeError
if issubclass(type(o), DatetimeIndexOpsMixin):
err = TypeError
with pytest.raises(err):
getattr(o, op)
@pytest.mark.parametrize("klass", [Series, DataFrame])
def test_binary_ops_docs(self, klass):
op_map = {
"add": "+",
"sub": "-",
"mul": "*",
"mod": "%",
"pow": "**",
"truediv": "/",
"floordiv": "//",
}
for op_name in op_map:
operand1 = klass.__name__.lower()
operand2 = "other"
op = op_map[op_name]
expected_str = " ".join([operand1, op, operand2])
assert expected_str in getattr(klass, op_name).__doc__
# reverse version of the binary ops
expected_str = " ".join([operand2, op, operand1])
assert expected_str in getattr(klass, "r" + op_name).__doc__
class TestIndexOps(Ops):
def setup_method(self, method):
super().setup_method(method)
self.is_valid_objs = self.objs
self.not_valid_objs = []
def test_none_comparison(self):
# bug brought up by #1079
# changed from TypeError in 0.17.0
for o in self.is_valid_objs:
if isinstance(o, Series):
o[0] = np.nan
# noinspection PyComparisonWithNone
result = o == None # noqa
assert not result.iat[0]
assert not result.iat[1]
# noinspection PyComparisonWithNone
result = o != None # noqa
assert result.iat[0]
assert result.iat[1]
result = None == o # noqa
assert not result.iat[0]
assert not result.iat[1]
result = None != o # noqa
assert result.iat[0]
assert result.iat[1]
if is_datetime64_dtype(o) or is_datetime64tz_dtype(o):
# Following DatetimeIndex (and Timestamp) convention,
# inequality comparisons with Series[datetime64] raise
with pytest.raises(TypeError):
None > o
with pytest.raises(TypeError):
o > None
else:
result = None > o
assert not result.iat[0]
assert not result.iat[1]
result = o < None
assert not result.iat[0]
assert not result.iat[1]
def test_ndarray_compat_properties(self):
for o in self.objs:
# Check that we work.
for p in ["shape", "dtype", "T", "nbytes"]:
assert getattr(o, p, None) is not None
# deprecated properties
for p in ["flags", "strides", "itemsize"]:
with tm.assert_produces_warning(FutureWarning):
assert getattr(o, p, None) is not None
with tm.assert_produces_warning(FutureWarning):
assert hasattr(o, "base")
# If we have a datetime-like dtype then needs a view to work
# but the user is responsible for that
try:
with tm.assert_produces_warning(FutureWarning):
assert o.data is not None
except ValueError:
pass
with pytest.raises(ValueError):
with tm.assert_produces_warning(FutureWarning):
o.item() # len > 1
assert o.ndim == 1
assert o.size == len(o)
with tm.assert_produces_warning(FutureWarning):
assert Index([1]).item() == 1
assert Series([1]).item() == 1
def test_value_counts_unique_nunique(self):
for orig in self.objs:
o = orig.copy()
klass = type(o)
values = o._values
if isinstance(values, Index):
# reset name not to affect latter process
values.name = None
# create repeated values, 'n'th element is repeated by n+1 times
# skip boolean, because it only has 2 values at most
if isinstance(o, Index) and o.is_boolean():
continue
elif isinstance(o, Index):
expected_index = Index(o[::-1])
expected_index.name = None
o = o.repeat(range(1, len(o) + 1))
o.name = "a"
else:
expected_index = Index(values[::-1])
idx = o.index.repeat(range(1, len(o) + 1))
# take-based repeat
indices = np.repeat(np.arange(len(o)), range(1, len(o) + 1))
rep = values.take(indices)
o = klass(rep, index=idx, name="a")
# check values has the same dtype as the original
assert o.dtype == orig.dtype
expected_s = Series(
range(10, 0, -1), index=expected_index, dtype="int64", name="a"
)
result = o.value_counts()
tm.assert_series_equal(result, expected_s)
assert result.index.name is None
assert result.name == "a"
result = o.unique()
if isinstance(o, Index):
assert isinstance(result, o.__class__)
tm.assert_index_equal(result, orig)
assert result.dtype == orig.dtype
elif is_datetime64tz_dtype(o):
# datetimetz Series returns array of Timestamp
assert result[0] == orig[0]
for r in result:
assert isinstance(r, Timestamp)
tm.assert_numpy_array_equal(
result.astype(object), orig._values.astype(object)
)
else:
tm.assert_numpy_array_equal(result, orig.values)
assert result.dtype == orig.dtype
assert o.nunique() == len(np.unique(o.values))
@pytest.mark.parametrize("null_obj", [np.nan, None])
def test_value_counts_unique_nunique_null(self, null_obj):
for orig in self.objs:
o = orig.copy()
klass = type(o)
values = o._ndarray_values
if not self._allow_na_ops(o):
continue
# special assign to the numpy array
if is_datetime64tz_dtype(o):
if isinstance(o, DatetimeIndex):
v = o.asi8
v[0:2] = iNaT
values = o._shallow_copy(v)
else:
o = o.copy()
o[0:2] = pd.NaT
values = o._values
elif | needs_i8_conversion(o) | pandas.core.dtypes.common.needs_i8_conversion |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import base64
import datetime
from io import StringIO
import pandas as pd
def draw():
"""pandas dataframe 绘图后写入 html"""
end_datetime = datetime.datetime.now()
beg_datetime = end_datetime - datetime.timedelta(days=30)
daily_video_member_list = DailyVideoMemberReportDAO.get_by_date_range(
beg_datetime.date(), end_datetime.date()
)
df = | pd.DataFrame(daily_video_member_list) | pandas.DataFrame |
import requests
import json
import arrow
from datetime import datetime
from requests.auth import HTTPBasicAuth
import numpy as np
import pandas as pd
from datetime import date, datetime, timedelta as td
def get_activities(url, start_date, end_date, period, level):
# Configuration for Query
# SEE: https://www.rescuetime.com/apidoc
payload = {
'perspective':'interval',
'resolution_time': period, #1 of "month", "week", "day", "hour", "minute"
'restrict_kind': level, #'overview', #'document', #'category'
'restrict_begin': start_date,
'restrict_end': end_date,
'format':'json' #csv
}
# Setup Iteration - by Day
d1 = pd.to_datetime(payload['restrict_begin'])
d2 = | pd.to_datetime(payload['restrict_end']) | pandas.to_datetime |
import pandas as pd
import matplotlib.pyplot as plt
from data import games
plays = games[games['type'] == 'play']
plays.columns = ['type', 'inning', 'team', 'player', 'count', 'pitches',
'event', 'game_id', 'year']
# These functions select the rows where the event column's value starts with S
# (not SB), D, T, and HR in the plays DataFrame to create the hits DataFrame.
hits = plays.loc[plays['event'].str.contains('^(?:S(?!B)|D|T|HR)'),
['inning', 'event']]
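# For example (added for clarity), events such as 'S8' (single), 'D7' (double),
# 'T9' (triple) and 'HR' (home run) match the pattern above, while 'SB2'
# (a stolen base) is excluded by the negative lookahead (?!B).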
# This function converts the inning column of the hits DataFrame from
# strings to numeric.
hits.loc[:, 'inning'] = | pd.to_numeric(hits.loc[:, 'inning']) | pandas.to_numeric |
import re
import os
import gzip
import warnings
import pandas as pd
from glob import glob
from datetime import datetime
from string import ascii_letters, digits
# TODO: Ideally these should be imported from Qiita
REQUIRED_COLUMNS = {'well_description', 'sample_plate', 'sample_well',
'i7_index_id', 'index', 'i5_index_id', 'index2'}
PREP_COLUMNS = ['sample_name', 'experiment_design_description',
'library_construction_protocol', 'platform', 'run_center',
'run_date', 'run_prefix', 'sequencing_meth', 'center_name',
'center_project_name', 'instrument_model', 'runid',
'lane', 'sample_project'] + list(REQUIRED_COLUMNS)
AMPLICON_PREP_COLUMN_RENAMER = {
'Sample': 'sample_name',
'Golay Barcode': 'barcode',
'515FB Forward Primer (Parada)': 'primer',
'Project Plate': 'project_plate',
'Project Name': 'project_name',
'Well': 'well',
'Primer Plate #': 'primer_plate_number',
'Plating': 'plating',
'Extraction Kit Lot': 'extractionkit_lot',
'Extraction Robot': 'extraction_robot',
'TM1000 8 Tool': 'tm1000_8_tool',
'Primer Date': 'primer_date',
'MasterMix Lot': 'mastermix_lot',
'Water Lot': 'water_lot',
'Processing Robot': 'processing_robot',
'sample sheet Sample_ID': 'well_description'
}
# put together by Gail, based on the instruments we know of
INSTRUMENT_LOOKUP = pd.DataFrame({
'A00953': {'machine prefix': 'A', 'Vocab': 'Illumina NovaSeq 6000',
'Machine type': 'NovaSeq', 'run_center': 'IGM'},
'A00169': {'machine prefix': 'A', 'Vocab': 'Illumina NovaSeq 6000',
'Machine type': 'NovaSeq', 'run_center': 'LJI'},
'M05314': {'machine prefix': 'M', 'Vocab': 'Illumina MiSeq',
'Machine type': 'MiSeq', 'run_center': 'KLM'},
'K00180': {'machine prefix': 'K', 'Vocab': 'Illumina HiSeq 4000',
'Machine type': 'HiSeq', 'run_center': 'IGM'},
'D00611': {'machine prefix': 'D', 'Vocab': 'Illumina HiSeq 2500',
'Machine type': 'HiSeq/RR', 'run_center': 'IGM'},
'MN01225': {'machine prefix': 'MN', 'Vocab': 'Illumina MiniSeq',
'Machine type': 'MiniSeq', 'run_center': 'CMI'}}).T
def parse_illumina_run_id(run_id):
"""Parse a run identifier
Parameters
----------
run_id: str
The name of a run
Returns
-------
str:
When the run happened (YYYY-MM-DD)
str:
Instrument code
"""
# Format should be YYMMDD_machinename_XXXX_FC
# this regex has two groups, the first one is the date, and the second one
# is the machine name + suffix. This URL shows some examples
# tinyurl.com/rmy67kw
matches = re.search(r'^(\d{6})_(\w*)', run_id)
if matches is None or len(matches.groups()) != 2:
raise ValueError('Unrecognized run identifier format "%s". The '
'expected format is YYMMDD_machinename_XXXX_FC.' %
run_id)
# convert illumina's format to qiita's format
run_date = datetime.strptime(matches[1], '%y%m%d').strftime('%Y-%m-%d')
return run_date, matches[2]
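# Illustrative example (added for clarity, not from the original module):
#   parse_illumina_run_id('200325_A00953_0001_ABCDEFG')
#   returns ('2020-03-25', 'A00953_0001_ABCDEFG')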
def is_nonempty_gz_file(name):
"""Taken from https://stackoverflow.com/a/37878550/379593"""
with gzip.open(name, 'rb') as f:
try:
file_content = f.read(1)
return len(file_content) > 0
except Exception:
return False
def remove_qiita_id(project_name):
# project identifiers are digit groups at the end of the project name
# preceded by an underscore CaporasoIllumina_550
qiita_id_re = re.compile(r'(.+)_(\d+)$')
# no matches
matches = re.search(qiita_id_re, project_name)
if matches is None:
return project_name
else:
# group 1 is the project name
return matches[1]
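# Illustrative example (added for clarity): remove_qiita_id('CaporasoIllumina_550')
# returns 'CaporasoIllumina', while a name without a trailing numeric group, such as
# 'CaporasoIllumina', is returned unchanged.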
def get_run_prefix(run_path, project, sample, lane, pipeline):
"""For a sample find the run prefix
Parameters
----------
run_path: str
Base path for the run
project: str
Name of the project
sample: str
Sample name
lane: str
Lane number
pipeline: str
The pipeline used to generate the data. Should be one of
`atropos-and-bowtie2` or `fastp-and-minimap2`.
Returns
-------
str
The run prefix of the sequence file in the lane, only if the sequence
file is not empty.
"""
base = os.path.join(run_path, project)
path = base
# each pipeline sets up a slightly different directory structure,
# importantly fastp-and-minimap2 won't save intermediate files
if pipeline == 'atropos-and-bowtie2':
qc = os.path.join(base, 'atropos_qc')
hf = os.path.join(base, 'filtered_sequences')
# If both folders exist and have sequence files always prefer the
# human-filtered sequences
if _exists_and_has_files(qc):
path = qc
if _exists_and_has_files(hf):
path = hf
elif pipeline == 'fastp-and-minimap2':
qc = os.path.join(base, 'trimmed_sequences')
hf = os.path.join(base, 'filtered_sequences')
if _exists_and_has_files(qc) and _exists_and_has_files(hf):
path = hf
elif _exists_and_has_files(qc):
path = qc
elif _exists_and_has_files(hf):
path = hf
else:
path = base
else:
raise ValueError('Invalid pipeline "%s"' % pipeline)
results = glob(os.path.join(path, '%s_S*_L*%s_R*.fastq.gz' %
(sample, lane)))
# at this stage there should only be two files forward and reverse
if len(results) == 2:
forward, reverse = sorted(results)
if is_nonempty_gz_file(forward) and is_nonempty_gz_file(reverse):
f, r = os.path.basename(forward), os.path.basename(reverse)
if len(f) != len(r):
raise ValueError("Forward and reverse sequences filenames "
"don't match f:%s r:%s" % (f, r))
# The first character that's different is the number in R1/R2. We
# find this position this way because sometimes filenames are
# written as _R1_.. or _R1.trimmed... and splitting on _R1 might
# catch some substrings not part of R1/R2.
for i in range(len(f)):
if f[i] != r[i]:
i -= 2
break
return f[:i]
else:
return None
elif len(results) > 2:
warnings.warn(('There are %d matches for sample "%s" in lane %s. Only'
' two matches are allowed (forward and reverse): %s') %
(len(results), sample, lane, ', '.join(sorted(results))))
return None
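# Illustrative example (added for clarity): if the matched pair of files is
# 'sampleA_S104_L001_R1_001.fastq.gz' and 'sampleA_S104_L001_R2_001.fastq.gz',
# the returned run prefix is 'sampleA_S104_L001'.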
def _file_list(path):
return [f for f in os.listdir(path)
if not os.path.isdir(os.path.join(path, f))]
def _exists_and_has_files(path):
return os.path.exists(path) and len(_file_list(path))
def get_machine_code(instrument_model):
"""Get the machine code for an instrument's code
Parameters
----------
instrument_model: str
An instrument's model of the form A999999 or AA999999
Returns
-------
"""
# the machine code represents the first 1 to 2 letters of the
# instrument model
machine_code = re.compile(r'^([a-zA-Z]{1,2})')
matches = re.search(machine_code, instrument_model)
if matches is None:
raise ValueError('Cannot find a machine code. This instrument '
'model is malformed %s. The machine code is a '
'one or two character prefix.' % instrument_model)
return matches[0]
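# Illustrative example (added for clarity): get_machine_code('A00953') returns 'A'
# and get_machine_code('MN01225') returns 'MN'.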
def get_model_and_center(instrument_code):
"""Determine instrument model and center based on a lookup
Parameters
----------
instrument_code: str
Instrument code from a run identifier.
Returns
-------
str
Instrument model.
str
Run center based on the machine's id.
"""
run_center = "UCSDMI"
instrument_model = instrument_code.split('_')[0]
if instrument_model in INSTRUMENT_LOOKUP.index:
run_center = INSTRUMENT_LOOKUP.loc[instrument_model, 'run_center']
instrument_model = INSTRUMENT_LOOKUP.loc[instrument_model, 'Vocab']
else:
instrument_prefix = get_machine_code(instrument_model)
if instrument_prefix not in INSTRUMENT_LOOKUP['machine prefix']:
raise ValueError('Unrecognized machine prefix %s' % instrument_prefix)
instrument_model = INSTRUMENT_LOOKUP[
INSTRUMENT_LOOKUP['machine prefix'] == instrument_prefix
]['Vocab'].unique()[0]
return instrument_model, run_center
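# Illustrative example (added for clarity): get_model_and_center('A00953_0001')
# returns ('Illumina NovaSeq 6000', 'IGM') via the INSTRUMENT_LOOKUP table.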
def agp_transform(frame, study_id):
"""If the prep belongs to the American Gut Project fill in some blanks
Parameters
----------
frame: pd.DataFrame
The preparation file for a single project.
study_id: str
The Qiita study identifier for this preparation.
Returns
-------
pd.DataFrame:
If the study_id is "10317" then:
- `center_name`, 'library_construction_protocol', and
`experiment_design_description` columns are filled in with
default values.
- `sample_name` will be zero-filled to 9 digits.
Otherwise no changes are made to `frame`.
"""
if study_id == '10317':
def zero_fill(name):
if 'blank' not in name.lower() and name[0].isdigit():
return name.zfill(9)
return name
frame['sample_name'] = frame['sample_name'].apply(zero_fill)
frame['center_name'] = 'UCSDMI'
frame['library_construction_protocol'] = 'Knight Lab KHP'
frame['experiment_design_description'] = (
'samples of skin, saliva and feces and other samples from the AGP')
return frame
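# Illustrative example (added for clarity): for study_id '10317', a sample_name such
# as '123456' becomes '000123456' (zero-filled to 9 digits), while names containing
# 'blank' or starting with a letter are left untouched; other studies pass through
# unchanged.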
def _check_invalid_names(sample_names):
# taken from qiita.qiita_db.metadata.util.get_invalid_sample_names
valid = set(ascii_letters + digits + '.')
def _has_invalid_chars(name):
return bool(set(name) - valid)
invalid = sample_names[sample_names.apply(_has_invalid_chars)]
if len(invalid):
warnings.warn('The following sample names have invalid '
'characters: %s' %
', '.join(['"%s"' % i for i in invalid.values]))
def preparations_for_run(run_path, sheet, pipeline='fastp-and-minimap2'):
"""Given a run's path and sample sheet generates preparation files
Parameters
----------
run_path: str
Path to the run folder
sheet: sample_sheet.SampleSheet
Sample sheet to convert
pipeline: str, optional
Which pipeline generated the data. The important difference is that
`atropos-and-bowtie2` saves intermediate files, whereas
`fastp-and-minimap2` doesn't. Default is `fastp-and-minimap2`, the
latest version of the sequence processing pipeline.
Returns
-------
dict
Dictionary keyed by run identifier, project name and lane. Values are
preparations represented as DataFrames.
"""
_, run_id = os.path.split(os.path.normpath(run_path))
run_date, instrument_code = parse_illumina_run_id(run_id)
instrument_model, run_center = get_model_and_center(instrument_code)
output = {}
not_present = REQUIRED_COLUMNS - set(sheet.columns)
if not_present:
warnings.warn('These required columns were not found %s'
% ', '.join(not_present), UserWarning)
for col in not_present:
if col == 'well_description':
warnings.warn("Using 'description' instead of "
"'well_description' because that column "
"isn't present", UserWarning)
sheet[col] = sheet['description'].copy()
else:
sheet[col] = 'MISSING_FROM_THE_SAMPLE_SHEET'
for project, project_sheet in sheet.groupby('sample_project'):
project_name = remove_qiita_id(project)
qiita_id = project.replace(project_name + '_', '')
# if the Qiita ID is not found then make for an easy find/replace
if qiita_id == project:
qiita_id = 'QIITA-ID'
for lane, lane_sheet in project_sheet.groupby('lane'):
# this is the portion of the loop that creates the prep
data = []
for sample_name, sample in lane_sheet.iterrows():
run_prefix = get_run_prefix(run_path, project, sample_name,
lane, pipeline)
# we don't care about the sample if there's no file
if run_prefix is None:
continue
row = {c: '' for c in PREP_COLUMNS}
row["sample_name"] = sample.well_description
row["experiment_design_description"] = \
sample.experiment_design_description
row["library_construction_protocol"] = \
sample.library_construction_protocol
row["platform"] = "Illumina"
row["run_center"] = run_center
row["run_date"] = run_date
row["run_prefix"] = run_prefix
row["sequencing_meth"] = "sequencing by synthesis"
row["center_name"] = "CENTER_NAME"
row["center_project_name"] = project_name
row["instrument_model"] = instrument_model
row["runid"] = run_id
row["sample_plate"] = sample.sample_plate
row["sample_well"] = sample.sample_well
row["i7_index_id"] = sample['i7_index_id']
row["index"] = sample['index']
row["i5_index_id"] = sample['i5_index_id']
row["index2"] = sample['index2']
row["lane"] = lane
row["sample_project"] = project
row["well_description"] = '%s.%s.%s' % (sample.sample_plate,
sample.sample_name,
sample.sample_well)
data.append(row)
if not data:
warnings.warn('Project %s and Lane %s have no data' %
(project, lane), UserWarning)
# the American Gut Project is a special case. We'll likely continue
# to grow this study with more and more runs. So we fill some of
# the blanks if we can verify the study id corresponds to the AGP.
# This was a request by <NAME> and Gail
            prep = agp_transform(pd.DataFrame(columns=PREP_COLUMNS, data=data),
                                 qiita_id)
            output[(run_id, project_name, lane)] = prep
    return output
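# Illustrative usage sketch, not part of the original module: the returned dict maps
# (run, project, lane) to a prep DataFrame. The output file-name pattern below is an
# assumption, not a convention defined by this module.
def _example_write_preps(run_path, sheet):
    preps = preparations_for_run(run_path, sheet)
    for (run, project, lane), frame in preps.items():
        frame.to_csv('%s.%s.%s.prep.txt' % (run, project, lane),
                     sep='\t', index=False)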
import numpy as np
import pytest
from pandas import DataFrame, SparseArray, SparseDataFrame, bdate_range
data = {
"A": [np.nan, np.nan, np.nan, 0, 1, 2, 3, 4, 5, 6],
"B": [0, 1, 2, np.nan, np.nan, np.nan, 3, 4, 5, 6],
"C": np.arange(10, dtype=np.float64),
"D": [0, 1, 2, 3, 4, 5, np.nan, np.nan, np.nan, np.nan],
}
dates = bdate_range("1/1/2011", periods=10)
# fixture names must be compatible with the tests in
# tests/frame/test_api.SharedWithSparse
@pytest.fixture
def float_frame_dense():
"""
Fixture for dense DataFrame of floats with DatetimeIndex
Columns are ['A', 'B', 'C', 'D']; some entries are missing
"""
return DataFrame(data, index=dates)
@pytest.fixture
def float_frame():
"""
Fixture for sparse DataFrame of floats with DatetimeIndex
Columns are ['A', 'B', 'C', 'D']; some entries are missing
"""
# default_kind='block' is the default
return SparseDataFrame(data, index=dates, default_kind="block")
@pytest.fixture
def float_frame_int_kind():
"""
Fixture for sparse DataFrame of floats with DatetimeIndex
Columns are ['A', 'B', 'C', 'D'] and default_kind='integer'.
Some entries are missing.
"""
return SparseDataFrame(data, index=dates, default_kind="integer")
@pytest.fixture
def float_string_frame():
"""
Fixture for sparse DataFrame of floats and strings with DatetimeIndex
Columns are ['A', 'B', 'C', 'D', 'foo']; some entries are missing
"""
sdf = SparseDataFrame(data, index=dates)
sdf["foo"] = SparseArray(["bar"] * len(dates))
return sdf
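# Illustrative sketch, not part of the original fixture module: pytest injects these
# fixtures by parameter name, so a test in the same package could look like the
# function below (underscore-prefixed here so it is not collected).
def _example_density_check(float_frame):
    # SparseDataFrame.density is the fraction of entries that are not the fill value.
    assert 0.0 < float_frame.density < 1.0
    assert list(float_frame.columns) == ["A", "B", "C", "D"]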
@pytest.fixture
def float_frame_fill0_dense():
"""
Fixture for dense DataFrame of floats with DatetimeIndex
Columns are ['A', 'B', 'C', 'D']; missing entries have been filled with 0
"""
    values = SparseDataFrame(data).values
    values[np.isnan(values)] = 0
    return DataFrame(values, columns=["A", "B", "C", "D"], index=dates)
import email
import pandas as pd
def extract(data, structured_fields=[], extract_payload=True):
r"""This function extracts data for the given header list from the Enron email dataset.
    It provides the flexibility to choose which fields need to be extracted.
    The header list provided by the user contains the tags in the Enron emails, e.g. Date, Subject etc.
    By default, if no header is provided, this function returns only the email text body of the Enron dataset.
    Arguments:
    1) data: DataFrame. The Enron dataset with column headings. This argument cannot be empty.
    2) structured_fields: List. A list of tags for which data needs to be extracted, e.g. ['Date', 'Subject', 'X-To']. This argument can be dropped if not required.
    3) extract_payload: Boolean. True if the email text body is required; False if only the structured_fields need to be extracted. This argument can also be dropped when calling the function, in which case the default value True is used.
    return: DataFrame. A dataframe with the specified fields along with the original columns passed in as the data argument.
    This function takes the burden of extracting the desired fields from the Enron dataset off the caller. However, it does not clean the data, e.g. it does not remove empty rows or columns, nor does it pre-process the data (lowercasing, removal of unwanted characters).
    Such functionality could be added to make it more powerful.
"""
headers=data.columns
emails = data.rename(columns={headers[0]:'email_path', headers[1]:'email'})
#getting structured text
def create_dict(dictionary, key, value):
if key in dictionary:
values = dictionary.get(key)
values.append(value)
dictionary[key] = values
else:
dictionary[key] = [value]
return dictionary
def get_structured_data(df, fields):
structured_data = {}
messages = df["email"]
for message in messages:
e = email.message_from_string(message)
for header in fields:
header_data = e.get(header)
create_dict(dictionary = structured_data, key = header, value = header_data)
return pd.DataFrame(structured_data)
#getting unstructured text
def get_unstructured_email(df):
messages = []
for item in df["email"]:
e = email.message_from_string(item)
message_body = e.get_payload()
#message_body = message_body.lower()
messages.append(message_body)
return messages
if extract_payload == True:
email_body = get_unstructured_email(emails)
emails["Message-Body"] = email_body
structured_data = get_structured_data(emails, structured_fields)
    emails = pd.concat([emails, structured_data], axis=1)
    return emails
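# Illustrative usage sketch, not part of the original module: a minimal two-column
# frame in the expected shape (path, raw RFC 2822 message). The file path and message
# below are made up.
def _example_extract():
    import pandas as pd
    raw = ("Date: Mon, 14 May 2001 16:39:00 -0700\n"
           "Subject: Test\n"
           "X-To: Jane Doe\n"
           "\n"
           "Hello team,\nplease find the report attached.\n")
    toy = pd.DataFrame({'file': ['maildir/test/1.'], 'message': [raw]})
    return extract(toy, structured_fields=['Date', 'Subject'])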
"""
NAME
dnd_monster_scrape
DESCRIPTION
A simple web scraper for the site www.aidedd.org, a French website about DnD. The following information can
be found for all monsters released for DnD 5e. Attributes marked with * are not available for all creatures.
name
challenge rating
        creature type
size
armor class
health
alignment
legendary
monster source
* strength
* dexterity
* constitution
* intelligence
* wisdom
* charisma
CONTENTS
DnDScrapeShallow - collect information about all monsters, collected from a single page
DnDScrapeDeep - if available, collects deeper information about an individual monster
"""
import bs4
import requests
import pandas as pd
import numpy as np
import time
def DnDScrapeShallow(df_name='data/dnd_data.csv'):
""" The main function that collects monster information, this gets a lot of details including a url that leads
to more information (not available for all monsters). Information is stored in a dataframe, this can be
expanded by calling deep scrape afterwards.
Args:
df_name : file name to load / save values from
Return:
None
"""
hdr = {'User-Agent': 'Mozilla/5.0'}
url = 'https://www.aidedd.org/dnd-filters/monsters.php'
req = requests.get(url, headers=hdr)
if req.status_code == 200:
soup = bs4.BeautifulSoup(req.text, 'html.parser')
df = pd.DataFrame(columns=['name', 'url', 'cr', 'type', 'size', 'ac', 'hp', 'speed', 'align', 'legendary', 'source'])
        # grab the entire row and then we'll just divvy it up and take what we want; the first element is not a monster
results = soup.find_all('tr')
for row in results[1:]:
to_add = []
# name
col = row.find_all('input')
to_add.append(col[0]['value'])
# link
col = row.find_all('a')
to_add.append(col[0]['href'] if col else '')
# challenge rating
col = row.find_all('td', class_='center')
to_add.append(col[0].text)
# type
col = row.find_all('td', class_='col1')
to_add.append(col[0].text)
# sizes
col = row.find_all('td', class_='col2')
to_add.append(col[0].text)
# armor class and hp are stored in col3
col = row.find_all('td', class_='col3')
to_add.append(col[0].text)
to_add.append(col[1].text)
# speed
col = row.find_all('td', class_='col6')
to_add.append(col[0].text)
# alignment
col = row.find_all('td', class_='col4')
to_add.append(col[0].text)
            # legendary
col = row.find_all('td', class_='col5')
to_add.append(col[0].text)
            # source
col = row.find_all('td', class_='colS')
to_add.append(col[0].text)
# add to the end of the df
df.loc[len(df)] = to_add
df.to_csv(df_name, index=False)
else:
print('Connection Error!')
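# Illustrative usage sketch, not part of the original script: the shallow pass writes
# the CSV, the deep pass (defined below) then re-reads it and enriches rows that have
# a monster URL, sleeping between requests to stay polite.
#
#   DnDScrapeShallow('data/dnd_data.csv')
#   DnDScrapeDeep('data/dnd_data.csv', sleep_time=10)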
def DnDScrapeDeep(df_name='data/dnd_data.csv', sleep_time=10):
""" Supplement function that extends the data from the shallow scrape if a url is available.
Args:
df_name : file name to load / save values from
sleep_time : how long to sleep between page accesses
Return:
None
"""
df = pd.read_csv(df_name)
for i, row in df.iterrows():
# if there is a url and we haven't fetched it yet
        if not pd.isna(row['url']):
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 19 2020
@author: <NAME> (@SergioMinuto90)
"""
from pandas.io.json import json_normalize
from abc import ABC, abstractmethod
import socceraction.vaep as vaep
import pandas as pd
import warnings
import os
warnings.simplefilter(action='ignore', category=pd.errors.PerformanceWarning)
from processing import PassingNetworkBuilder
from utils import read_json
class StatsBombPassingNetwork(PassingNetworkBuilder, ABC):
def __init__(self, args):
self.plot_type = args.plot_type
self.team_name = args.team_name
self.match_id = args.match_id
self.plot_name = None
self.df_events = None
self.plot_title = None
self.names_dict = None
self.plot_legend = None
self.num_minutes = None
self.player_position = None
self.pair_pass_value = None
self.pair_pass_count = None
self.player_pass_value = None
self.player_pass_count = None
def read_data(self):
"""
Read StatsBomb eventing data of the selected 'match_id', generating a pandas DataFrame
with the events and a dictionary of player names and nicknames.
"""
# Player name translation dict
lineups = read_json("data/eventing/lineups/{0}.json".format(self.match_id))
self.names_dict = {player["player_name"]: player["player_nickname"]
for team in lineups for player in team["lineup"]}
# Pandas dataframe containing the events of the match
events = read_json("data/eventing/events/{0}.json".format(self.match_id))
        self.df_events = json_normalize(events, sep="_")
import argparse
from multiprocessing import Value
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
import mne
from mne_bids import BIDSPath
import coffeine
from mne_features.feature_extraction import extract_features
from mne.minimum_norm import apply_inverse_cov
from utils import prepare_dataset
DATASETS = ['chbp', 'lemon', 'tuab', 'camcan']
FEATURE_TYPE = ['fb_covs', 'handcrafted', 'source_power']
parser = argparse.ArgumentParser(description='Compute features.')
parser.add_argument(
'-d', '--dataset',
default=None,
nargs='+',
help='the dataset for which features should be computed')
parser.add_argument(
'-t', '--feature_type',
default=None,
nargs='+', help='Type of features to compute')
parser.add_argument(
'--n_jobs', type=int, default=1,
help='number of parallel processes to use (default: 1)')
args = parser.parse_args()
datasets = args.dataset
feature_types = args.feature_type
n_jobs = args.n_jobs
if datasets is None:
datasets = list(DATASETS)
if feature_types is None:
feature_types = list(FEATURE_TYPE)
tasks = [(ds, bs) for ds in datasets for bs in feature_types]
for dataset, feature_type in tasks:
if dataset not in DATASETS:
raise ValueError(f"The dataset '{dataset}' passed is unkonwn")
if feature_type not in FEATURE_TYPE:
raise ValueError(f"The benchmark '{feature_type}' passed is unkonwn")
print(f"Running benchmarks: {', '.join(feature_types)}")
print(f"Datasets: {', '.join(datasets)}")
DEBUG = False
frequency_bands = {
"low": (0.1, 1),
"delta": (1, 4),
"theta": (4.0, 8.0),
"alpha": (8.0, 15.0),
"beta_low": (15.0, 26.0),
"beta_mid": (26.0, 35.0),
"beta_high": (35.0, 49)
}
hc_selected_funcs = [
'std',
'kurtosis',
'skewness',
'quantile',
'ptp_amp',
'mean',
'pow_freq_bands',
'spect_entropy',
'app_entropy',
'samp_entropy',
'svd_entropy',
'hurst_exp',
'hjorth_complexity',
'hjorth_mobility',
'line_length',
'wavelet_coef_energy',
'higuchi_fd',
'zero_crossings',
'svd_fisher_info'
]
hc_func_params = {
'quantile__q': [0.1, 0.25, 0.75, 0.9],
'pow_freq_bands__freq_bands': [0, 2, 4, 8, 13, 18, 24, 30, 49],
'pow_freq_bands__ratios': 'all',
'pow_freq_bands__ratios_triu': True,
'pow_freq_bands__log': True,
'pow_freq_bands__normalize': None,
}
def extract_fb_covs(epochs, condition):
features, meta_info = coffeine.compute_features(
epochs[condition], features=('covs',), n_fft=1024, n_overlap=512,
fs=epochs.info['sfreq'], fmax=49, frequency_bands=frequency_bands)
features['meta_info'] = meta_info
return features
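# Note (inferred from how these features are consumed below, not from the coffeine
# docs): features['covs'] is indexed as covs[band, channel, channel], i.e. one
# covariance matrix per entry of `frequency_bands`, which is exactly what
# extract_source_power() iterates over when projecting band power to source space.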
def extract_handcrafted_feats(epochs, condition):
features = extract_features(
epochs[condition].get_data(), epochs.info['sfreq'], hc_selected_funcs,
funcs_params=hc_func_params, n_jobs=1, ch_names=epochs.ch_names,
return_as_df=False)
out = {'feats': features}
return out
def extract_source_power(bp, info, subject, subjects_dir, covs):
fname_inv = bp.copy().update(suffix='inv',
processing=None,
extension='.fif')
inv = mne.minimum_norm.read_inverse_operator(fname_inv)
# Prepare label time series
labels = mne.read_labels_from_annot('fsaverage', 'aparc_sub',
subjects_dir=subjects_dir)
labels = [ll for ll in labels if 'unknown' not in ll.name]
# for each frequency band
result = []
for i in range(covs.shape[0]):
cov = mne.Covariance(data=covs[i, :, :],
names=info['ch_names'],
bads=info['bads'],
projs=info['projs'],
nfree=0) # nfree ?
stc = apply_inverse_cov(cov, info, inv,
nave=1,
method="dSPM")
label_power = mne.extract_label_time_course(stc,
labels,
inv['src'],
mode="mean")
result.append(np.diag(label_power[:,0]))
return result
def run_subject(subject, cfg, condition):
task = cfg.task
deriv_root = cfg.deriv_root
data_type = cfg.data_type
session = cfg.session
if session.startswith('ses-'):
        # str.lstrip strips characters, not a prefix; drop the leading 'ses-' explicitly
        session = session[len('ses-'):]
bp_args = dict(root=deriv_root, subject=subject,
datatype=data_type, processing="autoreject",
task=task,
check=False, suffix="epo")
if session:
bp_args['session'] = session
bp = BIDSPath(**bp_args)
if not bp.fpath.exists():
return 'no file'
epochs = mne.read_epochs(bp, proj=False, preload=True)
if not any(condition in cc for cc in epochs.event_id):
return 'condition not found'
out = None
# make sure that no EOG/ECG made it into the selection
epochs.pick_types(**{data_type: True})
try:
if feature_type == 'fb_covs':
out = extract_fb_covs(epochs, condition)
elif feature_type == 'handcrafted':
out = extract_handcrafted_feats(epochs, condition)
elif feature_type == 'source_power':
covs = extract_fb_covs(epochs, condition)
covs = covs['covs']
out = extract_source_power(
bp, epochs.info, subject, cfg.subjects_dir, covs)
else:
            raise NotImplementedError()
except Exception as err:
return repr(err)
return out
for dataset, feature_type in tasks:
cfg, subjects = prepare_dataset(dataset)
N_JOBS = cfg.N_JOBS if not n_jobs else n_jobs
if DEBUG:
subjects = subjects[:1]
N_JOBS = 1
frequency_bands = {"alpha": (8.0, 15.0)}
hc_selected_funcs = ['std']
hc_func_params = dict()
for condition in cfg.feature_conditions:
print(
f"Computing {feature_type} features on {dataset} for '{condition}'")
features = Parallel(n_jobs=N_JOBS)(
delayed(run_subject)(sub.split('-')[1], cfg=cfg,
condition=condition) for sub in subjects)
out = {sub: ff for sub, ff in zip(subjects, features)
if not isinstance(ff, str)}
label = None
if dataset in ("chbp", "lemon"):
label = 'pooled'
if '/' in condition:
label = f'eyes-{condition.split("/")[1]}'
elif dataset in ("tuab", 'camcan'):
label = 'rest'
out_fname = cfg.deriv_root / f'features_{feature_type}_{label}.h5'
log_out_fname = (
cfg.deriv_root / f'feature_{feature_type}_{label}-log.csv')
mne.externals.h5io.write_hdf5(
out_fname,
out,
overwrite=True
)
print(f'Features saved under {out_fname}.')
logging = ['OK' if not isinstance(ff, str) else ff for sub, ff in
zip(subjects, features)]
out_log = | pd.DataFrame({"ok": logging, "subject": subjects}) | pandas.DataFrame |
import datetime
import typing
from dataclasses import dataclass
from typing import List
import pandas
from dataclasses_json import dataclass_json
from pytest import fixture
from typing_extensions import Annotated
from flytekit import SQLTask, dynamic, kwtypes
from flytekit.core.hash import HashMethod
from flytekit.core.local_cache import LocalTaskCache
from flytekit.core.task import TaskMetadata, task
from flytekit.core.testing import task_mock
from flytekit.core.workflow import workflow
from flytekit.types.schema import FlyteSchema
# Global counter used to validate number of calls to cache
n_cached_task_calls = 0
@fixture(scope="function", autouse=True)
def setup():
global n_cached_task_calls
n_cached_task_calls = 0
LocalTaskCache.initialize()
LocalTaskCache.clear()
def test_to_confirm_that_cache_keys_include_function_name():
"""
This test confirms that the function name is part of the cache key. It does so by defining 2 tasks with
identical parameters and metadata (i.e. cache=True and cache version).
"""
@task(cache=True, cache_version="v1")
def f1(n: int) -> int:
global n_cached_task_calls
n_cached_task_calls += 1
return n
@task(cache=True, cache_version="v1")
def f2(n: int) -> int:
global n_cached_task_calls
n_cached_task_calls += 1
return n + 1
@workflow
def wf(n: int) -> typing.Tuple[int, int]:
n_f1 = f1(n=n)
n_f2 = f2(n=n)
return n_f1, n_f2
# This is demonstrating that calls to f1 and f2 are cached by input parameters.
assert wf(n=1) == (1, 2)
def test_single_task_workflow():
@task(cache=True, cache_version="v1")
def is_even(n: int) -> bool:
global n_cached_task_calls
n_cached_task_calls += 1
return n % 2 == 0
@task(cache=False)
def uncached_task(a: int, b: int) -> int:
return a + b
@workflow
def check_evenness(n: int) -> bool:
uncached_task(a=n, b=n)
return is_even(n=n)
assert n_cached_task_calls == 0
assert check_evenness(n=1) is False
# Confirm task is called
assert n_cached_task_calls == 1
assert check_evenness(n=1) is False
# Subsequent calls of the workflow with the same parameter do not bump the counter
assert n_cached_task_calls == 1
assert check_evenness(n=1) is False
assert n_cached_task_calls == 1
# Run workflow with a different parameter and confirm counter is bumped
assert check_evenness(n=8) is True
assert n_cached_task_calls == 2
# Run workflow again with the same parameter and confirm the counter is not bumped
assert check_evenness(n=8) is True
assert n_cached_task_calls == 2
def test_shared_tasks_in_two_separate_workflows():
@task(cache=True, cache_version="0.0.1")
def is_odd(n: int) -> bool:
global n_cached_task_calls
n_cached_task_calls += 1
return n % 2 == 1
@workflow
def check_oddness_wf1(n: int) -> bool:
return is_odd(n=n)
@workflow
def check_oddness_wf2(n: int) -> bool:
return is_odd(n=n)
assert n_cached_task_calls == 0
assert check_oddness_wf1(n=42) is False
assert check_oddness_wf1(n=99) is True
assert n_cached_task_calls == 2
# The next two executions of the *_wf2 workflow are going to
# hit the cache for the calls to `is_odd`
assert check_oddness_wf2(n=42) is False
assert check_oddness_wf2(n=99) is True
assert n_cached_task_calls == 2
# TODO add test with typing.List[str]
def test_sql_task():
sql = SQLTask(
"my-query",
query_template="SELECT * FROM hive.city.fact_airport_sessions WHERE ds = '{{ .Inputs.ds }}' LIMIT 10",
inputs=kwtypes(ds=datetime.datetime),
outputs=kwtypes(results=FlyteSchema),
metadata=TaskMetadata(retries=2, cache=True, cache_version="0.1"),
)
@task(cache=True, cache_version="0.1.2")
def t1() -> datetime.datetime:
global n_cached_task_calls
n_cached_task_calls += 1
return datetime.datetime.now()
@workflow
def my_wf() -> FlyteSchema:
dt = t1()
return sql(ds=dt)
with task_mock(sql) as mock:
mock.return_value = pandas.DataFrame(data={"x": [1, 2], "y": ["3", "4"]})
assert n_cached_task_calls == 0
assert (my_wf().open().all() == pandas.DataFrame(data={"x": [1, 2], "y": ["3", "4"]})).all().all()
assert n_cached_task_calls == 1
# The second and third calls hit the cache
assert (my_wf().open().all() == pandas.DataFrame(data={"x": [1, 2], "y": ["3", "4"]})).all().all()
assert n_cached_task_calls == 1
assert (my_wf().open().all() == pandas.DataFrame(data={"x": [1, 2], "y": ["3", "4"]})).all().all()
assert n_cached_task_calls == 1
def test_wf_custom_types():
@dataclass_json
@dataclass
class MyCustomType(object):
x: int
y: str
@task(cache=True, cache_version="a.b.c")
def t1(a: int) -> MyCustomType:
global n_cached_task_calls
n_cached_task_calls += 1
return MyCustomType(x=a, y="t1")
@task(cache=True, cache_version="v1")
def t2(a: MyCustomType, b: str) -> (MyCustomType, int):
global n_cached_task_calls
n_cached_task_calls += 1
return MyCustomType(x=a.x, y=f"{a.y} {b}"), 5
@workflow
def my_wf(a: int, b: str) -> (MyCustomType, int):
return t2(a=t1(a=a), b=b)
assert n_cached_task_calls == 0
c, v = my_wf(a=10, b="hello")
assert v == 5
assert c.x == 10
assert c.y == "t1 hello"
assert n_cached_task_calls == 2
c, v = my_wf(a=10, b="hello")
assert v == 5
assert c.x == 10
assert c.y == "t1 hello"
assert n_cached_task_calls == 2
def test_wf_schema_to_df():
schema1 = FlyteSchema[kwtypes(x=int, y=str)]
@task(cache=True, cache_version="v0")
def t1() -> schema1:
global n_cached_task_calls
n_cached_task_calls += 1
s = schema1()
s.open().write(pandas.DataFrame(data={"x": [1, 2], "y": ["3", "4"]}))
return s
@task(cache=True, cache_version="v1")
def t2(df: pandas.DataFrame) -> int:
global n_cached_task_calls
n_cached_task_calls += 1
return len(df.columns.values)
@workflow
def wf() -> int:
return t2(df=t1())
assert n_cached_task_calls == 0
x = wf()
assert x == 2
assert n_cached_task_calls == 2
# Second call does not bump the counter
x = wf()
assert x == 2
assert n_cached_task_calls == 2
def test_dict_wf_with_constants():
@task(cache=True, cache_version="v99")
def t1(a: int) -> typing.NamedTuple("OutputsBC", t1_int_output=int, c=str):
global n_cached_task_calls
n_cached_task_calls += 1
return a + 2, "world"
@task(cache=True, cache_version="v101")
def t2(a: typing.Dict[str, str]) -> str:
global n_cached_task_calls
n_cached_task_calls += 1
return " ".join([v for k, v in a.items()])
@workflow
def my_wf(a: int, b: str) -> (int, str):
x, y = t1(a=a)
d = t2(a={"key1": b, "key2": y})
return x, d
assert n_cached_task_calls == 0
x = my_wf(a=5, b="hello")
assert x == (7, "hello world")
assert n_cached_task_calls == 2
# Second call does not bump the counter
x = my_wf(a=5, b="hello")
assert x == (7, "hello world")
assert n_cached_task_calls == 2
def test_set_integer_literal_hash_is_not_cached():
"""
Test to confirm that the local cache is not set in the case of integers, even if we
return an annotated integer. In order to make this very explicit, we define a constant hash
function, i.e. the same value is returned by it regardless of the input.
"""
def constant_hash_function(a: int) -> str:
return "hash"
@task
def t0(a: int) -> Annotated[int, HashMethod(function=constant_hash_function)]:
return a
@task(cache=True, cache_version="0.0.1")
def t1(cached_a: int) -> int:
global n_cached_task_calls
n_cached_task_calls += 1
return cached_a
@workflow
def wf(a: int) -> int:
annotated_a = t0(a=a)
return t1(cached_a=annotated_a)
assert n_cached_task_calls == 0
assert wf(a=3) == 3
assert n_cached_task_calls == 1
# Confirm that the value is not cached, even though we set a hash function that
# returns a constant value and that the task has only one input.
assert wf(a=2) == 2
assert n_cached_task_calls == 2
# Confirm that the cache is hit if we execute the workflow with the same value as previous run.
assert wf(a=2) == 2
assert n_cached_task_calls == 2
def test_pass_annotated_to_downstream_tasks():
@task
def t0(a: int) -> Annotated[int, HashMethod(function=str)]:
return a + 1
@task(cache=True, cache_version="42")
def downstream_t(a: int) -> int:
global n_cached_task_calls
n_cached_task_calls += 1
return a + 2
@dynamic
def t1(a: int) -> int:
v = t0(a=a)
# We should have a cache miss in the first call to downstream_t and have a cache hit
# on the second call.
v_1 = downstream_t(a=v)
v_2 = downstream_t(a=v)
return v_1 + v_2
assert n_cached_task_calls == 0
assert t1(a=3) == (6 + 6)
assert n_cached_task_calls == 1
def test_pandas_dataframe_hash():
"""
Test that cache is hit in the case of pandas dataframes where we annotated dataframes to hash
the contents of the dataframes.
"""
def hash_pandas_dataframe(df: pandas.DataFrame) -> str:
return str(pandas.util.hash_pandas_object(df))
@task
def uncached_data_reading_task() -> Annotated[pandas.DataFrame, HashMethod(hash_pandas_dataframe)]:
return pandas.DataFrame({"column_1": [1, 2, 3]})
@task(cache=True, cache_version="0.1")
def cached_data_processing_task(data: pandas.DataFrame) -> pandas.DataFrame:
global n_cached_task_calls
n_cached_task_calls += 1
return data * 2
@workflow
def my_workflow():
raw_data = uncached_data_reading_task()
cached_data_processing_task(data=raw_data)
assert n_cached_task_calls == 0
my_workflow()
assert n_cached_task_calls == 1
# Confirm that we see a cache hit in the case of annotated dataframes.
my_workflow()
assert n_cached_task_calls == 1
def test_list_of_pandas_dataframe_hash():
"""
Test that cache is hit in the case of a list of pandas dataframes where we annotated dataframes to hash
the contents of the dataframes.
"""
def hash_pandas_dataframe(df: pandas.DataFrame) -> str:
return str(pandas.util.hash_pandas_object(df))
@task
def uncached_data_reading_task() -> List[Annotated[pandas.DataFrame, HashMethod(hash_pandas_dataframe)]]:
        return [pandas.DataFrame({"column_1": [1, 2, 3]}),
                pandas.DataFrame({"column_1": [10, 20, 30]})]